diff --git a/.github/actions/get-latest-cli/action.yaml b/.github/actions/get-latest-cli/action.yaml new file mode 100644 index 0000000000000..edc450368f63f --- /dev/null +++ b/.github/actions/get-latest-cli/action.yaml @@ -0,0 +1,21 @@ +name: "Get latest Aptos CLI" +description: | + Fetches the latest released Aptos CLI. +inputs: + destination_directory: + description: "Directory to install the CLI in" + required: true + +runs: + using: composite + steps: + - name: Setup python + uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # pin@v4 + - name: Get installation script + shell: bash + run: | + curl -fsSL "https://aptos.dev/scripts/install_cli.py" > install_cli.py + - name: Run installation script + shell: bash + run: | + python3 install_cli.py --bin-dir ${{ inputs.destination_directory }} diff --git a/.github/actions/install-grpcurl/action.yml b/.github/actions/install-grpcurl/action.yml new file mode 100644 index 0000000000000..cb39bf045663c --- /dev/null +++ b/.github/actions/install-grpcurl/action.yml @@ -0,0 +1,25 @@ +name: "Install grpcurl" +description: | + Installs grpcurl https://github.com/fullstorydev/grpcurl +inputs: + install_directory: + description: "Where to install grpcurl binary. Defaults to github.workspace." + required: false + +runs: + using: composite + steps: + - name: Install grpcurl + shell: bash + run: ${{ github.action_path }}/install_grpcurl.sh + - name: Add grpcurl to install directory and path + shell: bash + run: | + if [ -z "${INSTALL_DIRECTORY}" ]; then + INSTALL_DIRECTORY=${{ github.workspace }} + else + mv grpcurl $INSTALL_DIRECTORY + fi + echo "${INSTALL_DIRECTORY}" | tee -a $GITHUB_PATH + env: + INSTALL_DIRECTORY: ${{ inputs.install_directory }} diff --git a/.github/actions/install-grpcurl/install_grpcurl.sh b/.github/actions/install-grpcurl/install_grpcurl.sh new file mode 100755 index 0000000000000..7e54b14a6fd82 --- /dev/null +++ b/.github/actions/install-grpcurl/install_grpcurl.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -ex + +# A quick script that installs grpcurl if it's not already installed. + +if ! command -v grpcurl &>/dev/null; then + wget https://github.com/fullstorydev/grpcurl/releases/download/v1.8.7/grpcurl_1.8.7_linux_x86_64.tar.gz + sha=$(shasum -a 256 grpcurl_1.8.7_linux_x86_64.tar.gz | awk '{ print $1 }') + [ "$sha" != "b50a9c9cdbabab03c0460a7218eab4a954913d696b4d69ffb720f42d869dbdd5" ] && echo "shasum mismatch" && exit 1 + tar -xvf grpcurl_1.8.7_linux_x86_64.tar.gz + chmod +x grpcurl + ./grpcurl -version +fi + +echo "grpcurl is installed" diff --git a/.github/workflows/build-node-binaries.yaml b/.github/workflows/build-node-binaries.yaml new file mode 100644 index 0000000000000..a010b9d77f487 --- /dev/null +++ b/.github/workflows/build-node-binaries.yaml @@ -0,0 +1,47 @@ +# This defines a workflow to make a release build of the aptos node. +# In order to trigger it go to the Actions Tab of the Repo, click "Build Aptos Node Binaries" and then "Run Workflow". + +name: "Build Aptos Node Binaries" + +on: + pull_request: + paths: + - ".github/workflows/build-node-binaries.yaml" + workflow_dispatch: + inputs: + git_ref: + type: string + required: true + description: "The ref to build from i.e. 
aptos-node-vX.X.X" + +jobs: + build-node-binary: + strategy: + matrix: + os: [ubuntu-20.04, ubuntu-22.04] + name: "Build Aptos Node Binary on ${{ matrix.os }}" + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 + with: + # ref: ${{ github.event.inputs.git_ref }} + ref: aptos-node-v1.4.3 + - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main + - name: Build Aptos Node Binary ${{ matrix.os }} + run: | + set -eux + + OS="${{ matrix.os }}" + SANITIZED_OS="${OS//./-}" + TARNAME="aptos-node-$SANITIZED_OS.tgz" + + cargo build -p aptos-node --release + cd target/release + tar czvf "$TARNAME" aptos-node + mv "$TARNAME" ../../ + + - name: Upload Binary + uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # pin@v3 + with: + name: aptos-node-${{ matrix.os }} + path: aptos-node-*.tgz \ No newline at end of file diff --git a/.github/workflows/cli-e2e-tests.yaml b/.github/workflows/cli-e2e-tests.yaml index 066e582f80d18..20c50ef37302e 100644 --- a/.github/workflows/cli-e2e-tests.yaml +++ b/.github/workflows/cli-e2e-tests.yaml @@ -24,6 +24,8 @@ jobs: id-token: write steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.GIT_SHA }} - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main with: diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index 80f7ab8d936d9..1f97b20f7152a 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -61,6 +61,10 @@ env: # We use `pr-` as cache-id for PRs and simply otherwise. TARGET_CACHE_ID: ${{ github.event.number && format('pr-{0}', github.event.number) || github.ref_name }} + # On PRs, only build and push to GCP + # On push, build and push to all remote registries + TARGET_REGISTRY: ${{ github.event_name == 'pull_request_target' && 'remote' || 'remote-all' }} + permissions: contents: read id-token: write #required for GCP Workload Identity federation which we use to login into Google Artifact Registry @@ -99,9 +103,11 @@ jobs: run: | echo "GIT_SHA: ${GIT_SHA}" echo "TARGET_CACHE_ID: ${TARGET_CACHE_ID}" + echo "TARGET_REGISTRY: ${TARGET_REGISTRY}" outputs: gitSha: ${{ env.GIT_SHA }} targetCacheId: ${{ env.TARGET_CACHE_ID }} + targetRegistry: ${{ env.TARGET_REGISTRY }} rust-images: needs: [permission-check, determine-docker-build-metadata] @@ -112,6 +118,7 @@ jobs: TARGET_CACHE_ID: ${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} PROFILE: release BUILD_ADDL_TESTING_IMAGES: true + TARGET_REGISTRY: ${{ needs.determine-docker-build-metadata.outputs.targetRegistry }} rust-images-indexer: needs: [permission-check, determine-docker-build-metadata] @@ -127,6 +134,7 @@ jobs: PROFILE: release FEATURES: indexer BUILD_ADDL_TESTING_IMAGES: true + TARGET_REGISTRY: ${{ needs.determine-docker-build-metadata.outputs.targetRegistry }} rust-images-failpoints: needs: [permission-check, determine-docker-build-metadata] @@ -142,6 +150,7 @@ jobs: PROFILE: release FEATURES: failpoints BUILD_ADDL_TESTING_IMAGES: true + TARGET_REGISTRY: ${{ needs.determine-docker-build-metadata.outputs.targetRegistry }} rust-images-performance: needs: [permission-check, determine-docker-build-metadata] @@ -156,6 +165,7 @@ jobs: TARGET_CACHE_ID: ${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} PROFILE: performance BUILD_ADDL_TESTING_IMAGES: true + TARGET_REGISTRY: ${{ needs.determine-docker-build-metadata.outputs.targetRegistry }} rust-images-consensus-only-perf-test: needs: 
[permission-check, determine-docker-build-metadata] @@ -170,87 +180,60 @@ jobs: PROFILE: release FEATURES: consensus-only-perf-test BUILD_ADDL_TESTING_IMAGES: true - - rust-images-all: - needs: - [ - determine-docker-build-metadata, - rust-images, - rust-images-indexer, - rust-images-failpoints, - rust-images-performance, - rust-images-consensus-only-perf-test, - ] - if: always() # this ensures that the job will run even if the previous jobs were skipped - runs-on: ubuntu-latest - steps: - - name: fail if rust-images job failed - if: ${{ needs.rust-images.result == 'failure' }} - run: exit 1 - - name: fail if rust-images-indexer job failed - if: ${{ needs.rust-images-indexer.result == 'failure' }} - run: exit 1 - - name: fail if rust-images-failpoints job failed - if: ${{ needs.rust-images-failpoints.result == 'failure' }} - run: exit 1 - - name: fail if rust-images-performance job failed - if: ${{ needs.rust-images-performance.result == 'failure' }} - run: exit 1 - - name: fail if rust-images-consensus-only-perf-test job failed - if: ${{ needs.rust-images-consensus-only-perf-test.result == 'failure' }} - run: exit 1 - outputs: - rustImagesResult: ${{ needs.rust-images.result }} - rustImagesIndexerResult: ${{ needs.rust-images-indexer.result }} - rustImagesFailpointsResult: ${{ needs.rust-images-failpoints.result }} - rustImagesPerformanceResult: ${{ needs.rust-images-performance.result }} - rustImagesConsensusOnlyPerfTestResult: ${{ needs.rust-images-consensus-only-perf-test.result }} + TARGET_REGISTRY: ${{ needs.determine-docker-build-metadata.outputs.targetRegistry }} sdk-release: - needs: [rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" + needs: [permission-check, rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" if: | (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || github.event.pull_request.auto_merge != null || contains(github.event.pull_request.body, '#e2e') - uses: ./.github/workflows/sdk-release.yaml + uses: aptos-labs/aptos-core/.github/workflows/sdk-release.yaml@main secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} cli-e2e-tests: - needs: [rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" + needs: [permission-check, rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" if: | - !contains(github.event.pull_request.labels.*.name, 'CICD:skip-sdk-integration-test') && ( + ( github.event_name == 'push' || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || github.event.pull_request.auto_merge != null) || contains(github.event.pull_request.body, '#e2e' ) - uses: ./.github/workflows/cli-e2e-tests.yaml + uses: aptos-labs/aptos-core/.github/workflows/cli-e2e-tests.yaml@main secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} indexer-grpc-e2e-tests: - needs: [rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" + needs: [permission-check, rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" if: | (github.event_name == 'push' && github.ref_name != 
'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || github.event.pull_request.auto_merge != null || contains(github.event.pull_request.body, '#e2e') - uses: ./.github/workflows/docker-indexer-grpc-test.yaml + uses: aptos-labs/aptos-core/.github/workflows/docker-indexer-grpc-test.yaml@main secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} forge-e2e-test: - needs: [rust-images-all, determine-docker-build-metadata] - if: | # always() ensures that the job will run even if some of the previous docker variant build jobs were skipped https://docs.github.com/en/actions/learn-github-actions/expressions#status-check-functions - always() && needs.rust-images-all.result == 'success' && ( + needs: + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test + if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && ( (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || @@ -272,9 +255,16 @@ jobs: # Run e2e compat test against testnet branch forge-compat-test: - needs: [rust-images-all, determine-docker-build-metadata] - if: | # always() ensures that the job will run even if some of the previous docker variant build jobs were skipped https://docs.github.com/en/actions/learn-github-actions/expressions#status-check-functions - always() && needs.rust-images-all.result == 'success' && ( + needs: + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test + if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && ( (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || @@ -293,9 +283,16 @@ jobs: # Run forge framework upgradability test forge-framework-upgrade-test: - needs: [rust-images-all, determine-docker-build-metadata] - if: | # always() ensures that the job will run even if some of the previous docker variant build jobs were skipped https://docs.github.com/en/actions/learn-github-actions/expressions#status-check-functions - always() && needs.rust-images-all.result == 'success' && ( + needs: + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test + if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && ( (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || @@ -313,9 +310,16 @@ jobs: FORGE_NAMESPACE: forge-framework-upgrade-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} forge-consensus-only-perf-test: - needs: [rust-images-all, determine-docker-build-metadata] - if: | # always() ensures that the job will run even if some of the previous docker variant build jobs were skipped https://docs.github.com/en/actions/learn-github-actions/expressions#status-check-functions - always() && needs.rust-images-all.result == 
'success' && + needs: + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test + if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && contains(github.event.pull_request.labels.*.name, 'CICD:run-consensus-only-perf-test') uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main secrets: inherit @@ -326,3 +330,27 @@ jobs: FORGE_RUNNER_DURATION_SECS: 300 COMMENT_HEADER: forge-consensus-only-perf-test FORGE_NAMESPACE: forge-consensus-only-perf-test-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} + + # Run forge multiregion test. This test uses the multiregion forge cluster that deploys pods in three GCP regions. + forge-multiregion-test: + needs: + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test + if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && + contains(github.event.pull_request.labels.*.name, 'CICD:run-multiregion-test') + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main + secrets: inherit + with: + GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} + FORGE_TEST_SUITE: multiregion_benchmark_test + IMAGE_TAG: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} + FORGE_RUNNER_DURATION_SECS: 300 + COMMENT_HEADER: forge-multiregion-test + FORGE_NAMESPACE: forge-multiregion-test-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} + FORGE_CLUSTER_NAME: forge-multiregion diff --git a/.github/workflows/docker-indexer-grpc-test.yaml b/.github/workflows/docker-indexer-grpc-test.yaml index 0c42f5d789bf7..045efe595bfd3 100644 --- a/.github/workflows/docker-indexer-grpc-test.yaml +++ b/.github/workflows/docker-indexer-grpc-test.yaml @@ -20,7 +20,7 @@ jobs: VALIDATOR_IMAGE_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/validator FAUCET_IMAGE_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/faucet INDEXER_GRPC_IMAGE_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/indexer-grpc - IMAGE_TAG: ${{ inputs.GIT_SHA || 'f4100b21da4e9ba10fadd184e92e3d1c22bc282e' }} # hardcode to a known good build when not running on workflow_call + IMAGE_TAG: ${{ inputs.GIT_SHA || 'devnet' }} # hardcode to a known good build when not running on workflow_call steps: - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 @@ -36,23 +36,12 @@ jobs: AWS_DOCKER_ARTIFACT_REPO: ${{ secrets.AWS_DOCKER_ARTIFACT_REPO }} GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} - - name: Run validator-testnet docker compose - run: docker-compose up -d - shell: bash - working-directory: docker/compose/validator-testnet - - - name: Wait for the validator to make some progress - run: sleep 30 - shell: bash - - - name: Run indexer-grpc docker compose - run: docker-compose up -d - shell: bash - working-directory: docker/compose/indexer-grpc + - name: Install grpcurl + uses: aptos-labs/aptos-core/.github/actions/install-grpcurl@main - - name: Test with grpcurl - run: ./docker/compose/indexer-grpc/test_indexer_grpc_docker_compose.sh + - name: Set up and verify indexer GRPC local docker shell: bash + run: ./testsuite/indexer_grpc_local.py --verbose start - name: Print docker-compose validator-testnet logs on failure if: ${{ failure() }} diff --git a/.github/workflows/execution-performance.yaml 
b/.github/workflows/execution-performance.yaml index 67216ee58e3f6..970839dca8eec 100644 --- a/.github/workflows/execution-performance.yaml +++ b/.github/workflows/execution-performance.yaml @@ -7,4 +7,12 @@ jobs: uses: aptos-labs/aptos-core/.github/workflows/workflow-run-execution-performance.yaml@main secrets: inherit with: - GIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} \ No newline at end of file + GIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} + RUNNER_NAME: executor-benchmark-runner + + spot-runner-execution-performance: + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-execution-performance.yaml@main + secrets: inherit + with: + GIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} + RUNNER_NAME: spot-runner \ No newline at end of file diff --git a/.github/workflows/forge-gcp.yaml b/.github/workflows/forge-gcp.yaml deleted file mode 100644 index 86152ef0edeb1..0000000000000 --- a/.github/workflows/forge-gcp.yaml +++ /dev/null @@ -1,91 +0,0 @@ -# Continuously run Forge tests on GCP against the main branch. -name: Continuous Forge Tests - GCP - -permissions: - issues: write - pull-requests: write - contents: read - id-token: write - actions: write #required for workflow cancellation via check-aptos-core - -on: - # Allow triggering manually - workflow_dispatch: - inputs: - IMAGE_TAG: - required: false - type: string - description: The docker image tag to test. This may be a git SHA1, or a tag like "_". If not specified, Forge will find the latest build based on the git history (starting from GIT_SHA input) - GIT_SHA: - required: false - type: string - description: The git SHA1 to checkout. This affects the Forge test runner that is used. If not specified, the latest main will be used - schedule: - - cron: "37 * * * *" # the main branch cadence - pull_request: - paths: - - ".github/workflows/forge-gcp.yaml" - -env: - AWS_ACCOUNT_NUM: ${{ secrets.ENV_ECR_AWS_ACCOUNT_NUM }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - IMAGE_TAG: ${{ inputs.IMAGE_TAG }} # this is only used for workflow_dispatch, otherwise defaults to empty - AWS_REGION: us-west-2 - -jobs: - # This job determines the image tag and branch to test, and passes them to the other jobs - # NOTE: this may be better as a separate workflow as the logic is quite complex but generalizable - determine-test-metadata: - runs-on: ubuntu-latest - outputs: - IMAGE_TAG: ${{ steps.determine-test-image-tag.outputs.IMAGE_TAG }} - BRANCH: ${{ steps.determine-test-branch.outputs.BRANCH }} - steps: - - name: Determine branch based on cadence - id: determine-test-branch - run: | - BRANCH=main - [[ -n "${{ inputs.GIT_SHA }}" ]] && BRANCH=${{ inputs.GIT_SHA }} - echo "BRANCH=$BRANCH" >> $GITHUB_OUTPUT - - - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 - with: - ref: ${{ steps.determine-test-branch.outputs.BRANCH }} - fetch-depth: 0 - - - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main - with: - cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork - - - uses: ./.github/actions/python-setup - with: - pyproject_directory: testsuite - - - name: Determine image tag - id: determine-test-image-tag - # forge relies on the default and failpoints variants - run: ./testrun find_latest_image.py --variant failpoints --variant performance - shell: bash - working-directory: testsuite - - - name: Write summary - run: | - IMAGE_TAG=${{ 
steps.determine-test-image-tag.outputs.IMAGE_TAG }} - BRANCH=${{ steps.determine-test-branch.outputs.BRANCH }} - if [ -n "${BRANCH}" ]; then - echo "BRANCH: [${BRANCH}](https://github.com/${{ github.repository }}/tree/${BRANCH})" >> $GITHUB_STEP_SUMMARY - fi - echo "IMAGE_TAG: [${IMAGE_TAG}](https://github.com/${{ github.repository }}/commit/${IMAGE_TAG})" >> $GITHUB_STEP_SUMMARY - - forge-land-blocking: - if: ${{ github.event_name != 'pull_request' }} - needs: determine-test-metadata - uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main - secrets: inherit - with: - COMMENT_HEADER: forge-continuous - # This test suite is configured using the forge.py config test command - FORGE_TEST_SUITE: land_blocking - FORGE_CLUSTER_NAME: aptos-forge-0 - FORGE_RUNNER_DURATION_SECS: 1800 diff --git a/.github/workflows/forge-stable.yaml b/.github/workflows/forge-stable.yaml index 37bb72629c537..3e98857906b16 100644 --- a/.github/workflows/forge-stable.yaml +++ b/.github/workflows/forge-stable.yaml @@ -25,6 +25,7 @@ on: pull_request: paths: - ".github/workflows/forge-stable.yaml" + - "testsuite/find_latest_image.py" push: branches: - aptos-release-v* # the aptos release branches @@ -75,6 +76,18 @@ jobs: with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork + # find_latest_images.py requires docker utilities and having authenticated to internal docker image registries + - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main + id: docker-setup + with: + GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} + GCP_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} + EXPORT_GCP_PROJECT_VARIABLES: "false" + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DOCKER_ARTIFACT_REPO: ${{ secrets.AWS_DOCKER_ARTIFACT_REPO }} + GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + - uses: ./.github/actions/python-setup with: pyproject_directory: testsuite @@ -95,44 +108,84 @@ jobs: fi echo "IMAGE_TAG: [${IMAGE_TAG}](https://github.com/${{ github.repository }}/commit/${IMAGE_TAG})" >> $GITHUB_STEP_SUMMARY - ### Performance Forge tests - run-forge-consensus-stress-test: + ### Real-world-network tests. 
+ + run-forge-realistic-env-max-load-long: if: ${{ github.event_name != 'pull_request' }} needs: determine-test-metadata uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main secrets: inherit with: IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-consensus-stress-test-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_RUNNER_DURATION_SECS: 2400 - FORGE_TEST_SUITE: consensus_stress_test - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + FORGE_NAMESPACE: forge-realistic-env-max-load-long-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_RUNNER_DURATION_SECS: 7200 + FORGE_TEST_SUITE: realistic_env_max_load + POST_TO_SLACK: true - run-forge-account-creation-test: + run-forge-realistic-env-load-sweep: if: ${{ github.event_name != 'pull_request' }} needs: determine-test-metadata uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main secrets: inherit with: IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-account-creation-test-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_RUNNER_DURATION_SECS: 900 - FORGE_TEST_SUITE: account_creation - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + FORGE_NAMESPACE: forge-realistic-env-load-sweep-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + # 5 tests, each 300s + FORGE_RUNNER_DURATION_SECS: 1500 + FORGE_TEST_SUITE: realistic_env_load_sweep + POST_TO_SLACK: true - run-forge-performance-test: + run-forge-realistic-env-graceful-overload: if: ${{ github.event_name != 'pull_request' }} needs: determine-test-metadata uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main secrets: inherit with: IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-performance-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_RUNNER_DURATION_SECS: 7200 - FORGE_TEST_SUITE: land_blocking + FORGE_NAMESPACE: forge-realistic-env-graceful-overload-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_RUNNER_DURATION_SECS: 1200 + FORGE_TEST_SUITE: realistic_env_graceful_overload + POST_TO_SLACK: true + + run-forge-realistic-network-tuned-for-throughput: + if: ${{ github.event_name != 'pull_request' }} + needs: determine-test-metadata + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main + secrets: inherit + with: + IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_NAMESPACE: forge-realistic-network-tuned-for-throughput-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_RUNNER_DURATION_SECS: 900 + FORGE_TEST_SUITE: realistic_network_tuned_for_throughput FORGE_ENABLE_PERFORMANCE: true - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true + + ### Forge Correctness/Componenet/Stress tests + + run-forge-consensus-stress-test: + if: ${{ github.event_name != 'pull_request' }} + needs: determine-test-metadata + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main + secrets: inherit + with: + IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_NAMESPACE: forge-consensus-stress-test-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_RUNNER_DURATION_SECS: 2400 + FORGE_TEST_SUITE: consensus_stress_test + POST_TO_SLACK: true + + 
run-forge-workload-mix-test: + if: ${{ github.event_name != 'pull_request' }} + needs: determine-test-metadata + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main + secrets: inherit + with: + IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_NAMESPACE: forge-workload-mix-test-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_RUNNER_DURATION_SECS: 900 + FORGE_TEST_SUITE: workload_mix + POST_TO_SLACK: true run-forge-single-vfn-perf: if: ${{ github.event_name != 'pull_request' }} @@ -145,7 +198,7 @@ jobs: # Run for 8 minutes FORGE_RUNNER_DURATION_SECS: 480 FORGE_TEST_SUITE: single_vfn_perf - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true run-forge-haproxy: if: ${{ github.event_name != 'pull_request' }} @@ -158,7 +211,19 @@ jobs: FORGE_RUNNER_DURATION_SECS: 600 FORGE_ENABLE_HAPROXY: true FORGE_TEST_SUITE: land_blocking - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true + + run-forge-fullnode-reboot-stress-test: + if: ${{ github.event_name != 'pull_request' }} + needs: determine-test-metadata + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main + secrets: inherit + with: + IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_NAMESPACE: forge-fullnode-reboot-stress-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_RUNNER_DURATION_SECS: 1800 + FORGE_TEST_SUITE: fullnode_reboot_stress_test + POST_TO_SLACK: true ### Compatibility Forge tests @@ -175,33 +240,7 @@ jobs: FORGE_TEST_SUITE: compat IMAGE_TAG: testnet GIT_SHA: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} # this is the git ref to checkout - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch - - ### Chaos Forge tests - - run-forge-three-region: - if: ${{ github.event_name != 'pull_request' }} - needs: determine-test-metadata - uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main - secrets: inherit - with: - IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-three-region-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_RUNNER_DURATION_SECS: 1800 - FORGE_TEST_SUITE: three_region_simulation - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch - - run-forge-fullnode-reboot-stress-test: - if: ${{ github.event_name != 'pull_request' }} - needs: determine-test-metadata - uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main - secrets: inherit - with: - IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-fullnode-reboot-stress-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_RUNNER_DURATION_SECS: 1800 - FORGE_TEST_SUITE: fullnode_reboot_stress_test - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true ### Changing working quorum Forge tests @@ -215,7 +254,7 @@ jobs: FORGE_NAMESPACE: forge-changing-working-quorum-test-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} FORGE_RUNNER_DURATION_SECS: 1200 FORGE_TEST_SUITE: changing_working_quorum_test - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true 
FORGE_ENABLE_FAILPOINTS: true run-forge-changing-working-quorum-test-high-load: @@ -228,7 +267,7 @@ jobs: FORGE_NAMESPACE: forge-changing-working-quorum-test-high-load-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} FORGE_RUNNER_DURATION_SECS: 900 FORGE_TEST_SUITE: changing_working_quorum_test_high_load - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true FORGE_ENABLE_FAILPOINTS: true ### State sync Forge tests @@ -257,7 +296,7 @@ jobs: # Run for 40 minutes FORGE_RUNNER_DURATION_SECS: 2400 FORGE_TEST_SUITE: state_sync_perf_fullnodes_execute_transactions - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true run-forge-state-sync-perf-fullnode-fast-sync-test: if: ${{ github.event_name != 'pull_request' }} @@ -270,7 +309,7 @@ jobs: # Run for 40 minutes FORGE_RUNNER_DURATION_SECS: 2400 FORGE_TEST_SUITE: state_sync_perf_fullnodes_fast_sync - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch + POST_TO_SLACK: true run-forge-state-sync-perf-fullnode-apply-test: if: ${{ github.event_name != 'pull_request' }} @@ -282,30 +321,4 @@ jobs: FORGE_NAMESPACE: forge-state-sync-perf-fullnode-apply-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} FORGE_RUNNER_DURATION_SECS: 2400 FORGE_TEST_SUITE: state_sync_perf_fullnodes_apply_outputs - POST_TO_SLACK: ${{ needs.determine-test-metadata.outputs.BRANCH == 'main' }} # only post to slack on main branch - - ### Additional three-region tests. Eventually all consensus-related tests should migrate to three-regions. - - run-forge-land-blocking-three-region: - if: ${{ github.event_name != 'pull_request' }} - needs: determine-test-metadata - uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main - secrets: inherit - with: - IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-land-blocking-three-region-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_RUNNER_DURATION_SECS: 1800 - FORGE_TEST_SUITE: land_blocking_three_region - POST_TO_SLACK: true - - run-forge-three-region-graceful-overload: - if: ${{ github.event_name != 'pull_request' }} - needs: determine-test-metadata - uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main - secrets: inherit - with: - IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-three-region-graceful-overload-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_RUNNER_DURATION_SECS: 1800 - FORGE_TEST_SUITE: three_region_simulation_graceful_overload POST_TO_SLACK: true diff --git a/.github/workflows/forge-unstable.yaml b/.github/workflows/forge-unstable.yaml index f7dd7e47f75f1..299318a0f23b8 100644 --- a/.github/workflows/forge-unstable.yaml +++ b/.github/workflows/forge-unstable.yaml @@ -69,6 +69,18 @@ jobs: with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork + # find_latest_images.py requires docker utilities and having authenticated to internal docker image registries + - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main + id: docker-setup + with: + GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} + GCP_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} + EXPORT_GCP_PROJECT_VARIABLES: "false" + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + 
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DOCKER_ARTIFACT_REPO: ${{ secrets.AWS_DOCKER_ARTIFACT_REPO }} + GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + - uses: ./.github/actions/python-setup with: pyproject_directory: testsuite diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index 4e1612f7d73f2..0972fd8e1e8d6 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -32,4 +32,5 @@ jobs: - name: Run Helm Lint if: steps.should-run-tests.outputs.SHOULD_RUN == 'true' - run: ./testsuite/testrun lint.py helm terraform/helm/* + run: ./testrun lint.py helm ../terraform/helm/* + working-directory: testsuite diff --git a/.github/workflows/indexer-grpc-integration-tests.yaml b/.github/workflows/indexer-grpc-integration-tests.yaml new file mode 100644 index 0000000000000..c4e664bee9b8f --- /dev/null +++ b/.github/workflows/indexer-grpc-integration-tests.yaml @@ -0,0 +1,59 @@ +name: "Indexer gRPC Integration Tests" +on: + pull_request: + +# cancel redundant builds +concurrency: + # cancel redundant builds on PRs (only on PR, not on branches) + group: ${{ github.workflow }}-${{ (github.event_name == 'pull_request' && github.ref) || github.sha }} + cancel-in-progress: true + +jobs: + run-tests-local-testnet: + runs-on: high-perf-docker + permissions: + contents: read + id-token: write + env: + # spin up the local testnet using the latest devnet image + VALIDATOR_IMAGE_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/validator + FAUCET_IMAGE_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/faucet + INDEXER_GRPC_IMAGE_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/indexer-grpc + IMAGE_TAG: devnet + + steps: + - uses: actions/checkout@v3 + + - name: Set up Rust + uses: aptos-labs/aptos-core/.github/actions/rust-setup@main + with: + GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + + - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main + with: + GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} + GCP_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DOCKER_ARTIFACT_REPO: ${{ secrets.AWS_DOCKER_ARTIFACT_REPO }} + GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + + - name: Run indexer gRPC dependnencies locally (${{ env.IMAGE_TAG }}) + shell: bash + run: ./testsuite/indexer_grpc_local.py --verbose start --no-indexer-grpc + + - name: Run indexer gRPC integration tests + shell: bash + run: cargo nextest run --features integration-tests --package aptos-indexer-grpc-integration-tests + + - name: Print docker-compose indexer-grpc deps logs on failure + if: ${{ failure() }} + working-directory: docker/compose/indexer-grpc + run: docker-compose logs + + - name: Print docker-compose validator-testnet logs on failure + if: ${{ failure() }} + working-directory: docker/compose/validator-testnet + run: docker-compose logs + +# validator-testnet-validator-1 diff --git a/.github/workflows/merge-gatekeeper.yaml b/.github/workflows/merge-gatekeeper.yaml new file mode 100644 index 0000000000000..145392174f201 --- /dev/null +++ b/.github/workflows/merge-gatekeeper.yaml @@ -0,0 +1,36 @@ +name: "*Merge Gatekeeper" + +on: + pull_request: + types: [labeled, opened, synchronize, reopened, auto_merge_enabled] + +env: + MERGE_GATEKEEPER_ALLOWLIST: "rustielin,perryjrandall,geekflyer,sherry-x,sionescu,ibalajiarun,igor-aptos,sitalkedia" + +jobs: + merge-gatekeeper: + runs-on: 
ubuntu-latest + # Restrict permissions of the GITHUB_TOKEN. + # Docs: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs + permissions: + checks: read + statuses: read + steps: + - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 + - name: Beta allowlist for certain actors + shell: bash + run: | + if grep -v "$GITHUB_ACTOR" <<< "$MERGE_GATEKEEPER_ALLOWLIST"; then + echo "Your username is not in the beta testing list for merge gatekeeper." + echo "Please add yourself if you are interested, in the env MERGE_GATEKEEPER_ALLOWLIST." + gh run cancel ${{ github.run_id }} + fi + env: + GH_TOKEN: ${{ github.token }} + + - name: Run Merge Gatekeeper + uses: upsidr/merge-gatekeeper@09af7a82c1666d0e64d2bd8c01797a0bcfd3bb5d # pin v1.2.1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + interval: 60 # 1 minute + timeout: 5400 # 1.5 hour diff --git a/.github/workflows/replay-verify.yaml b/.github/workflows/replay-verify.yaml index f57af8b0c17c7..2bbc6e9d6e0ee 100644 --- a/.github/workflows/replay-verify.yaml +++ b/.github/workflows/replay-verify.yaml @@ -23,7 +23,7 @@ on: - ".github/workflows/replay-verify.yaml" - "testsuite/replay_verify.py" schedule: - - cron: "0 8,20 * * *" + - cron: "0 22 * * *" push: branches: - aptos-release-v* # the aptos release branches @@ -79,7 +79,7 @@ jobs: BUCKET: aptos-mainnet-backup-backup-831a69a8 SUB_DIR: e1 HISTORY_START: 0 - TXNS_TO_SKIP: 12253479 12277499 + TXNS_TO_SKIP: 12253479 12277499 148358668 BACKUP_CONFIG_TEMPLATE_PATH: terraform/helm/fullnode/files/backup/s3-public.yaml # workflow config RUNS_ON: "high-perf-docker-with-local-ssd" diff --git a/.github/workflows/sdk-release.yaml b/.github/workflows/sdk-release.yaml index a4a3020d36c5d..9a2c93bef3c9b 100644 --- a/.github/workflows/sdk-release.yaml +++ b/.github/workflows/sdk-release.yaml @@ -8,11 +8,9 @@ ## this within an in-review PR. ## If the above approach is too slow (since you have to wait for the rust images -## to build), you can cut the iteration time dramatically by adding a push trigger -## and replacing inputs.GITHUB_SHA with a specific commit from the main branch for -## which you know the rust images step has completed. So to be clear: -## - Replace ${{ inputs.GIT_SHA }} for the checkout step with ${{ github.sha }} -## - Replace all other instances of ${{ inputs.GIT_SHA }} with the said specific commit. +## to build), you can cut the iteration time dramatically by changing the envs +## - Replace env.IMAGE_TAG for a known image tag +## - env.GIT_SHA will resolve to that of your PR branch name: "API + TS SDK CI" on: @@ -24,6 +22,15 @@ on: required: true type: string description: Use this to override the git SHA1, branch name (e.g. devnet) or tag to release the SDK from + pull_request: + paths: + - .github/workflows/sdk-release.yaml + +env: + # This is the docker image tag that will be used for the SDK release. + # It is also used to pull the docker images for the CI. 
+ IMAGE_TAG: ${{ inputs.GIT_SHA || 'devnet' }} # default to "devnet" tag when not running on workflow_call + GIT_SHA: ${{ inputs.GIT_SHA || github.event.pull_request.head.sha || github.sha }} # default to PR branch sha when not running on workflow_call jobs: # Confirm that the generated client within the TS SDK has been re-generated @@ -43,7 +50,7 @@ jobs: steps: - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 with: - ref: ${{ inputs.GIT_SHA }} + ref: ${{ env.GIT_SHA }} - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main with: @@ -61,7 +68,7 @@ jobs: # Self hosted runners don't have pnpm preinstalled. # https://github.com/actions/setup-node/issues/182 - - uses: pnpm/action-setup@v2 + - uses: pnpm/action-setup@537643d491d20c2712d11533497cb47b2d0eb9d5 # pin https://github.com/pnpm/action-setup/releases/tag/v2.2.3 # When using high-perf-docker, the CI is actually run with two containers # in a k8s pod, one for docker commands run in the CI steps (docker), and @@ -76,13 +83,13 @@ jobs: with: max_attempts: 3 timeout_minutes: 20 - command: docker run --rm --mount=type=bind,source=${{ runner.temp }}/specs,target=/specs ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${{ inputs.GIT_SHA }} aptos-openapi-spec-generator -f yaml -o /specs/spec.yaml + command: docker run --rm --mount=type=bind,source=${{ runner.temp }}/specs,target=/specs ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${IMAGE_TAG} aptos-openapi-spec-generator -f yaml -o /specs/spec.yaml - uses: nick-fields/retry@7f8f3d9f0f62fe5925341be21c2e8314fd4f7c7c # pin@v2 name: generate-json-spec with: max_attempts: 3 timeout_minutes: 20 - command: docker run --rm --mount=type=bind,source=${{ runner.temp }}/specs,target=/specs ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${{ inputs.GIT_SHA }} aptos-openapi-spec-generator -f json -o /specs/spec.json + command: docker run --rm --mount=type=bind,source=${{ runner.temp }}/specs,target=/specs ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${IMAGE_TAG} aptos-openapi-spec-generator -f json -o /specs/spec.json # Confirm that the specs we built here are the same as those checked in. - run: echo "If this step fails, run the following commands locally to fix it:" @@ -100,7 +107,7 @@ jobs: # These two have to be defined here and not in the env section because the runner # context is only available here. - - run: echo "APTOS_INVOCATION='docker run -v ${{ runner.temp }}/ans:/tmp/ans --network host ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${{ inputs.GIT_SHA }} aptos'" >> ./ecosystem/typescript/sdk/.env + - run: echo "APTOS_INVOCATION='docker run -v ${{ runner.temp }}/ans:/tmp/ans --network host ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${IMAGE_TAG} aptos'" >> ./ecosystem/typescript/sdk/.env - run: echo "ANS_REPO_LOCATION=${{ runner.temp }}/ans" >> ./ecosystem/typescript/sdk/.env - run: cp ./ecosystem/typescript/sdk/.env ./ecosystem/typescript/sdk/examples/typescript/.env @@ -117,7 +124,7 @@ jobs: - run: git diff --no-index --ignore-space-at-eol --ignore-blank-lines ./ecosystem/typescript/sdk/src/generated/ /tmp/generated_client/ # Run a local testnet built from the same commit. 
- - run: docker run -p 8080:8080 -p 8081:8081 --name=local-testnet --detach ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${{ inputs.GIT_SHA }} aptos node run-local-testnet --with-faucet + - run: docker run -p 8080:8080 -p 8081:8081 --name=local-testnet --detach ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}/tools:${IMAGE_TAG} aptos node run-local-testnet --with-faucet # Wait for the API and faucet to startup. - run: npm install -g wait-on @@ -130,7 +137,7 @@ jobs: with: max_attempts: 3 timeout_minutes: 20 - command: cd ./ecosystem/typescript/sdk && pnpm test + command: cd ./ecosystem/typescript/sdk && pnpm run test:ci - run: cd ./ecosystem/typescript/sdk && pnpm build # Confirm the Rust API client examples pass. @@ -141,3 +148,31 @@ jobs: if: ${{ failure() }} working-directory: docker/compose/validator-testnet run: docker logs local-testnet + + # Run the TS SDK indexer tests. Note: indexer service can be flaky and we + # dont want those tests to be land blocking for any PR on the aptos repo. + # This is why we run those tests separate from + # test-sdk-confirm-client-generated-publish. + run-indexer-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 + with: + ref: ${{ env.GIT_SHA }} + - uses: actions/setup-node@969bd2663942d722d85b6a8626225850c2f7be4b # pin@v3 + with: + node-version-file: .node-version + registry-url: "https://registry.npmjs.org" + - uses: pnpm/action-setup@537643d491d20c2712d11533497cb47b2d0eb9d5 # pin https://github.com/pnpm/action-setup/releases/tag/v2.2.3 + + # Run package install. If install fails, it probably means the lockfile + # was not included in the commit. + - run: cd ./ecosystem/typescript/sdk && pnpm install --frozen-lockfile + + # Run indexer tests. + - uses: nick-fields/retry@7f8f3d9f0f62fe5925341be21c2e8314fd4f7c7c # pin@v2 + name: ts-sdk-indexer-test + with: + max_attempts: 3 + timeout_minutes: 20 + command: cd ./ecosystem/typescript/sdk && pnpm run test:indexer diff --git a/.github/workflows/workflow-run-docker-rust-build.yaml b/.github/workflows/workflow-run-docker-rust-build.yaml index 8208263bfe427..f11a01352f60c 100644 --- a/.github/workflows/workflow-run-docker-rust-build.yaml +++ b/.github/workflows/workflow-run-docker-rust-build.yaml @@ -25,6 +25,12 @@ on: required: false type: boolean description: Whether to build additional testing images. If not specified, only the base release images will be built + TARGET_REGISTRY: + default: gcp + required: false + type: string + description: The target docker registry to push to + workflow_dispatch: inputs: GIT_SHA: @@ -45,6 +51,11 @@ on: required: false type: boolean description: Whether to build additional testing images. 
If not specified, only the base release images will be built + TARGET_REGISTRY: + default: gcp + required: false + type: string + description: The target docker registry to push to env: GIT_SHA: ${{ inputs.GIT_SHA }} @@ -55,6 +66,11 @@ env: GCP_DOCKER_ARTIFACT_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }} GCP_DOCKER_ARTIFACT_REPO_US: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO_US }} AWS_ECR_ACCOUNT_NUM: ${{ secrets.ENV_ECR_AWS_ACCOUNT_NUM }} + TARGET_REGISTRY: ${{ inputs.TARGET_REGISTRY }} + +permissions: + contents: read + id-token: write #required for GCP Workload Identity federation which we use to login into Google Artifact Registry jobs: rust-all: @@ -80,3 +96,4 @@ jobs: FEATURES: ${{ env.FEATURES }} BUILD_ADDL_TESTING_IMAGES: ${{ env.BUILD_ADDL_TESTING_IMAGES }} GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + TARGET_REGISTRY: ${{ env.TARGET_REGISTRY }} diff --git a/.github/workflows/workflow-run-execution-performance.yaml b/.github/workflows/workflow-run-execution-performance.yaml index 4e28733022147..fcb5c6f8ffb89 100644 --- a/.github/workflows/workflow-run-execution-performance.yaml +++ b/.github/workflows/workflow-run-execution-performance.yaml @@ -8,6 +8,10 @@ on: required: true type: string description: The git SHA1 to test. + RUNNER_NAME: + required: false + default: executor-benchmark-runner + type: string # This allows the workflow to be triggered manually from the Github UI or CLI # NOTE: because the "number" type is not supported, we default to 720 minute timeout workflow_dispatch: @@ -16,11 +20,19 @@ on: required: true type: string description: The git SHA1 to test. + RUNNER_NAME: + required: false + default: executor-benchmark-runner + type: choice + options: + - spot-runner + - executor-benchmark-runner + description: The name of the runner to use for the test. jobs: sequential-execution-performance: timeout-minutes: 30 - runs-on: executor-benchmark-runner + runs-on: ${{ inputs.RUNNER_NAME }} steps: - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 with: @@ -36,7 +48,7 @@ jobs: parallel-execution-performance: timeout-minutes: 60 - runs-on: executor-benchmark-runner + runs-on: ${{ inputs.RUNNER_NAME }} steps: - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 with: @@ -52,7 +64,7 @@ jobs: single-node-performance: timeout-minutes: 60 - runs-on: executor-benchmark-runner + runs-on: ${{ inputs.RUNNER_NAME }} steps: - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3 with: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9497c341472b4..db412e537ccbf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ title: Contributing to Aptos Core # Contributing -Our goal is to make contributing to Aptos Core easy and transparent. See [Aptos Community](https://aptos.dev/community/help-index) for full details. This page describes [our development process](#our-development-process). +Our goal is to make contributing to Aptos Core easy and transparent. See [Aptos Community](https://aptos.dev/community) for full details. This page describes [our development process](#our-development-process). 
## Aptos Core diff --git a/Cargo.lock b/Cargo.lock index 43d5c8692d643..6084e94c3363b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -101,6 +101,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aho-corasick" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +dependencies = [ + "memchr", +] + [[package]] name = "aliasable" version = "0.1.3" @@ -142,9 +151,10 @@ dependencies = [ [[package]] name = "aptos" -version = "1.0.14" +version = "2.0.1" dependencies = [ "anyhow", + "aptos-api-types", "aptos-backup-cli", "aptos-bitvec", "aptos-build-info", @@ -480,6 +490,24 @@ dependencies = [ "rayon", ] +[[package]] +name = "aptos-block-partitioner" +version = "0.1.0" +dependencies = [ + "anyhow", + "aptos-crypto", + "aptos-logger", + "aptos-metrics-core", + "aptos-types", + "bcs 0.1.4", + "clap 3.2.23", + "dashmap", + "itertools", + "move-core-types", + "rand 0.7.3", + "rayon", +] + [[package]] name = "aptos-bounded-executor" version = "0.1.0" @@ -559,6 +587,7 @@ dependencies = [ "poem-openapi", "rand 0.7.3", "serde 1.0.149", + "serde_merge", "serde_yaml 0.8.26", "thiserror", "url", @@ -741,6 +770,7 @@ dependencies = [ name = "aptos-data-client" version = "0.1.0" dependencies = [ + "anyhow", "aptos-channels", "aptos-config", "aptos-crypto", @@ -750,6 +780,7 @@ dependencies = [ "aptos-metrics-core", "aptos-netcore", "aptos-network", + "aptos-storage-interface", "aptos-storage-service-client", "aptos-storage-service-server", "aptos-storage-service-types", @@ -761,6 +792,7 @@ dependencies = [ "futures", "itertools", "maplit", + "mockall", "rand 0.7.3", "serde 1.0.149", "thiserror", @@ -989,6 +1021,7 @@ name = "aptos-executor" version = "0.1.0" dependencies = [ "anyhow", + "aptos-block-partitioner", "aptos-cached-packages", "aptos-config", "aptos-consensus-types", @@ -1014,6 +1047,7 @@ dependencies = [ "fail 0.5.0", "itertools", "move-core-types", + "num_cpus", "once_cell", "proptest", "rand 0.7.3", @@ -1064,6 +1098,29 @@ dependencies = [ "toml 0.5.9", ] +[[package]] +name = "aptos-executor-service" +version = "0.1.0" +dependencies = [ + "anyhow", + "aptos-config", + "aptos-crypto", + "aptos-executor-types", + "aptos-language-e2e-tests", + "aptos-logger", + "aptos-retrier", + "aptos-secure-net", + "aptos-state-view", + "aptos-types", + "aptos-vm", + "bcs 0.1.4", + "clap 3.2.23", + "itertools", + "serde 1.0.149", + "serde_json", + "thiserror", +] + [[package]] name = "aptos-executor-test-helpers" version = "0.1.0" @@ -1092,6 +1149,7 @@ name = "aptos-executor-types" version = "0.1.0" dependencies = [ "anyhow", + "aptos-block-partitioner", "aptos-crypto", "aptos-scratchpad", "aptos-secure-net", @@ -1221,6 +1279,7 @@ dependencies = [ "aptos-retrier", "aptos-sdk", "aptos-secure-storage", + "aptos-short-hex-str", "aptos-state-sync-driver", "aptos-transaction-emitter-lib", "aptos-transaction-generator-lib", @@ -1532,6 +1591,7 @@ name = "aptos-indexer-grpc-cache-worker" version = "1.0.0" dependencies = [ "anyhow", + "aptos-config", "aptos-indexer-grpc-server-framework", "aptos-indexer-grpc-utils", "aptos-metrics-core", @@ -1547,9 +1607,11 @@ dependencies = [ "once_cell", "prost", "redis", + "reqwest", "serde 1.0.149", "serde_json", "serde_yaml 0.8.26", + "tempfile", "tokio", "tonic", "tracing", @@ -1659,6 +1721,48 @@ dependencies = [ "tonic", ] +[[package]] +name = "aptos-indexer-grpc-integration-tests" +version = "0.1.0" +dependencies = [ + "anyhow", + "aptos-config", + 
"aptos-indexer-grpc-cache-worker", + "aptos-indexer-grpc-data-service", + "aptos-indexer-grpc-file-store", + "aptos-indexer-grpc-server-framework", + "aptos-indexer-grpc-utils", + "aptos-inspection-service", + "aptos-logger", + "aptos-protos", + "aptos-runtimes", + "aptos-transaction-emitter-lib", + "aptos-transaction-generator-lib", + "aptos-types", + "async-trait", + "backoff", + "base64 0.13.0", + "clap 3.2.23", + "futures", + "futures-core", + "futures-util", + "itertools", + "prometheus", + "prost", + "redis", + "regex", + "reqwest", + "serde 1.0.149", + "serde_json", + "serde_yaml 0.8.26", + "tempfile", + "tokio", + "tonic", + "tracing", + "url", + "warp", +] + [[package]] name = "aptos-indexer-grpc-parser" version = "1.0.0" @@ -1873,6 +1977,8 @@ dependencies = [ name = "aptos-ledger" version = "0.2.0" dependencies = [ + "aptos-crypto", + "aptos-types", "hex", "ledger-apdu", "ledger-transport-hid", @@ -2194,7 +2300,7 @@ dependencies = [ [[package]] name = "aptos-node" -version = "1.4.0" +version = "1.5.0" dependencies = [ "anyhow", "aptos-api", @@ -2252,7 +2358,6 @@ dependencies = [ "rayon", "serde 1.0.149", "serde_json", - "serde_merge", "serde_yaml 0.8.26", "tokio", "tokio-stream", @@ -2684,6 +2789,7 @@ dependencies = [ "bitvec 0.19.6", "criterion", "itertools", + "jemallocator", "once_cell", "proptest", "rand 0.7.3", @@ -2696,6 +2802,7 @@ name = "aptos-sdk" version = "0.0.3" dependencies = [ "anyhow", + "aptos-api-types", "aptos-cached-packages", "aptos-crypto", "aptos-global-constants", @@ -3085,7 +3192,9 @@ name = "aptos-transaction-benchmarks" version = "0.1.0" dependencies = [ "aptos-bitvec", + "aptos-block-partitioner", "aptos-crypto", + "aptos-executor-service", "aptos-gas", "aptos-language-e2e-tests", "aptos-logger", @@ -3097,7 +3206,9 @@ dependencies = [ "criterion", "criterion-cpu-time", "num_cpus", + "once_cell", "proptest", + "rayon", ] [[package]] @@ -3231,6 +3342,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.7.3", + "rayon", "regex", "serde 1.0.149", "serde_bytes", @@ -3289,6 +3401,7 @@ dependencies = [ "anyhow", "aptos-aggregator", "aptos-block-executor", + "aptos-block-partitioner", "aptos-crypto", "aptos-crypto-derive", "aptos-framework", @@ -3320,6 +3433,7 @@ dependencies = [ "once_cell", "ouroboros 0.15.6", "proptest", + "rand 0.7.3", "rayon", "serde 1.0.149", "serde_json", @@ -3473,9 +3587,9 @@ checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" [[package]] name = "arbitrary" -version = "1.1.7" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86fd10d912cab78764cc44307d9cd5f164e09abbeb87fb19fb6d95937e8da5f" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" dependencies = [ "derive_arbitrary", ] @@ -4289,16 +4403,6 @@ dependencies = [ "regex-automata", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "bumpalo" version = "3.11.0" @@ -4321,7 +4425,7 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" name = "bytecode-verifier-libfuzzer" version = "0.0.0" dependencies = [ - "arbitrary 1.1.7", + "arbitrary 1.3.0", "libfuzzer-sys 0.4.6", "move-binary-format", "move-bytecode-verifier", @@ -4839,7 +4943,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2e4b6aa369f41f5faa04bb80c9b1f4216ea81646ed6124d76ba5c49a7aafd9cd" dependencies = [ "cookie", - "idna", + "idna 0.2.3", "log", "publicsuffix", "serde 1.0.149", @@ -5256,6 +5360,14 @@ dependencies = [ "tokio", ] +[[package]] +name = "dearbitrary" +version = "1.2.0" +source = "git+https://github.com/otter-sec/dearbitrary#08de30e99c6c6b9a3d3f4959f22bb245faa8da8b" +dependencies = [ + "derive_dearbitrary", +] + [[package]] name = "debug-ignore" version = "1.0.3" @@ -5284,9 +5396,19 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.1.6" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "226ad66541d865d7a7173ad6a9e691c33fdb910ac723f4bc734b3e5294a1f931" +checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" +dependencies = [ + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", +] + +[[package]] +name = "derive_dearbitrary" +version = "1.2.0" +source = "git+https://github.com/otter-sec/dearbitrary#08de30e99c6c6b9a3d3f4959f22bb245faa8da8b" dependencies = [ "proc-macro2 1.0.59", "quote 1.0.28", @@ -5974,11 +6096,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ - "matches", "percent-encoding", ] @@ -5990,9 +6111,9 @@ checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" [[package]] name = "fs_extra" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "funty" @@ -6116,6 +6237,28 @@ dependencies = [ "slab", ] +[[package]] +name = "fuzzer" +version = "0.1.0" + +[[package]] +name = "fuzzer-fuzz" +version = "0.0.0" +dependencies = [ + "aptos-consensus", + "aptos-consensus-types", + "aptos-types", + "arbitrary 1.3.0", + "bcs 0.1.4", + "libfuzzer-sys 0.4.6", + "move-binary-format", + "move-bytecode-verifier", + "move-core-types", + "move-vm-runtime", + "move-vm-test-utils", + "move-vm-types", +] + [[package]] name = "gcc" version = "0.3.55" @@ -6285,7 +6428,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" dependencies = [ - "aho-corasick", + "aho-corasick 0.7.18", "bstr", "fnv", "log", @@ -6607,9 +6750,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "httpmock" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c159c4fc205e6c1a9b325cb7ec135d13b5f47188ce175dabb76ec847f331d9bd" +checksum = "c6b56b6265f15908780cbee987912c1e98dbca675361f748291605a8a3a1df09" dependencies = [ "assert-json-diff", "async-object-pool", @@ -6739,6 +6882,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "ignore" version = "0.4.18" @@ -7259,7 
+7412,7 @@ dependencies = [ "petgraph 0.6.2", "pico-args", "regex", - "regex-syntax", + "regex-syntax 0.6.27", "string_cache", "term", "tiny-keccak", @@ -7400,7 +7553,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beb09950ae85a0a94b27676cccf37da5ff13f27076aa1adbc6545dd0d0e1bd4e" dependencies = [ - "arbitrary 1.1.7", + "arbitrary 1.3.0", "cc", "once_cell", ] @@ -7429,6 +7582,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "libm" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" + [[package]] name = "libnghttp2-sys" version = "0.1.7+1.45.0" @@ -7968,7 +8127,8 @@ name = "move-binary-format" version = "0.0.3" dependencies = [ "anyhow", - "arbitrary 1.1.7", + "arbitrary 1.3.0", + "dearbitrary", "indexmap", "move-core-types", "once_cell", @@ -8145,8 +8305,9 @@ name = "move-core-types" version = "0.0.4" dependencies = [ "anyhow", - "arbitrary 1.1.7", + "arbitrary 1.3.0", "bcs 0.1.4", + "dearbitrary", "ethnum", "hex", "num", @@ -8769,9 +8930,9 @@ dependencies = [ [[package]] name = "multer" -version = "2.0.3" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a30ba6d97eb198c5e8a35d67d5779d6680cca35652a60ee90fc23dc431d4fde8" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" dependencies = [ "bytes", "encoding_rs", @@ -8786,24 +8947,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "multipart" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error 1.2.3", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", -] - [[package]] name = "named-lock" version = "0.2.0" @@ -9038,6 +9181,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -9082,9 +9226,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.13.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" @@ -9392,9 +9536,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" @@ -9958,22 +10102,22 @@ dependencies = [ [[package]] name = "proptest" -version = "1.0.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0d9cc07f18492d879586c92b485def06bc850da3118075cd45d50e9c95b0e5" +checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ "bit-set", "bitflags 1.3.2", "byteorder", "lazy_static 1.4.0", "num-traits 0.2.15", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.6.27", "rusty-fork", "tempfile", + "unarray", ] [[package]] @@ -10048,7 +10192,7 @@ version = "2.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "aeeedb0b429dc462f30ad27ef3de97058b060016f47790c066757be38ef792b4" dependencies = [ - "idna", + "idna 0.2.3", "psl-types", ] @@ -10083,12 +10227,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quick-xml" version = "0.22.0" @@ -10266,10 +10404,11 @@ dependencies = [ [[package]] name = "random_word" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b2bb830d03b36582fd6723a57d2451b9db74574d21c34db9d7122c96b24fa0" +checksum = "1d0f7171155590e912ab907550240a5764c665388ab0a1e46d783a493e816ff3" dependencies = [ + "once_cell", "rand 0.8.5", ] @@ -10380,13 +10519,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ - "aho-corasick", + "aho-corasick 1.0.2", "memchr", - "regex-syntax", + "regex-syntax 0.7.2", ] [[package]] @@ -10395,7 +10534,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.27", ] [[package]] @@ -10404,6 +10543,12 @@ version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +[[package]] +name = "regex-syntax" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -10735,7 +10880,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] @@ -10746,12 +10891,6 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "same-file" version = "1.0.6" @@ -11107,6 +11246,17 @@ dependencies = [ "digest 0.10.5", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", +] + [[package]] name = "sha1_smol" version = "1.0.0" @@ -11906,9 +12056,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.21.2" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" 
+checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg", "bytes", @@ -11922,7 +12072,7 @@ dependencies = [ "socket2", "tokio-macros", "tracing", - "winapi 0.3.9", + "windows-sys 0.45.0", ] [[package]] @@ -12015,9 +12165,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.17.2" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" +checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" dependencies = [ "futures-util", "log", @@ -12371,9 +12521,9 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.17.3" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ "base64 0.13.0", "byteorder", @@ -12382,21 +12532,12 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "sha-1", + "sha1", "thiserror", "url", "utf-8", ] -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", -] - [[package]] name = "typed-arena" version = "2.0.2" @@ -12449,6 +12590,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "uncased" version = "0.9.7" @@ -12519,9 +12666,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" @@ -12540,9 +12687,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] @@ -12612,13 +12759,12 @@ dependencies = [ [[package]] name = "url" -version = "2.2.2" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.4.0", "percent-encoding", "serde 1.0.149", ] @@ -12737,9 +12883,9 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" +checksum = "ba431ef570df1287f7f8b07e376491ad54f84d26ac473489427231e1718e1f69" dependencies = [ "bytes", "futures-channel", @@ -12750,10 +12896,10 @@ dependencies = [ "log", "mime", "mime_guess", - "multipart", + "multer", "percent-encoding", "pin-project", - "rustls-pemfile 0.2.1", + "rustls-pemfile 1.0.1", "scoped-tls", "serde 1.0.149", "serde_json", diff --git a/Cargo.toml 
b/Cargo.toml index 78f5fb1ee19cd..ba32a6fad3c61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,15 +93,18 @@ members = [ "ecosystem/indexer-grpc/indexer-grpc-data-service", "ecosystem/indexer-grpc/indexer-grpc-file-store", "ecosystem/indexer-grpc/indexer-grpc-fullnode", + "ecosystem/indexer-grpc/indexer-grpc-integration-tests", "ecosystem/indexer-grpc/indexer-grpc-parser", "ecosystem/indexer-grpc/indexer-grpc-post-processor", - "ecosystem/indexer-grpc/indexer-grpc-server-framework", + "ecosystem/indexer-grpc/indexer-grpc-server-framework", "ecosystem/indexer-grpc/indexer-grpc-utils", "ecosystem/node-checker", "ecosystem/node-checker/fn-check-client", + "execution/block-partitioner", "execution/db-bootstrapper", "execution/executor", "execution/executor-benchmark", + "execution/executor-service", "execution/executor-test-helpers", "execution/executor-types", "mempool", @@ -146,6 +149,8 @@ members = [ "testsuite/dos/sender", "testsuite/forge", "testsuite/forge-cli", + "testsuite/fuzzer", + "testsuite/fuzzer/fuzz", "testsuite/generate-format", "testsuite/module-publish", "testsuite/smoke-test", @@ -271,6 +276,8 @@ aptos-debugger = { path = "aptos-move/aptos-debugger" } aptos-event-notifications = { path = "state-sync/inter-component/event-notifications" } aptos-executable-store = { path = "storage/executable-store" } aptos-executor = { path = "execution/executor" } +aptos-block-partitioner = { path = "execution/block-partitioner" } +aptos-executor-service = { path = "execution/executor-service" } aptos-executor-test-helpers = { path = "execution/executor-test-helpers" } aptos-executor-types = { path = "execution/executor-types" } aptos-faucet-cli = { path = "crates/aptos-faucet/cli" } @@ -281,6 +288,7 @@ aptos-fallible = { path = "crates/fallible" } aptos-forge = { path = "testsuite/forge" } aptos-framework = { path = "aptos-move/framework" } aptos-fuzzer = { path = "testsuite/aptos-fuzzer" } +fuzzer = { path = "testsuite/fuzzer" } aptos-gas = { path = "aptos-move/aptos-gas" } aptos-gas-algebra-ext = { path = "aptos-move/gas-algebra-ext" } aptos-gas-profiling = { path = "aptos-move/aptos-gas-profiling" } @@ -424,7 +432,13 @@ dashmap = "5.2.0" datatest-stable = "0.1.1" debug-ignore = { version = "1.0.3", features = ["serde"] } derivative = "2.2.0" -diesel = { version = "2.1.0", features = ["chrono", "postgres", "r2d2", "numeric", "serde_json"] } +diesel = { version = "2.1.0", features = [ + "chrono", + "postgres", + "r2d2", + "numeric", + "serde_json", +] } diesel_migrations = { version = "2.1.0", features = ["postgres"] } digest = "0.9.0" dir-diff = "0.3.2" @@ -517,7 +531,12 @@ rayon = "1.5.2" redis = { version = "0.22.3", features = ["tokio-comp", "script"] } redis-test = { version = "0.1.1", features = ["aio"] } regex = "1.5.5" -reqwest = { version = "0.11.11", features = ["blocking", "cookies", "json", "stream"] } +reqwest = { version = "0.11.11", features = [ + "blocking", + "cookies", + "json", + "stream", +] } reqwest-middleware = "0.2.0" reqwest-retry = "0.2.1" ring = { version = "0.16.20", features = ["std"] } @@ -613,9 +632,13 @@ move-resource-viewer = { path = "third_party/move/tools/move-resource-viewer" } move-symbol-pool = { path = "third_party/move/move-symbol-pool" } move-table-extension = { path = "third_party/move/extensions/move-table-extension" } move-transactional-test-runner = { path = "third_party/move/testing-infra/transactional-test-runner" } -move-unit-test = { path = "third_party/move/tools/move-unit-test", features = ["table-extension"] } +move-unit-test = { path = 
"third_party/move/tools/move-unit-test", features = [ + "table-extension", +] } move-vm-runtime = { path = "third_party/move/move-vm/runtime" } -move-vm-test-utils = { path = "third_party/move/move-vm/test-utils", features = ["table-extension"] } +move-vm-test-utils = { path = "third_party/move/move-vm/test-utils", features = [ + "table-extension", +] } move-vm-types = { path = "third_party/move/move-vm/types" } [profile.release] diff --git a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json index f391eb2756dfe..43518fccbf8f6 100644 --- a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json +++ b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json @@ -114,7 +114,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", diff --git a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json index d2034bc4b4f8b..9952e5d835109 100644 --- a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json +++ b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json @@ -119,7 +119,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", @@ -397,7 +397,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", @@ -675,7 +675,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", @@ -953,7 +953,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", @@ -1231,7 +1231,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", @@ -1509,7 +1509,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", @@ -1787,7 +1787,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, "vm_status": "Executed successfully", "accumulator_root_hash": "", @@ -2065,7 +2065,7 @@ "state_change_hash": "", "event_root_hash": "", "state_checkpoint_hash": null, - "gas_used": "16", + "gas_used": "6", "success": true, 
"vm_status": "Executed successfully", "accumulator_root_hash": "", diff --git a/api/src/tests/converter_test.rs b/api/src/tests/converter_test.rs index 59c710780b14c..5b14f866162d9 100644 --- a/api/src/tests/converter_test.rs +++ b/api/src/tests/converter_test.rs @@ -57,8 +57,8 @@ async fn test_value_conversion() { ); } -fn assert_value_conversion<'r, R: MoveResolverExt, V: Serialize>( - converter: &MoveConverter<'r, R>, +fn assert_value_conversion( + converter: &MoveConverter<'_, R>, json_move_type: &str, json_value: V, expected_vm_value: VmMoveValue, @@ -76,8 +76,8 @@ fn assert_value_conversion<'r, R: MoveResolverExt, V: Serialize>( assert_eq!(json_value_back, json!(json_value)); } -fn assert_value_conversion_bytes<'r, R: MoveResolverExt>( - converter: &MoveConverter<'r, R>, +fn assert_value_conversion_bytes( + converter: &MoveConverter<'_, R>, json_move_type: &str, vm_bytes: &[u8], ) { diff --git a/api/test-context/src/test_context.rs b/api/test-context/src/test_context.rs index 49accba9b661b..3e91b09d4a0a8 100644 --- a/api/test-context/src/test_context.rs +++ b/api/test-context/src/test_context.rs @@ -604,7 +604,7 @@ impl TestContext { let parent_id = self.executor.committed_block_id(); let result = self .executor - .execute_block((metadata.id(), txns.clone()), parent_id) + .execute_block((metadata.id(), txns.clone()).into(), parent_id, None) .unwrap(); let mut compute_status = result.compute_status().clone(); assert_eq!(compute_status.len(), txns.len(), "{:?}", result); diff --git a/api/types/src/convert.rs b/api/types/src/convert.rs index 49307836c9563..33e42902f161d 100644 --- a/api/types/src/convert.rs +++ b/api/types/src/convert.rs @@ -80,7 +80,7 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { .collect() } - pub fn try_into_resource<'b>(&self, typ: &StructTag, bytes: &'b [u8]) -> Result { + pub fn try_into_resource(&self, typ: &StructTag, bytes: &'_ [u8]) -> Result { self.inner.view_resource(typ, bytes)?.try_into() } @@ -101,10 +101,10 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { .collect::>>() } - pub fn move_struct_fields<'b>( + pub fn move_struct_fields( &self, typ: &StructTag, - bytes: &'b [u8], + bytes: &'_ [u8], ) -> Result> { self.inner.move_struct_fields(typ, bytes) } diff --git a/api/types/src/lib.rs b/api/types/src/lib.rs index 4d29db8f80fdb..b5785d92c7fb9 100644 --- a/api/types/src/lib.rs +++ b/api/types/src/lib.rs @@ -2,6 +2,8 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::match_result_ok)] // Required to overcome the limitations of deriving Union + mod account; mod address; mod block; diff --git a/aptos-move/aptos-debugger/src/lib.rs b/aptos-move/aptos-debugger/src/lib.rs index eed8ee3b9b064..bf34e3043917d 100644 --- a/aptos-move/aptos-debugger/src/lib.rs +++ b/aptos-move/aptos-debugger/src/lib.rs @@ -58,7 +58,7 @@ impl AptosDebugger { txns: Vec, ) -> Result> { let state_view = DebuggerStateView::new(self.debugger.clone(), version); - AptosVM::execute_block(txns, &state_view) + AptosVM::execute_block(txns, &state_view, None) .map_err(|err| format_err!("Unexpected VM Error: {:?}", err)) } diff --git a/aptos-move/aptos-gas-profiling/src/profiler.rs b/aptos-move/aptos-gas-profiling/src/profiler.rs index c682cd882ddb6..92ea37e82e4f0 100644 --- a/aptos-move/aptos-gas-profiling/src/profiler.rs +++ b/aptos-move/aptos-gas-profiling/src/profiler.rs @@ -428,11 +428,13 @@ where &mut self, addr: AccountAddress, ty: impl TypeView, - loaded: Option<(NumBytes, impl ValueView)>, + val: Option, + bytes_loaded: NumBytes, ) -> PartialVMResult<()> { let ty_tag = ty.to_type_tag(); - let (cost, res) = self.delegate_charge(|base| base.charge_load_resource(addr, ty, loaded)); + let (cost, res) = + self.delegate_charge(|base| base.charge_load_resource(addr, ty, val, bytes_loaded)); self.active_event_stream() .push(ExecutionGasEvent::LoadResource { @@ -476,6 +478,14 @@ where fn storage_discount_for_events(&self, total_cost: Fee) -> Fee; fn storage_fee_for_transaction_storage(&self, txn_size: NumBytes) -> Fee; + + fn execution_gas_used(&self) -> Gas; + + fn io_gas_used(&self) -> Gas; + + fn storage_fee_used_in_gas_units(&self) -> Gas; + + fn storage_fee_used(&self) -> Fee; } delegate_mut! { diff --git a/aptos-move/aptos-gas/src/gas_meter.rs b/aptos-move/aptos-gas/src/gas_meter.rs index 4b249d34ab338..93438c868456d 100644 --- a/aptos-move/aptos-gas/src/gas_meter.rs +++ b/aptos-move/aptos-gas/src/gas_meter.rs @@ -33,6 +33,10 @@ use move_vm_types::{ use std::collections::BTreeMap; // Change log: +// - V10 +// - Storage gas charges (excluding "storage fees") stop respecting the storage gas curves +// - V9 +// - Accurate tracking of the cost of loading resource groups // - V8 // - Added BLS12-381 operations. // - V7 @@ -52,14 +56,14 @@ use std::collections::BTreeMap; // - Storage charges: // - Distinguish between new and existing resources // - One item write comes with 1K free bytes -// - abort with STORATGE_WRITE_LIMIT_REACHED if WriteOps or Events are too large +// - abort with STORAGE_WRITE_LIMIT_REACHED if WriteOps or Events are too large // - V2 // - Table // - Fix the gas formula for loading resources so that they are consistent with other // global operations. // - V1 // - TBA -pub const LATEST_GAS_FEATURE_VERSION: u64 = 8; +pub const LATEST_GAS_FEATURE_VERSION: u64 = 10; pub(crate) const EXECUTION_GAS_MULTIPLIER: u64 = 20; @@ -330,6 +334,18 @@ pub trait AptosGasMeter: MoveGasMeter { Ok(()) } + + /// Return the total gas used for execution. + fn execution_gas_used(&self) -> Gas; + + /// Return the total gas used for io. + fn io_gas_used(&self) -> Gas; + + /// Return the total gas used for storage. + fn storage_fee_used_in_gas_units(&self) -> Gas; + + /// Return the total fee used for storage. + fn storage_fee_used(&self) -> Fee; } /// The official gas meter used inside the Aptos VM. 
@@ -344,6 +360,9 @@ pub struct StandardGasMeter { execution_gas_used: InternalGas, io_gas_used: InternalGas, + // The gas consumed by the storage operations. + storage_fee_in_internal_units: InternalGas, + // The storage fee consumed by the storage operations. storage_fee_used: Fee, should_leak_memory_for_native: bool, @@ -366,6 +385,7 @@ impl StandardGasMeter { balance, execution_gas_used: 0.into(), io_gas_used: 0.into(), + storage_fee_in_internal_units: 0.into(), storage_fee_used: 0.into(), memory_quota, should_leak_memory_for_native: false, @@ -489,11 +509,12 @@ impl MoveGasMeter for StandardGasMeter { &mut self, _addr: AccountAddress, _ty: impl TypeView, - loaded: Option<(NumBytes, impl ValueView)>, + val: Option, + bytes_loaded: NumBytes, ) -> PartialVMResult<()> { if self.feature_version != 0 { // TODO(Gas): Rewrite this in a better way. - if let Some((_, val)) = &loaded { + if let Some(val) = &val { self.use_heap_memory( self.gas_params .misc @@ -502,10 +523,13 @@ impl MoveGasMeter for StandardGasMeter { )?; } } + if self.feature_version <= 8 && val.is_none() && bytes_loaded != 0.into() { + return Err(PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message("in legacy versions, number of bytes loaded must be zero when the resource does not exist ".to_string())); + } let cost = self .storage_gas_params .pricing - .calculate_read_gas(loaded.map(|(num_bytes, _)| num_bytes)); + .calculate_read_gas(val.is_some(), bytes_loaded); self.charge_io(cost) } @@ -992,6 +1016,7 @@ impl AptosGasMeter for StandardGasMeter { self.charge(gas_consumed_internal)?; + self.storage_fee_in_internal_units += gas_consumed_internal; self.storage_fee_used += amount; if self.feature_version >= 7 && self.storage_fee_used > self.gas_params.txn.max_storage_fee { @@ -1028,4 +1053,23 @@ impl AptosGasMeter for StandardGasMeter { self.charge_execution(cost) .map_err(|e| e.finish(Location::Undefined)) } + + fn execution_gas_used(&self) -> Gas { + self.execution_gas_used + .to_unit_round_up_with_params(&self.gas_params.txn) + } + + fn io_gas_used(&self) -> Gas { + self.io_gas_used + .to_unit_round_up_with_params(&self.gas_params.txn) + } + + fn storage_fee_used_in_gas_units(&self) -> Gas { + self.storage_fee_in_internal_units + .to_unit_round_up_with_params(&self.gas_params.txn) + } + + fn storage_fee_used(&self) -> Fee { + self.storage_fee_used + } } diff --git a/aptos-move/aptos-gas/src/lib.rs b/aptos-move/aptos-gas/src/lib.rs index ddd7fd868146f..e2a091c947d38 100644 --- a/aptos-move/aptos-gas/src/lib.rs +++ b/aptos-move/aptos-gas/src/lib.rs @@ -45,4 +45,6 @@ pub use move_core_types::gas_algebra::{ Arg, Byte, GasQuantity, InternalGas, InternalGasPerArg, InternalGasPerByte, InternalGasUnit, NumArgs, NumBytes, UnitDiv, }; -pub use transaction::{ChangeSetConfigs, StorageGasParameters, TransactionGasParameters}; +pub use transaction::{ + ChangeSetConfigs, StorageGasParameters, StoragePricing, TransactionGasParameters, +}; diff --git a/aptos-move/aptos-gas/src/params.rs b/aptos-move/aptos-gas/src/params.rs index 5aa7079b82a09..a394ba691fc7d 100644 --- a/aptos-move/aptos-gas/src/params.rs +++ b/aptos-move/aptos-gas/src/params.rs @@ -9,6 +9,7 @@ macro_rules! 
define_gas_parameters_extract_key_at_version { ({ $($ver: pat => $key: literal),+ }, $cur_ver: expr) => { match $cur_ver { $($ver => Some($key)),+, + #[allow(unreachable_patterns)] _ => None, } } diff --git a/aptos-move/aptos-gas/src/transaction/mod.rs b/aptos-move/aptos-gas/src/transaction/mod.rs index 99db9c8d8944e..df8f71898ef3f 100644 --- a/aptos-move/aptos-gas/src/transaction/mod.rs +++ b/aptos-move/aptos-gas/src/transaction/mod.rs @@ -18,7 +18,7 @@ use move_core_types::gas_algebra::{ mod storage; -pub use storage::{ChangeSetConfigs, StorageGasParameters}; +pub use storage::{ChangeSetConfigs, StorageGasParameters, StoragePricing}; const GAS_SCALING_FACTOR: u64 = 1_000_000; @@ -79,18 +79,22 @@ crate::params::define_gas_parameters!( GAS_SCALING_FACTOR ], // Gas Parameters for reading data from storage. - [load_data_base: InternalGas, "load_data.base", 16_000], [ - load_data_per_byte: InternalGasPerByte, - "load_data.per_byte", - 1_000 + storage_io_per_state_slot_read: InternalGasPerArg, + { 0..=9 => "load_data.base", 10.. => "storage_io_per_state_slot_read"}, + 300_000, + ], + [ + storage_io_per_state_byte_read: InternalGasPerByte, + { 0..=9 => "load_data.per_byte", 10.. => "storage_io_per_state_byte_read"}, + 300, ], [load_data_failure: InternalGas, "load_data.failure", 0], // Gas parameters for writing data to storage. [ - write_data_per_op: InternalGasPerArg, - "write_data.per_op", - 160_000 + storage_io_per_state_slot_write: InternalGasPerArg, + { 0..=9 => "write_data.per_op", 10.. => "storage_io_per_state_slot_write"}, + 300_000, ], [ write_data_per_new_item: InternalGasPerArg, @@ -98,9 +102,9 @@ crate::params::define_gas_parameters!( 1_280_000 ], [ - write_data_per_byte_in_key: InternalGasPerByte, - "write_data.per_byte_in_key", - 10_000 + storage_io_per_state_byte_write: InternalGasPerByte, + { 0..=9 => "write_data.per_byte_in_key", 10.. 
=> "storage_io_per_state_byte_write"}, + 5_000 ], [ write_data_per_byte_in_val: InternalGasPerByte, diff --git a/aptos-move/aptos-gas/src/transaction/storage.rs b/aptos-move/aptos-gas/src/transaction/storage.rs index fbe34fe9515e5..00197e0bb111d 100644 --- a/aptos-move/aptos-gas/src/transaction/storage.rs +++ b/aptos-move/aptos-gas/src/transaction/storage.rs @@ -3,7 +3,9 @@ use crate::{AptosGasParameters, LATEST_GAS_FEATURE_VERSION}; use aptos_types::{ - on_chain_config::StorageGasSchedule, state_store::state_key::StateKey, write_set::WriteOp, + on_chain_config::{ConfigStorage, OnChainConfig, StorageGasSchedule}, + state_store::state_key::StateKey, + write_set::WriteOp, }; use aptos_vm_types::{change_set::VMChangeSet, check_change_set::CheckChangeSet}; use move_core_types::{ @@ -26,12 +28,12 @@ pub struct StoragePricingV1 { impl StoragePricingV1 { fn new(gas_params: &AptosGasParameters) -> Self { Self { - write_data_per_op: gas_params.txn.write_data_per_op, + write_data_per_op: gas_params.txn.storage_io_per_state_slot_write, write_data_per_new_item: gas_params.txn.write_data_per_new_item, - write_data_per_byte_in_key: gas_params.txn.write_data_per_byte_in_key, + write_data_per_byte_in_key: gas_params.txn.storage_io_per_state_byte_write, write_data_per_byte_in_val: gas_params.txn.write_data_per_byte_in_val, - load_data_base: gas_params.txn.load_data_base, - load_data_per_byte: gas_params.txn.load_data_per_byte, + load_data_base: gas_params.txn.storage_io_per_state_slot_read * NumArgs::new(1), + load_data_per_byte: gas_params.txn.storage_io_per_state_byte_read, load_data_failure: gas_params.txn.load_data_failure, } } @@ -89,33 +91,17 @@ pub struct StoragePricingV2 { impl StoragePricingV2 { pub fn zeros() -> Self { - Self::new( - LATEST_GAS_FEATURE_VERSION, - &StorageGasSchedule::zeros(), - &AptosGasParameters::zeros(), - ) + Self::new_without_storage_curves(LATEST_GAS_FEATURE_VERSION, &AptosGasParameters::zeros()) } - pub fn new( + pub fn new_with_storage_curves( feature_version: u64, storage_gas_schedule: &StorageGasSchedule, gas_params: &AptosGasParameters, ) -> Self { - assert!(feature_version > 0); - - let free_write_bytes_quota = if feature_version >= 5 { - gas_params.txn.free_write_bytes_quota - } else if feature_version >= 3 { - 1024.into() - } else { - // for feature_version 2 and below `free_write_bytes_quota` won't be used anyway - // but let's set it properly to reduce confusion. 
- 0.into() - }; - Self { feature_version, - free_write_bytes_quota, + free_write_bytes_quota: Self::get_free_write_bytes_quota(feature_version, gas_params), per_item_read: storage_gas_schedule.per_item_read.into(), per_item_create: storage_gas_schedule.per_item_create.into(), per_item_write: storage_gas_schedule.per_item_write.into(), @@ -125,6 +111,34 @@ impl StoragePricingV2 { } } + pub fn new_without_storage_curves( + feature_version: u64, + gas_params: &AptosGasParameters, + ) -> Self { + Self { + feature_version, + free_write_bytes_quota: Self::get_free_write_bytes_quota(feature_version, gas_params), + per_item_read: gas_params.txn.storage_io_per_state_slot_read, + per_item_create: gas_params.txn.storage_io_per_state_slot_write, + per_item_write: gas_params.txn.storage_io_per_state_slot_write, + per_byte_read: gas_params.txn.storage_io_per_state_byte_read, + per_byte_create: gas_params.txn.storage_io_per_state_byte_write, + per_byte_write: gas_params.txn.storage_io_per_state_byte_write, + } + } + + fn get_free_write_bytes_quota( + feature_version: u64, + gas_params: &AptosGasParameters, + ) -> NumBytes { + match feature_version { + 0 => unreachable!("PricingV2 not applicable for feature version 0"), + 1..=2 => 0.into(), + 3..=4 => 1024.into(), + 5.. => gas_params.txn.free_write_bytes_quota, + } + } + fn write_op_size(&self, key: &StateKey, value: &[u8]) -> NumBytes { let value_size = NumBytes::new(value.len() as u64); @@ -143,12 +157,8 @@ impl StoragePricingV2 { } } - fn calculate_read_gas(&self, loaded: Option) -> InternalGas { - self.per_item_read * (NumArgs::from(1)) - + match loaded { - Some(num_bytes) => self.per_byte_read * num_bytes, - None => 0.into(), - } + fn calculate_read_gas(&self, loaded: NumBytes) -> InternalGas { + self.per_item_read * (NumArgs::from(1)) + self.per_byte_read * loaded } fn io_gas_per_write(&self, key: &StateKey, op: &WriteOp) -> InternalGas { @@ -175,12 +185,42 @@ pub enum StoragePricing { } impl StoragePricing { - pub fn calculate_read_gas(&self, loaded: Option) -> InternalGas { + pub fn new( + feature_version: u64, + gas_params: &AptosGasParameters, + config_storage: &impl ConfigStorage, + ) -> StoragePricing { + use StoragePricing::*; + + match feature_version { + 0 => V1(StoragePricingV1::new(gas_params)), + 1..=9 => match StorageGasSchedule::fetch_config(config_storage) { + None => V1(StoragePricingV1::new(gas_params)), + Some(schedule) => V2(StoragePricingV2::new_with_storage_curves( + feature_version, + &schedule, + gas_params, + )), + }, + 10.. 
=> V2(StoragePricingV2::new_without_storage_curves( + feature_version, + gas_params, + )), + } + } + + pub fn calculate_read_gas(&self, resource_exists: bool, bytes_loaded: NumBytes) -> InternalGas { use StoragePricing::*; match self { - V1(v1) => v1.calculate_read_gas(loaded), - V2(v2) => v2.calculate_read_gas(loaded), + V1(v1) => v1.calculate_read_gas( + if resource_exists { + Some(bytes_loaded) + } else { + None + }, + ), + V2(v2) => v2.calculate_read_gas(bytes_loaded), } } @@ -306,27 +346,16 @@ pub struct StorageGasParameters { impl StorageGasParameters { pub fn new( feature_version: u64, - gas_params: Option<&AptosGasParameters>, - storage_gas_schedule: Option<&StorageGasSchedule>, - ) -> Option { - if feature_version == 0 || gas_params.is_none() { - return None; - } - let gas_params = gas_params.unwrap(); - - let pricing = match storage_gas_schedule { - Some(schedule) => { - StoragePricing::V2(StoragePricingV2::new(feature_version, schedule, gas_params)) - }, - None => StoragePricing::V1(StoragePricingV1::new(gas_params)), - }; - + gas_params: &AptosGasParameters, + config_storage: &impl ConfigStorage, + ) -> Self { + let pricing = StoragePricing::new(feature_version, gas_params, config_storage); let change_set_configs = ChangeSetConfigs::new(feature_version, gas_params); - Some(Self { + Self { pricing, change_set_configs, - }) + } } pub fn free_and_unlimited() -> Self { diff --git a/aptos-move/aptos-release-builder/src/components/consensus_config.rs b/aptos-move/aptos-release-builder/src/components/consensus_config.rs index 4d4f6e890978a..d21499e138fd9 100644 --- a/aptos-move/aptos-release-builder/src/components/consensus_config.rs +++ b/aptos-move/aptos-release-builder/src/components/consensus_config.rs @@ -23,7 +23,7 @@ pub fn generate_consensus_upgrade_proposal( &writer, is_testnet, next_execution_hash.clone(), - "aptos_framework::consensus_config", + &["aptos_framework::consensus_config"], |writer| { let consensus_config_blob = bcs::to_bytes(consensus_config).unwrap(); assert!(consensus_config_blob.len() < 65536); diff --git a/aptos-move/aptos-release-builder/src/components/execution_config.rs b/aptos-move/aptos-release-builder/src/components/execution_config.rs index 457d20485e954..4bc5db4364468 100644 --- a/aptos-move/aptos-release-builder/src/components/execution_config.rs +++ b/aptos-move/aptos-release-builder/src/components/execution_config.rs @@ -23,7 +23,7 @@ pub fn generate_execution_config_upgrade_proposal( &writer, is_testnet, next_execution_hash.clone(), - "aptos_framework::execution_config", + &["aptos_framework::execution_config"], |writer| { let execution_config_blob = bcs::to_bytes(execution_config).unwrap(); assert!(execution_config_blob.len() < 65536); diff --git a/aptos-move/aptos-release-builder/src/components/feature_flags.rs b/aptos-move/aptos-release-builder/src/components/feature_flags.rs index 4876fc6977a04..0dbc07b984529 100644 --- a/aptos-move/aptos-release-builder/src/components/feature_flags.rs +++ b/aptos-move/aptos-release-builder/src/components/feature_flags.rs @@ -91,7 +91,7 @@ pub fn generate_feature_upgrade_proposal( &writer, is_testnet, next_execution_hash.clone(), - "std::features", + &["std::features"], |writer| { emit!(writer, "let enabled_blob: vector = "); generate_features_blob(writer, &enabled); @@ -106,11 +106,13 @@ pub fn generate_feature_upgrade_proposal( writer, "features::change_feature_flags(framework_signer, enabled_blob, disabled_blob);" ); + emitln!(writer, "aptos_governance::reconfigure(framework_signer);"); } else { emitln!( 
writer, "features::change_feature_flags(&framework_signer, enabled_blob, disabled_blob);" ); + emitln!(writer, "aptos_governance::reconfigure(&framework_signer);"); } }, ); diff --git a/aptos-move/aptos-release-builder/src/components/gas.rs b/aptos-move/aptos-release-builder/src/components/gas.rs index 592b10b53cd02..64a8044c44626 100644 --- a/aptos-move/aptos-release-builder/src/components/gas.rs +++ b/aptos-move/aptos-release-builder/src/components/gas.rs @@ -44,7 +44,7 @@ pub fn generate_gas_upgrade_proposal( &writer, is_testnet, next_execution_hash.clone(), - "aptos_framework::gas_schedule", + &["aptos_framework::gas_schedule"], |writer| { let gas_schedule_blob = bcs::to_bytes(gas_schedule).unwrap(); assert!(gas_schedule_blob.len() < 65536); diff --git a/aptos-move/aptos-release-builder/src/components/transaction_fee.rs b/aptos-move/aptos-release-builder/src/components/transaction_fee.rs index 5508dd3bf0437..9b4b17dce06c7 100644 --- a/aptos-move/aptos-release-builder/src/components/transaction_fee.rs +++ b/aptos-move/aptos-release-builder/src/components/transaction_fee.rs @@ -19,7 +19,7 @@ pub fn generate_fee_distribution_proposal( &writer, is_testnet, next_execution_hash, - "aptos_framework::transaction_fee", + &["aptos_framework::transaction_fee"], |writer| { emitln!( writer, diff --git a/aptos-move/aptos-release-builder/src/components/version.rs b/aptos-move/aptos-release-builder/src/components/version.rs index 75ea9440515c0..6e88d5174e279 100644 --- a/aptos-move/aptos-release-builder/src/components/version.rs +++ b/aptos-move/aptos-release-builder/src/components/version.rs @@ -19,7 +19,7 @@ pub fn generate_version_upgrade_proposal( &writer, is_testnet, next_execution_hash.clone(), - "aptos_framework::version", + &["aptos_framework::version"], |writer| { if is_testnet && next_execution_hash.is_empty() { emitln!( diff --git a/aptos-move/aptos-release-builder/src/utils.rs b/aptos-move/aptos-release-builder/src/utils.rs index 195140e02688c..2b49469918a0b 100644 --- a/aptos-move/aptos-release-builder/src/utils.rs +++ b/aptos-move/aptos-release-builder/src/utils.rs @@ -55,7 +55,7 @@ pub(crate) fn generate_next_execution_hash_blob( pub(crate) fn generate_governance_proposal_header( writer: &CodeWriter, - deps_name: &str, + deps_names: &[&str], is_multi_step: bool, next_execution_hash: Vec, ) { @@ -63,7 +63,9 @@ pub(crate) fn generate_governance_proposal_header( writer.indent(); emitln!(writer, "use aptos_framework::aptos_governance;"); - emitln!(writer, "use {};", deps_name); + for deps_name in deps_names { + emitln!(writer, "use {};", deps_name); + } if next_execution_hash == "vector::empty()".as_bytes() { emitln!(writer, "use std::vector;"); } @@ -83,12 +85,14 @@ pub(crate) fn generate_governance_proposal_header( } } -pub(crate) fn generate_testnet_header(writer: &CodeWriter, deps_name: &str) { +pub(crate) fn generate_testnet_header(writer: &CodeWriter, deps_names: &[&str]) { emitln!(writer, "script {"); writer.indent(); emitln!(writer, "use aptos_framework::aptos_governance;"); - emitln!(writer, "use {};", deps_name); + for deps_name in deps_names { + emitln!(writer, "use {};", deps_name); + } emitln!(writer); emitln!(writer, "fun main(core_resources: &signer) {"); @@ -116,7 +120,7 @@ pub(crate) fn generate_governance_proposal( writer: &CodeWriter, is_testnet: bool, next_execution_hash: Vec, - deps_name: &str, + deps_names: &[&str], body: F, ) -> String where @@ -124,17 +128,17 @@ where { if next_execution_hash.is_empty() { if is_testnet { - generate_testnet_header(writer, deps_name); 
+ generate_testnet_header(writer, deps_names); } else { generate_governance_proposal_header( writer, - deps_name, + deps_names, false, "".to_owned().into_bytes(), ); } } else { - generate_governance_proposal_header(writer, deps_name, true, next_execution_hash); + generate_governance_proposal_header(writer, deps_names, true, next_execution_hash); }; body(writer); diff --git a/aptos-move/aptos-transaction-benchmarks/Cargo.toml b/aptos-move/aptos-transaction-benchmarks/Cargo.toml index 26553f2b7f7e3..c12858ab2218f 100644 --- a/aptos-move/aptos-transaction-benchmarks/Cargo.toml +++ b/aptos-move/aptos-transaction-benchmarks/Cargo.toml @@ -14,7 +14,9 @@ rust-version = { workspace = true } [dependencies] aptos-bitvec = { workspace = true } +aptos-block-partitioner = { workspace = true } aptos-crypto = { workspace = true } +aptos-executor-service = { workspace = true } aptos-gas = { workspace = true, features = ["testing"] } aptos-language-e2e-tests = { workspace = true } aptos-logger = { workspace = true } @@ -26,7 +28,9 @@ clap = { workspace = true } criterion = { workspace = true, features = ["html_reports"] } criterion-cpu-time = { workspace = true } num_cpus = { workspace = true } +once_cell = { workspace = true } proptest = { workspace = true } +rayon = { workspace = true } [[bench]] name = "transaction_benches" diff --git a/aptos-move/aptos-transaction-benchmarks/src/main.rs b/aptos-move/aptos-transaction-benchmarks/src/main.rs index e710dd0c604d5..5c3dbde34ecc3 100755 --- a/aptos-move/aptos-transaction-benchmarks/src/main.rs +++ b/aptos-move/aptos-transaction-benchmarks/src/main.rs @@ -9,7 +9,10 @@ use aptos_push_metrics::MetricsPusher; use aptos_transaction_benchmarks::transactions::TransactionBencher; use clap::{Parser, Subcommand}; use proptest::prelude::*; -use std::time::{SystemTime, UNIX_EPOCH}; +use std::{ + net::SocketAddr, + time::{SystemTime, UNIX_EPOCH}, +}; /// This is needed for filters on the Grafana dashboard working as its used to populate the filter /// variables. 
@@ -49,7 +52,7 @@ struct ParamSweepOpt { pub num_runs: usize, #[clap(long)] - pub maybe_gas_limit: Option, + pub maybe_block_gas_limit: Option, } #[derive(Debug, Parser)] @@ -72,11 +75,14 @@ struct ExecuteOpt { #[clap(long, default_value = "1")] pub num_executor_shards: usize, + #[clap(long, min_values = 1, conflicts_with = "num_executor_shards")] + pub remote_executor_addresses: Option>, + #[clap(long, default_value = "true")] pub no_conflict_txns: bool, #[clap(long)] - pub maybe_gas_limit: Option, + pub maybe_block_gas_limit: Option, } fn param_sweep(opt: ParamSweepOpt) { @@ -91,7 +97,7 @@ fn param_sweep(opt: ParamSweepOpt) { let run_parallel = !opt.skip_parallel; let run_sequential = !opt.skip_sequential; - let maybe_gas_limit = opt.maybe_gas_limit; + let maybe_block_gas_limit = opt.maybe_block_gas_limit; assert!( run_sequential || run_parallel, @@ -109,8 +115,9 @@ fn param_sweep(opt: ParamSweepOpt) { opt.num_runs, 1, concurrency_level, + None, false, - maybe_gas_limit, + maybe_block_gas_limit, ); par_tps.sort(); seq_tps.sort(); @@ -170,8 +177,9 @@ fn execute(opt: ExecuteOpt) { opt.num_blocks, opt.num_executor_shards, opt.concurrency_level_per_shard, + opt.remote_executor_addresses, opt.no_conflict_txns, - opt.maybe_gas_limit, + opt.maybe_block_gas_limit, ); let sum: usize = par_tps.iter().sum(); diff --git a/aptos-move/aptos-transaction-benchmarks/src/transactions.rs b/aptos-move/aptos-transaction-benchmarks/src/transactions.rs index fb6d65617cf72..57eb02485b856 100644 --- a/aptos-move/aptos-transaction-benchmarks/src/transactions.rs +++ b/aptos-move/aptos-transaction-benchmarks/src/transactions.rs @@ -3,7 +3,9 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_bitvec::BitVec; +use aptos_block_partitioner::sharded_block_partitioner::ShardedBlockPartitioner; use aptos_crypto::HashValue; +use aptos_executor_service::remote_executor_client::RemoteExecutorClient; use aptos_language_e2e_tests::{ account_universe::{AUTransactionGen, AccountPickStyle, AccountUniverse, AccountUniverseGen}, data_store::FakeDataStore, @@ -11,18 +13,34 @@ use aptos_language_e2e_tests::{ gas_costs::TXN_RESERVED, }; use aptos_types::{ + block_executor::partitioner::BlockExecutorTransactions, block_metadata::BlockMetadata, on_chain_config::{OnChainConfig, ValidatorSet}, - transaction::Transaction, + transaction::{analyzed_transaction::AnalyzedTransaction, Transaction}, +}; +use aptos_vm::{ + block_executor::BlockAptosVM, + data_cache::AsMoveResolver, + sharded_block_executor::{block_executor_client::LocalExecutorClient, ShardedBlockExecutor}, }; -use aptos_vm::{data_cache::AsMoveResolver, sharded_block_executor::ShardedBlockExecutor}; use criterion::{measurement::Measurement, BatchSize, Bencher}; +use once_cell::sync::Lazy; use proptest::{ collection::vec, strategy::{Strategy, ValueTree}, test_runner::TestRunner, }; -use std::{sync::Arc, time::Instant}; +use std::{net::SocketAddr, sync::Arc, time::Instant}; + +pub static RAYON_EXEC_POOL: Lazy> = Lazy::new(|| { + Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(num_cpus::get()) + .thread_name(|index| format!("par_exec_{}", index)) + .build() + .unwrap(), + ) +}); /// Benchmarking support for transactions. 
#[derive(Clone)] @@ -72,8 +90,8 @@ where self.num_accounts, self.num_transactions, 1, - AccountPickStyle::Unlimited, None, + AccountPickStyle::Unlimited, ) }, |state| state.execute_sequential(), @@ -91,8 +109,8 @@ where self.num_accounts, self.num_transactions, 1, - AccountPickStyle::Unlimited, None, + AccountPickStyle::Unlimited, ) }, |state| state.execute_parallel(), @@ -112,8 +130,9 @@ where num_runs: usize, num_executor_shards: usize, concurrency_level_per_shard: usize, + remote_executor_addresses: Option>, no_conflict_txn: bool, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> (Vec, Vec) { let mut par_tps = Vec::new(); let mut seq_tps = Vec::new(); @@ -137,8 +156,8 @@ where num_accounts, num_txn, num_executor_shards, + remote_executor_addresses, account_pick_style, - maybe_gas_limit, ); for i in 0..total_runs { @@ -149,6 +168,7 @@ where run_seq, no_conflict_txn, concurrency_level_per_shard, + maybe_block_gas_limit, ); } else { let tps = state.execute_blockstm_benchmark( @@ -156,6 +176,7 @@ where run_seq, no_conflict_txn, concurrency_level_per_shard, + maybe_block_gas_limit, ); par_tps.push(tps.0); seq_tps.push(tps.1); @@ -170,8 +191,8 @@ struct TransactionBenchState { num_transactions: usize, strategy: S, account_universe: AccountUniverse, - parallel_block_executor: Arc>, - sequential_block_executor: Arc>, + parallel_block_executor: Option>>, + block_partitioner: Option, validator_set: ValidatorSet, state_view: Arc, } @@ -187,15 +208,15 @@ where num_accounts: usize, num_transactions: usize, num_executor_shards: usize, + remote_executor_addresses: Option>, account_pick_style: AccountPickStyle, - maybe_gas_limit: Option, ) -> Self { Self::with_universe( strategy, universe_strategy(num_accounts, num_transactions, account_pick_style), num_transactions, num_executor_shards, - maybe_gas_limit, + remote_executor_addresses, ) } @@ -206,7 +227,7 @@ where universe_strategy: impl Strategy, num_transactions: usize, num_executor_shards: usize, - maybe_gas_limit: Option, + remote_executor_addresses: Option>, ) -> Self { let mut runner = TestRunner::default(); let universe_gen = universe_strategy @@ -221,13 +242,26 @@ where let universe = universe_gen.setup_gas_cost_stability(&mut executor); let state_view = Arc::new(executor.get_state_view().clone()); - let parallel_block_executor = Arc::new(ShardedBlockExecutor::new( - num_executor_shards, - None, - maybe_gas_limit, - )); - let sequential_block_executor = - Arc::new(ShardedBlockExecutor::new(1, Some(1), maybe_gas_limit)); + let (parallel_block_executor, block_partitioner) = if num_executor_shards == 1 { + (None, None) + } else { + let parallel_block_executor = + if let Some(remote_executor_addresses) = remote_executor_addresses { + let remote_executor_clients = remote_executor_addresses + .into_iter() + .map(|addr| RemoteExecutorClient::new(addr, 10000)) + .collect::>(); + Arc::new(ShardedBlockExecutor::new(remote_executor_clients)) + } else { + let local_executor_client = + LocalExecutorClient::create_local_clients(num_executor_shards, None); + Arc::new(ShardedBlockExecutor::new(local_executor_client)) + }; + ( + Some(parallel_block_executor), + Some(ShardedBlockPartitioner::new(num_executor_shards)), + ) + }; let validator_set = ValidatorSet::fetch_config( &FakeExecutor::from_head_genesis() @@ -241,7 +275,7 @@ where strategy, account_universe: universe, parallel_block_executor, - sequential_block_executor, + block_partitioner, validator_set, state_view, } @@ -290,10 +324,7 @@ where // The output is ignored here since we're just 
testing transaction performance, not trying // to assert correctness. let txns = self.gen_transaction(false); - let executor = self.sequential_block_executor; - executor - .execute_block(self.state_view.clone(), txns, 1) - .expect("VM should not fail to start"); + self.execute_benchmark_sequential(txns, None); } /// Executes this state in a single block. @@ -301,27 +332,65 @@ where // The output is ignored here since we're just testing transaction performance, not trying // to assert correctness. let txns = self.gen_transaction(false); - let executor = self.parallel_block_executor.clone(); - executor - .execute_block(self.state_view.clone(), txns, num_cpus::get()) - .expect("VM should not fail to start"); + self.execute_benchmark_parallel(txns, num_cpus::get(), None); } - fn execute_benchmark( + fn execute_benchmark_sequential( + &self, + transactions: Vec, + maybe_block_gas_limit: Option, + ) -> usize { + let block_size = transactions.len(); + let timer = Instant::now(); + BlockAptosVM::execute_block( + Arc::clone(&RAYON_EXEC_POOL), + BlockExecutorTransactions::Unsharded(transactions), + self.state_view.as_ref(), + 1, + maybe_block_gas_limit, + ) + .expect("VM should not fail to start"); + let exec_time = timer.elapsed().as_millis(); + + block_size * 1000 / exec_time as usize + } + + fn execute_benchmark_parallel( &self, transactions: Vec, - block_executor: Arc>, concurrency_level_per_shard: usize, + maybe_block_gas_limit: Option, ) -> usize { let block_size = transactions.len(); let timer = Instant::now(); - block_executor - .execute_block( - self.state_view.clone(), - transactions, + if let Some(parallel_block_executor) = self.parallel_block_executor.as_ref() { + // TODO(skedia) partition in a pipelined way and evaluate how expensive it is to + // parse the txns in a single thread. 
+ let partitioned_block = self.block_partitioner.as_ref().unwrap().partition( + transactions + .into_iter() + .map(|txn| txn.into()) + .collect::>(), + 1, + ); + parallel_block_executor + .execute_block( + self.state_view.clone(), + partitioned_block, + concurrency_level_per_shard, + maybe_block_gas_limit, + ) + .expect("VM should not fail to start"); + } else { + BlockAptosVM::execute_block( + Arc::clone(&RAYON_EXEC_POOL), + BlockExecutorTransactions::Unsharded(transactions), + self.state_view.as_ref(), concurrency_level_per_shard, + maybe_block_gas_limit, ) .expect("VM should not fail to start"); + } let exec_time = timer.elapsed().as_millis(); block_size * 1000 / exec_time as usize @@ -333,14 +402,15 @@ where run_seq: bool, no_conflict_txns: bool, conurrency_level_per_shard: usize, + maybe_block_gas_limit: Option, ) -> (usize, usize) { let transactions = self.gen_transaction(no_conflict_txns); let par_tps = if run_par { println!("Parallel execution starts..."); - let tps = self.execute_benchmark( + let tps = self.execute_benchmark_parallel( transactions.clone(), - self.parallel_block_executor.clone(), conurrency_level_per_shard, + maybe_block_gas_limit, ); println!("Parallel execution finishes, TPS = {}", tps); tps @@ -349,8 +419,7 @@ where }; let seq_tps = if run_seq { println!("Sequential execution starts..."); - let tps = - self.execute_benchmark(transactions, self.sequential_block_executor.clone(), 1); + let tps = self.execute_benchmark_sequential(transactions, maybe_block_gas_limit); println!("Sequential execution finishes, TPS = {}", tps); tps } else { diff --git a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs index fb671b281013a..96f880b559fca 100644 --- a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs +++ b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs @@ -468,7 +468,7 @@ impl<'a> AptosTestAdapter<'a> { /// Should error if the transaction ends up being discarded, or having a status other than /// EXECUTED. fn run_transaction(&mut self, txn: Transaction) -> Result { - let mut outputs = AptosVM::execute_block(vec![txn], &self.storage.clone())?; + let mut outputs = AptosVM::execute_block(vec![txn], &self.storage.clone(), None)?; assert_eq!(outputs.len(), 1); diff --git a/aptos-move/aptos-vm-logging/src/counters.rs b/aptos-move/aptos-vm-logging/src/counters.rs index 106541e018e35..ee3f54dcabfb5 100644 --- a/aptos-move/aptos-vm-logging/src/counters.rs +++ b/aptos-move/aptos-vm-logging/src/counters.rs @@ -9,3 +9,13 @@ use once_cell::sync::Lazy; pub static CRITICAL_ERRORS: Lazy = Lazy::new(|| { register_int_counter!("aptos_vm_critical_errors", "Number of critical errors").unwrap() }); + +/// Count the number of errors within the speculative logging logic / implementation. +/// Intended to trigger lower priority / urgency alerts. 
+pub static SPECULATIVE_LOGGING_ERRORS: Lazy = Lazy::new(|| { + register_int_counter!( + "aptos_vm_speculative_logging_errors", + "Number of errors in speculative logging implementation" + ) + .unwrap() +}); diff --git a/aptos-move/aptos-vm-logging/src/lib.rs b/aptos-move/aptos-vm-logging/src/lib.rs index f7ccae76dec36..57637ba18a513 100644 --- a/aptos-move/aptos-vm-logging/src/lib.rs +++ b/aptos-move/aptos-vm-logging/src/lib.rs @@ -11,7 +11,10 @@ pub mod prelude { }; } -use crate::{counters::CRITICAL_ERRORS, log_schema::AdapterLogSchema}; +use crate::{ + counters::{CRITICAL_ERRORS, SPECULATIVE_LOGGING_ERRORS}, + log_schema::AdapterLogSchema, +}; use aptos_logger::{prelude::*, Level}; use aptos_speculative_state_helper::{SpeculativeEvent, SpeculativeEvents}; use arc_swap::ArcSwapOption; @@ -98,11 +101,11 @@ pub fn speculative_log(level: Level, context: &AdapterLogSchema, message: String Some(log_events) => { let log_event = VMLogEntry::new(level, context.clone(), message); if let Err(e) = log_events.record(txn_idx, log_event) { - alert!("{:?}", e); + speculative_alert!("{:?}", e); }; }, None => { - alert!( + speculative_alert!( "Speculative state not initialized to log message = {}", message ); @@ -120,14 +123,16 @@ pub fn flush_speculative_logs(num_to_flush: usize) { match Arc::try_unwrap(log_events_ptr) { Ok(log_events) => log_events.flush(num_to_flush), Err(_) => { - alert!("Speculative log storage must be uniquely owned to flush"); + speculative_alert!("Speculative log storage must be uniquely owned to flush"); }, }; }, None => { if !speculation_disabled() { // Alert only if speculation is not disabled. - alert!("Clear all logs called on uninitialized speculative log storage"); + speculative_alert!( + "Clear all logs called on uninitialized speculative log storage" + ); } }, } @@ -139,19 +144,21 @@ pub fn clear_speculative_txn_logs(txn_idx: usize) { match &*BUFFERED_LOG_EVENTS.load() { Some(log_events) => { if let Err(e) = log_events.clear_txn_events(txn_idx) { - alert!("{:?}", e); + speculative_alert!("{:?}", e); }; }, None => { if !speculation_disabled() { // Alert only if speculation is not disabled. - alert!("Clear all logs called on uninitialized speculative log storage"); + speculative_alert!( + "Clear all logs called on uninitialized speculative log storage" + ); } }, } } -/// Combine logging and error and incrementing critical errors counter for alerting. +/// Alert for vm critical errors. #[macro_export] macro_rules! alert { ($($args:tt)+) => { @@ -160,6 +167,14 @@ macro_rules! alert { }; } +#[macro_export] +macro_rules! speculative_alert { + ($($args:tt)+) => { + warn!($($args)+); + SPECULATIVE_LOGGING_ERRORS.inc(); + }; +} + #[macro_export] macro_rules! 
speculative_error { ($($args:tt)+) => { diff --git a/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs b/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs index e1ee32f805fcb..ec1ecd8a226af 100644 --- a/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs +++ b/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs @@ -44,7 +44,7 @@ fn main() -> Result<()> { }) .collect(); - let res = AptosVM::execute_block(txns, &state_store)?; + let res = AptosVM::execute_block(txns, &state_store, None)?; for i in 0..NUM_TXNS { assert!(res[i as usize].status().status().unwrap().is_success()); } diff --git a/aptos-move/aptos-vm-types/src/output.rs b/aptos-move/aptos-vm-types/src/output.rs index 593b01996a7a5..5e40abf94dc67 100644 --- a/aptos-move/aptos-vm-types/src/output.rs +++ b/aptos-move/aptos-vm-types/src/output.rs @@ -6,6 +6,7 @@ use aptos_aggregator::delta_change_set::DeltaChangeSet; use aptos_state_view::StateView; use aptos_types::{ contract_event::ContractEvent, + fee_statement::FeeStatement, state_store::state_key::StateKey, transaction::{TransactionOutput, TransactionStatus}, write_set::{WriteOp, WriteSet}, @@ -17,15 +18,19 @@ use move_core_types::vm_status::VMStatus; #[derive(Debug, Clone)] pub struct VMOutput { change_set: VMChangeSet, - gas_used: u64, + fee_statement: FeeStatement, status: TransactionStatus, } impl VMOutput { - pub fn new(change_set: VMChangeSet, gas_used: u64, status: TransactionStatus) -> Self { + pub fn new( + change_set: VMChangeSet, + fee_statement: FeeStatement, + status: TransactionStatus, + ) -> Self { Self { change_set, - gas_used, + fee_statement, status, } } @@ -35,13 +40,17 @@ impl VMOutput { pub fn empty_with_status(status: TransactionStatus) -> Self { Self { change_set: VMChangeSet::empty(), - gas_used: 0, + fee_statement: FeeStatement::zero(), status, } } pub fn unpack(self) -> (VMChangeSet, u64, TransactionStatus) { - (self.change_set, self.gas_used, self.status) + (self.change_set, self.fee_statement.gas_used(), self.status) + } + + pub fn unpack_with_fee_statement(self) -> (VMChangeSet, FeeStatement, TransactionStatus) { + (self.change_set, self.fee_statement, self.status) } pub fn write_set(&self) -> &WriteSet { @@ -57,7 +66,11 @@ impl VMOutput { } pub fn gas_used(&self) -> u64 { - self.gas_used + self.fee_statement.gas_used() + } + + pub fn fee_statement(&self) -> &FeeStatement { + &self.fee_statement } pub fn status(&self) -> &TransactionStatus { @@ -78,9 +91,13 @@ impl VMOutput { } // Try to materialize deltas and add them to the write set. 
- let (change_set, gas_used, status) = self.unpack(); + let (change_set, fee_statement, status) = self.unpack_with_fee_statement(); let materialized_change_set = change_set.try_materialize(state_view)?; - Ok(VMOutput::new(materialized_change_set, gas_used, status)) + Ok(VMOutput::new( + materialized_change_set, + fee_statement, + status, + )) } /// Converts VMOutput into TransactionOutput which can be used by storage diff --git a/aptos-move/aptos-vm-types/src/tests/utils.rs b/aptos-move/aptos-vm-types/src/tests/utils.rs index 2b2743750c56d..a98da919f083c 100644 --- a/aptos-move/aptos-vm-types/src/tests/utils.rs +++ b/aptos-move/aptos-vm-types/src/tests/utils.rs @@ -4,6 +4,7 @@ use crate::{change_set::VMChangeSet, check_change_set::CheckChangeSet, output::VMOutput}; use aptos_aggregator::delta_change_set::{serialize, DeltaChangeSet, DeltaOp}; use aptos_types::{ + fee_statement::FeeStatement, state_store::state_key::StateKey, transaction::{ExecutionStatus, TransactionStatus}, write_set::{WriteOp, WriteSetMut}, @@ -89,7 +90,7 @@ pub(crate) fn build_vm_output( const STATUS: TransactionStatus = TransactionStatus::Keep(ExecutionStatus::Success); VMOutput::new( build_change_set(write_set, delta_change_set), - GAS_USED, + FeeStatement::new(GAS_USED, GAS_USED, 0, 0, 0), STATUS, ) } diff --git a/aptos-move/aptos-vm/Cargo.toml b/aptos-move/aptos-vm/Cargo.toml index cf2238385084d..b1c2423e953d6 100644 --- a/aptos-move/aptos-vm/Cargo.toml +++ b/aptos-move/aptos-vm/Cargo.toml @@ -16,6 +16,7 @@ rust-version = { workspace = true } anyhow = { workspace = true } aptos-aggregator = { workspace = true } aptos-block-executor = { workspace = true } +aptos-block-partitioner = { workspace = true } aptos-crypto = { workspace = true } aptos-crypto-derive = { workspace = true } aptos-framework = { workspace = true } @@ -45,6 +46,7 @@ move-vm-types = { workspace = true } num_cpus = { workspace = true } once_cell = { workspace = true } ouroboros = { workspace = true } +rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/aptos-move/aptos-vm/src/adapter_common.rs b/aptos-move/aptos-vm/src/adapter_common.rs index 57799a323ceec..eb86a342c348b 100644 --- a/aptos-move/aptos-vm/src/adapter_common.rs +++ b/aptos-move/aptos-vm/src/adapter_common.rs @@ -80,7 +80,7 @@ pub(crate) trait VMAdapter { /// Transactions after signature checking: /// Waypoints and BlockPrologues are not signed and are unaffected by signature checking, /// but a user transaction or writeset transaction is transformed to a SignatureCheckedTransaction. 
-#[derive(Debug)] +#[derive(Clone, Debug)] pub enum PreprocessedTransaction { UserTransaction(Box), WaypointWriteSet(WriteSetPayload), diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 80163c8661e57..f38c96f75895b 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -9,7 +9,7 @@ use crate::{ aptos_vm_impl::{get_transaction_output, AptosVMImpl, AptosVMInternals}, block_executor::BlockAptosVM, counters::*, - data_cache::{AsMoveResolver, StorageAdapter}, + data_cache::StorageAdapter, errors::expect_only_successful_execution, move_vm_ext::{MoveResolverExt, RespawnedSession, SessionExt, SessionId}, sharded_block_executor::ShardedBlockExecutor, @@ -30,7 +30,9 @@ use aptos_state_view::StateView; use aptos_types::{ account_config, account_config::new_block_event_key, + block_executor::partitioner::{BlockExecutorTransactions, SubBlocksForShard}, block_metadata::BlockMetadata, + fee_statement::FeeStatement, on_chain_config::{new_epoch_event_key, FeatureFlag, TimedFeatureOverride}, transaction::{ EntryFunction, ExecutionError, ExecutionStatus, ModuleBundle, Multisig, @@ -244,6 +246,31 @@ impl AptosVM { .1 } + pub fn as_move_resolver<'a, S: StateView>(&self, state_view: &'a S) -> StorageAdapter<'a, S> { + StorageAdapter::new_with_cached_config( + state_view, + self.0.get_gas_feature_version(), + self.0.get_features(), + ) + } + + fn fee_statement_from_gas_meter( + txn_data: &TransactionMetadata, + gas_meter: &impl AptosGasMeter, + ) -> FeeStatement { + let gas_used = txn_data + .max_gas_amount() + .checked_sub(gas_meter.balance()) + .expect("Balance should always be less than or equal to max gas amount"); + FeeStatement::new( + gas_used.into(), + u64::from(gas_meter.execution_gas_used()), + u64::from(gas_meter.io_gas_used()), + u64::from(gas_meter.storage_fee_used_in_gas_units()), + u64::from(gas_meter.storage_fee_used()), + ) + } + fn failed_transaction_cleanup_and_keep_vm_status( &self, error_code: VMStatus, @@ -294,11 +321,11 @@ impl AptosVM { ) { return discard_error_vm_status(e); } + let fee_statement = AptosVM::fee_statement_from_gas_meter(txn_data, gas_meter); let txn_output = get_transaction_output( &mut (), session, - gas_meter.balance(), - txn_data, + fee_statement, status, change_set_configs, ) @@ -325,14 +352,10 @@ impl AptosVM { .run_success_epilogue(session, gas_meter.balance(), txn_data, log_context) })?; let change_set = respawned_session.finish(change_set_configs)?; - let gas_used = txn_data - .max_gas_amount() - .checked_sub(gas_meter.balance()) - .expect("Balance should always be less than or equal to max gas amount"); - + let fee_statement = AptosVM::fee_statement_from_gas_meter(txn_data, gas_meter); let output = VMOutput::new( change_set, - gas_used.into(), + fee_statement, TransactionStatus::Keep(ExecutionStatus::Success), ); @@ -1019,6 +1042,9 @@ impl AptosVM { // have been previously cached in the prologue. // // TODO(Gas): Do this in a better way in the future, perhaps without forcing the data cache to be flushed. + // By releasing resource group cache, we start with a fresh slate for resource group + // cost accounting. + resolver.release_resource_group_cache(); session = self.0.new_session(resolver, SessionId::txn(txn), true); } @@ -1137,8 +1163,7 @@ impl AptosVM { F: FnOnce(u64, AptosGasParameters, StorageGasParameters, Gas) -> Result, { // TODO(Gas): revisit this. 
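The hunks above replace the single `gas_used` figure in `VMOutput` with a full `FeeStatement`, derived in `fee_statement_from_gas_meter` as the gas budget minus the meter's remaining balance. A minimal, std-only sketch of that shape follows; the struct and field names are simplified stand-ins and not the real `aptos_types::fee_statement::FeeStatement` API.

```rust
// Sketch only: a fee statement that carries the gas breakdown instead of a bare u64.
#[derive(Clone, Copy, Debug, Default)]
struct FeeStatementSketch {
    gas_used: u64,
    execution_gas: u64,
    io_gas: u64,
    storage_fee_in_gas_units: u64,
    storage_fee_octas: u64,
}

impl FeeStatementSketch {
    fn zero() -> Self {
        Self::default()
    }

    /// Derive the statement from a max gas budget and the meter's remaining balance,
    /// mirroring the checked_sub pattern used in the diff above.
    fn from_budget_and_balance(
        max_gas_amount: u64,
        balance: u64,
        execution_gas: u64,
        io_gas: u64,
        storage_fee_in_gas_units: u64,
        storage_fee_octas: u64,
    ) -> Self {
        let gas_used = max_gas_amount
            .checked_sub(balance)
            .expect("balance must not exceed the max gas amount");
        Self {
            gas_used,
            execution_gas,
            io_gas,
            storage_fee_in_gas_units,
            storage_fee_octas,
        }
    }

    fn gas_used(&self) -> u64 {
        self.gas_used
    }
}

fn main() {
    let fee = FeeStatementSketch::from_budget_and_balance(2_000, 1_400, 350, 150, 100, 10_000);
    assert_eq!(fee.gas_used(), 600);
    println!("{:?}", fee);
    let _ = FeeStatementSketch::zero();
}
```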
- let resolver = StorageAdapter::new(state_view); - let vm = AptosVM::new(&resolver); + let vm = AptosVM::new(state_view); // TODO(Gas): avoid creating txn metadata twice. let balance = TransactionMetadata::new(txn).max_gas_amount(); @@ -1149,6 +1174,11 @@ impl AptosVM { balance, )?; + let resolver = StorageAdapter::new_with_cached_config( + state_view, + vm.0.get_gas_feature_version(), + vm.0.get_features(), + ); let (status, output) = vm.execute_user_transaction_impl(&resolver, txn, log_context, &mut gas_meter); @@ -1265,7 +1295,7 @@ impl AptosVM { self.read_writeset(resolver, change_set.write_set())?; SYSTEM_TRANSACTIONS_EXECUTED.inc(); - let output = VMOutput::new(change_set, 0, VMStatus::Executed.into()); + let output = VMOutput::new(change_set, FeeStatement::zero(), VMStatus::Executed.into()); Ok((VMStatus::Executed, output)) } @@ -1310,8 +1340,7 @@ impl AptosVM { let output = get_transaction_output( &mut (), session, - 0.into(), - &txn_data, + FeeStatement::zero(), ExecutionStatus::Success, &self .0 @@ -1332,7 +1361,7 @@ impl AptosVM { // Try to simulate with aggregator enabled. let (vm_status, vm_output) = simulation_vm.simulate_signed_transaction( - &state_view.as_move_resolver(), + &simulation_vm.0.as_move_resolver(state_view), txn, &log_context, true, @@ -1349,7 +1378,7 @@ impl AptosVM { Err(_) => { // Conversion to TransactionOutput failed, re-simulate without aggregators. let (vm_status, vm_output) = simulation_vm.simulate_signed_transaction( - &state_view.as_move_resolver(), + &simulation_vm.0.as_move_resolver(state_view), txn, &log_context, false, @@ -1383,8 +1412,8 @@ impl AptosVM { vm.0.get_storage_gas_parameters(&log_context)?.clone(), gas_budget, ); - let resolver = &state_view.as_move_resolver(); - let mut session = vm.new_session(resolver, SessionId::Void, true); + let resolver = vm.as_move_resolver(state_view); + let mut session = vm.new_session(&resolver, SessionId::Void, true); let func_inst = session.load_function(&module_id, &func_name, &type_args)?; let metadata = vm.0.extract_module_metadata(&module_id); @@ -1472,6 +1501,7 @@ impl VMExecutor for AptosVM { fn execute_block( transactions: Vec, state_view: &(impl StateView + Sync), + maybe_block_gas_limit: Option, ) -> Result, VMStatus> { fail_point!("move_adapter::execute_block", |_| { Err(VMStatus::Error( @@ -1489,44 +1519,10 @@ impl VMExecutor for AptosVM { let count = transactions.len(); let ret = BlockAptosVM::execute_block( Arc::clone(&RAYON_EXEC_POOL), - transactions, + BlockExecutorTransactions::Unsharded(transactions), state_view, Self::get_concurrency_level(), - None, - ); - if ret.is_ok() { - // Record the histogram count for transactions per block. - BLOCK_TRANSACTION_COUNT.observe(count as f64); - } - ret - } - - fn execute_block_with_gas_limit( - transactions: Vec, - state_view: &(impl StateView + Sync), - maybe_gas_limit: Option, - ) -> std::result::Result, VMStatus> { - fail_point!("move_adapter::execute_block", |_| { - Err(VMStatus::Error( - StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, - None, - )) - }); - - let log_context = AdapterLogSchema::new(state_view.id(), 0); - info!( - log_context, - "Executing block, transaction count: {}", - transactions.len() - ); - - let count = transactions.len(); - let ret = BlockAptosVM::execute_block( - Arc::clone(&RAYON_EXEC_POOL), - transactions, - state_view, - Self::get_concurrency_level(), - maybe_gas_limit, + maybe_block_gas_limit, ); if ret.is_ok() { // Record the histogram count for transactions per block. 
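The hunk above folds the former `execute_block_with_gas_limit` entry point into `execute_block` by threading `maybe_block_gas_limit: Option<u64>` through a single call path. The sketch below only illustrates the general shape of such an API; `TxnOutput` and the retry flag are invented for the example and are not the aptos-vm block executor's actual gas-limit logic.

```rust
// Illustrative sketch: one execute_block entry point that takes an optional
// per-block gas limit instead of a separate *_with_gas_limit variant.
#[derive(Debug)]
struct TxnOutput {
    gas_used: u64,
    retried: bool,
}

fn execute_block(txn_gas_costs: &[u64], maybe_block_gas_limit: Option<u64>) -> Vec<TxnOutput> {
    let mut total_gas = 0u64;
    let mut outputs = Vec::with_capacity(txn_gas_costs.len());
    for &cost in txn_gas_costs {
        // Once accumulated gas crosses the (optional) limit, remaining transactions
        // are marked for retry in a later block instead of being executed.
        let over_limit = maybe_block_gas_limit.map_or(false, |limit| total_gas >= limit);
        if over_limit {
            outputs.push(TxnOutput { gas_used: 0, retried: true });
            continue;
        }
        total_gas += cost;
        outputs.push(TxnOutput { gas_used: cost, retried: false });
    }
    outputs
}

fn main() {
    // No limit: everything executes.
    assert!(execute_block(&[10, 20, 30], None).iter().all(|o| !o.retried));
    // With a limit of 25, the third transaction is cut off and marked for retry.
    let out = execute_block(&[10, 20, 30], Some(25));
    assert!(out[2].retried);
}
```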
@@ -1537,14 +1533,15 @@ impl VMExecutor for AptosVM { fn execute_block_sharded( sharded_block_executor: &ShardedBlockExecutor, - transactions: Vec, + transactions: Vec>, state_view: Arc, + maybe_block_gas_limit: Option, ) -> Result, VMStatus> { let log_context = AdapterLogSchema::new(state_view.id(), 0); info!( log_context, "Executing block, transaction count: {}", - transactions.len() + transactions.iter().map(|s| s.num_txns()).sum::() ); let count = transactions.len(); @@ -1552,6 +1549,7 @@ impl VMExecutor for AptosVM { state_view, transactions, AptosVM::get_concurrency_level(), + maybe_block_gas_limit, ); if ret.is_ok() { // Record the histogram count for transactions per block. @@ -1588,11 +1586,11 @@ impl VMValidator for AptosVM { }, }; - let resolver = &state_view.as_move_resolver(); - let mut session = self.0.new_session(resolver, SessionId::txn(&txn), true); + let resolver = self.as_move_resolver(state_view); + let mut session = self.0.new_session(&resolver, SessionId::txn(&txn), true); let validation_result = self.validate_signature_checked_transaction( &mut session, - resolver, + &resolver, &txn, true, &log_context, diff --git a/aptos-move/aptos-vm/src/aptos_vm_impl.rs b/aptos-move/aptos-vm/src/aptos_vm_impl.rs index a8d75c2cc967b..00053214bfb06 100644 --- a/aptos-move/aptos-vm/src/aptos_vm_impl.rs +++ b/aptos-move/aptos-vm/src/aptos_vm_impl.rs @@ -13,16 +13,17 @@ use crate::{ use aptos_framework::RuntimeModuleMetadataV1; use aptos_gas::{ AbstractValueSizeGasParameters, AptosGasParameters, ChangeSetConfigs, FromOnChainGasSchedule, - Gas, NativeGasParameters, StorageGasParameters, + Gas, NativeGasParameters, StorageGasParameters, StoragePricing, }; use aptos_logger::{enabled, prelude::*, Level}; use aptos_state_view::StateView; use aptos_types::{ account_config::{TransactionValidation, APTOS_TRANSACTION_VALIDATION, CORE_CODE_ADDRESS}, chain_id::ChainId, + fee_statement::FeeStatement, on_chain_config::{ ApprovedExecutionHashes, ConfigurationResource, FeatureFlag, Features, GasSchedule, - GasScheduleV2, OnChainConfig, StorageGasSchedule, TimedFeatures, Version, + GasScheduleV2, OnChainConfig, TimedFeatures, Version, }, transaction::{AbortInfo, ExecutionStatus, Multisig, TransactionStatus}, vm_status::{StatusCode, VMStatus}, @@ -32,6 +33,7 @@ use aptos_vm_types::output::VMOutput; use fail::fail_point; use move_binary_format::{errors::VMResult, CompiledModule}; use move_core_types::{ + gas_algebra::NumArgs, language_storage::ModuleId, move_resource::MoveStructType, value::{serialize_values, MoveValue}, @@ -82,40 +84,33 @@ impl AptosVMImpl { let (mut gas_params, gas_feature_version): (Option, u64) = gas_config(&storage); - let storage_gas_schedule = match gas_feature_version { - 0 => None, - _ => StorageGasSchedule::fetch_config(&storage), - }; - - if let (Some(gas_params), Some(storage_gas_schedule)) = - (&mut gas_params, &storage_gas_schedule) - { - match gas_feature_version { - 2..=6 => { - gas_params.natives.table.common.load_base_legacy = - storage_gas_schedule.per_item_read.into(); - gas_params.natives.table.common.load_base_new = 0.into(); - gas_params.natives.table.common.load_per_byte = - storage_gas_schedule.per_byte_read.into(); - gas_params.natives.table.common.load_failure = 0.into(); - }, - 7.. 
=> { - gas_params.natives.table.common.load_base_legacy = 0.into(); - gas_params.natives.table.common.load_base_new = - storage_gas_schedule.per_item_read.into(); - gas_params.natives.table.common.load_per_byte = - storage_gas_schedule.per_byte_read.into(); - gas_params.natives.table.common.load_failure = 0.into(); - }, - _ => (), + let storage_gas_params = if let Some(gas_params) = &mut gas_params { + let storage_gas_params = + StorageGasParameters::new(gas_feature_version, gas_params, &storage); + + if let StoragePricing::V2(pricing) = &storage_gas_params.pricing { + // Overwrite table io gas parameters with global io pricing. + let g = &mut gas_params.natives.table.common; + match gas_feature_version { + 0..=1 => (), + 2..=6 => { + g.load_base_legacy = pricing.per_item_read * NumArgs::new(1); + g.load_base_new = 0.into(); + g.load_per_byte = pricing.per_byte_read; + g.load_failure = 0.into(); + }, + 7.. => { + g.load_base_legacy = 0.into(); + g.load_base_new = pricing.per_item_read * NumArgs::new(1); + g.load_per_byte = pricing.per_byte_read; + g.load_failure = 0.into(); + }, + } } - } - - let storage_gas_params = StorageGasParameters::new( - gas_feature_version, - gas_params.as_ref(), - storage_gas_schedule.as_ref(), - ); + Some(storage_gas_params) + } else { + None + }; // TODO(Gas): Right now, we have to use some dummy values for gas parameters if they are not found on-chain. // This only happens in a edge case that is probably related to write set transactions or genesis, @@ -143,7 +138,7 @@ impl AptosVMImpl { timed_features = timed_features.with_override_profile(profile) } - let inner = MoveVmExt::new( + let move_vm = MoveVmExt::new( native_gas_params, abs_val_size_gas_params, gas_feature_version, @@ -153,18 +148,18 @@ impl AptosVMImpl { ) .expect("should be able to create Move VM; check if there are duplicated natives"); - let mut vm = Self { - move_vm: inner, + let version = Version::fetch_config(&storage); + let transaction_validation = Self::get_transaction_validation(&storage); + + Self { + move_vm, gas_feature_version, gas_params, storage_gas_params, - version: None, - transaction_validation: None, + version, + transaction_validation, features, - }; - vm.version = Version::fetch_config(&storage); - vm.transaction_validation = Self::get_transaction_validation(&StorageAdapter::new(state)); - vm + } } pub(crate) fn mark_loader_cache_as_invalid(&self) { @@ -619,10 +614,10 @@ impl AptosVMImpl { .new_session(resolver, session_id, aggregator_enabled) } - pub fn load_module<'r>( + pub fn load_module( &self, module_id: &ModuleId, - resolver: &'r impl MoveResolverExt, + resolver: &impl MoveResolverExt, ) -> VMResult> { self.move_vm.load_module(module_id, resolver) } @@ -659,21 +654,15 @@ impl<'a> AptosVMInternals<'a> { pub(crate) fn get_transaction_output( ap_cache: &mut A, session: SessionExt, - gas_left: Gas, - txn_data: &TransactionMetadata, + fee_statement: FeeStatement, status: ExecutionStatus, change_set_configs: &ChangeSetConfigs, ) -> Result { - let gas_used = txn_data - .max_gas_amount() - .checked_sub(gas_left) - .expect("Balance should always be less than or equal to max gas amount"); - let change_set = session.finish(ap_cache, change_set_configs)?; Ok(VMOutput::new( change_set, - gas_used.into(), + fee_statement, TransactionStatus::Keep(status), )) } diff --git a/aptos-move/aptos-vm/src/block_executor/mod.rs b/aptos-move/aptos-vm/src/block_executor/mod.rs index 302f64db151c4..abcb982f34459 100644 --- a/aptos-move/aptos-vm/src/block_executor/mod.rs +++ 
b/aptos-move/aptos-vm/src/block_executor/mod.rs @@ -23,8 +23,13 @@ use aptos_block_executor::{ }, }; use aptos_infallible::Mutex; -use aptos_state_view::StateView; +use aptos_state_view::{StateView, StateViewId}; use aptos_types::{ + block_executor::partitioner::{ + BlockExecutorTransactions, SubBlock, SubBlocksForShard, TransactionWithDependencies, + }, + executable::ExecutableTestType, + fee_statement::FeeStatement, state_store::state_key::StateKey, transaction::{Transaction, TransactionOutput, TransactionStatus}, write_set::WriteOp, @@ -126,17 +131,75 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .get() .map_or(0, |output| output.gas_used()) } + + // Return the fee statement of the transaction. + // Should never be called after vm_output is consumed. + fn fee_statement(&self) -> FeeStatement { + self.vm_output + .lock() + .as_ref() + .expect("Output to be set to get fee statement") + .fee_statement() + .clone() + } } pub struct BlockAptosVM(); impl BlockAptosVM { + fn verify_transactions( + transactions: BlockExecutorTransactions, + ) -> BlockExecutorTransactions { + match transactions { + BlockExecutorTransactions::Unsharded(transactions) => { + let signature_verified_txns = transactions + .into_par_iter() + .with_min_len(25) + .map(preprocess_transaction::) + .collect(); + BlockExecutorTransactions::Unsharded(signature_verified_txns) + }, + BlockExecutorTransactions::Sharded(sub_blocks) => { + let shard_id = sub_blocks.shard_id; + let signature_verified_sub_blocks = sub_blocks + .into_sub_blocks() + .into_par_iter() + .map(|sub_block| { + let start_index = sub_block.start_index; + let verified_txns = sub_block + .into_transactions_with_deps() + .into_par_iter() + .with_min_len(25) + .map(|txn_with_deps| { + let TransactionWithDependencies { + txn, + cross_shard_dependencies, + } = txn_with_deps; + let preprocessed_txn = preprocess_transaction::(txn); + TransactionWithDependencies::new( + preprocessed_txn, + cross_shard_dependencies, + ) + }) + .collect(); + SubBlock::new(start_index, verified_txns) + }) + .collect(); + + BlockExecutorTransactions::Sharded(SubBlocksForShard::new( + shard_id, + signature_verified_sub_blocks, + )) + }, + } + } + pub fn execute_block( executor_thread_pool: Arc, - transactions: Vec, + transactions: BlockExecutorTransactions, state_view: &S, concurrency_level: usize, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Result, VMStatus> { let _timer = BLOCK_EXECUTOR_EXECUTE_BLOCK_SECONDS.start_timer(); // Verify the signatures of all the transactions in parallel. @@ -145,28 +208,30 @@ impl BlockAptosVM { // TODO: state sync runs this code but doesn't need to verify signatures let signature_verification_timer = BLOCK_EXECUTOR_SIGNATURE_VERIFICATION_SECONDS.start_timer(); - let signature_verified_block: Vec = - executor_thread_pool.install(|| { - transactions - .into_par_iter() - .with_min_len(25) - .map(preprocess_transaction::) - .collect() - }); + let signature_verified_block = + executor_thread_pool.install(|| Self::verify_transactions(transactions)); drop(signature_verification_timer); - let num_txns = signature_verified_block.len(); - init_speculative_logs(num_txns); + let num_txns = signature_verified_block.num_txns(); + if state_view.id() != StateViewId::Miscellaneous { + // Speculation is disabled in Miscellaneous context, which is used by testing and + // can even lead to concurrent execute_block invocations, leading to errors on flush. 
+ init_speculative_logs(num_txns); + } BLOCK_EXECUTOR_CONCURRENCY.set(concurrency_level as i64); - let executor = BlockExecutor::, S>::new( + let executor = BlockExecutor::< + PreprocessedTransaction, + AptosExecutorTask, + S, + ExecutableTestType, + >::new( concurrency_level, executor_thread_pool, - maybe_gas_limit, + maybe_block_gas_limit, ); let ret = executor.execute_block(state_view, signature_verified_block, state_view); - match ret { Ok(outputs) => { let output_vec: Vec = outputs @@ -177,7 +242,11 @@ impl BlockAptosVM { // Flush the speculative logs of the committed transactions. let pos = output_vec.partition_point(|o| !o.status().is_retry()); - flush_speculative_logs(pos); + if state_view.id() != StateViewId::Miscellaneous { + // Speculation is disabled in Miscellaneous context, which is used by testing and + // can even lead to concurrent execute_block invocations, leading to errors on flush. + flush_speculative_logs(pos); + } Ok(output_vec) }, diff --git a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs index f87511d6a84da..97298ff64fe75 100644 --- a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs +++ b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs @@ -6,7 +6,6 @@ use crate::{ adapter_common::{PreprocessedTransaction, VMAdapter}, aptos_vm::AptosVM, block_executor::AptosTransactionOutput, - data_cache::{AsMoveResolver, StorageAdapter}, }; use aptos_block_executor::task::{ExecutionStatus, ExecutorTask}; use aptos_logger::{enabled, Level}; @@ -43,7 +42,7 @@ impl<'a, S: 'a + StateView + Sync> ExecutorTask for AptosExecutorTask<'a, S> { let _ = vm.load_module( &ModuleId::new(CORE_CODE_ADDRESS, ident_str!("account").to_owned()), - &StorageAdapter::new(argument), + &vm.as_move_resolver(argument), ); Self { @@ -66,7 +65,7 @@ impl<'a, S: 'a + StateView + Sync> ExecutorTask for AptosExecutorTask<'a, S> { match self .vm - .execute_single_transaction(txn, &view.as_move_resolver(), &log_context) + .execute_single_transaction(txn, &self.vm.as_move_resolver(view), &log_context) { Ok((vm_status, mut vm_output, sender)) => { if materialize_deltas { diff --git a/aptos-move/aptos-vm/src/data_cache.rs b/aptos-move/aptos-vm/src/data_cache.rs index b70b89300d127..7f75f3d4d8bcb 100644 --- a/aptos-move/aptos-vm/src/data_cache.rs +++ b/aptos-move/aptos-vm/src/data_cache.rs @@ -21,7 +21,7 @@ use move_core_types::{ account_address::AccountAddress, language_storage::{ModuleId, StructTag}, metadata::Metadata, - resolver::{ModuleResolver, ResourceResolver}, + resolver::{resource_size, ModuleResolver, ResourceResolver}, vm_status::StatusCode, }; use move_table_extension::{TableHandle, TableResolver}; @@ -42,16 +42,45 @@ pub(crate) fn get_resource_group_from_metadata( /// Adapter to convert a `StateView` into a `MoveResolverExt`. 
pub struct StorageAdapter<'a, S> { state_store: &'a S, + accurate_byte_count: bool, + max_binary_format_version: u32, resource_group_cache: RefCell>>>>, } impl<'a, S: StateView> StorageAdapter<'a, S> { + pub fn new_with_cached_config( + state_store: &'a S, + gas_feature_version: u64, + features: &Features, + ) -> Self { + let mut s = Self { + state_store, + accurate_byte_count: false, + max_binary_format_version: 0, + resource_group_cache: RefCell::new(BTreeMap::new()), + }; + if gas_feature_version >= 9 { + s.accurate_byte_count = true; + } + s.max_binary_format_version = get_max_binary_format_version(features, gas_feature_version); + s + } + pub fn new(state_store: &'a S) -> Self { - Self { + let mut s = Self { state_store, + accurate_byte_count: false, + max_binary_format_version: 0, resource_group_cache: RefCell::new(BTreeMap::new()), + }; + let (_, gas_feature_version) = gas_config(&s); + let features = Features::fetch_config(&s).unwrap_or_default(); + if gas_feature_version >= 9 { + s.accurate_byte_count = true; } + s.max_binary_format_version = get_max_binary_format_version(&features, gas_feature_version); + s } pub fn get(&self, access_path: AccessPath) -> PartialVMResult>> { @@ -65,7 +94,7 @@ impl<'a, S: StateView> StorageAdapter<'a, S> { address: &AccountAddress, struct_tag: &StructTag, metadata: &[Metadata], - ) -> Result>, VMError> { + ) -> Result<(Option>, usize), VMError> { let resource_group = get_resource_group_from_metadata(struct_tag, metadata); if let Some(resource_group) = resource_group { let mut cache = self.resource_group_cache.borrow_mut(); @@ -73,24 +102,34 @@ impl<'a, S: StateView> StorageAdapter<'a, S> { if let Some(group_data) = cache.get_mut(&resource_group) { // This resource group is already cached for this address. So just return the // cached value. - return Ok(group_data.get(struct_tag).cloned()); + let buf = group_data.get(struct_tag).cloned(); + let buf_size = resource_size(&buf); + return Ok((buf, buf_size)); } let group_data = self.get_resource_group_data(address, &resource_group)?; if let Some(group_data) = group_data { + let len = if self.accurate_byte_count { + group_data.len() + } else { + 0 + }; let group_data: BTreeMap> = bcs::from_bytes(&group_data) .map_err(|_| { PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .finish(Location::Undefined) })?; let res = group_data.get(struct_tag).cloned(); + let res_size = resource_size(&res); cache.insert(resource_group, group_data); - Ok(res) + Ok((res, res_size + len)) } else { cache.insert(resource_group, BTreeMap::new()); - Ok(None) + Ok((None, 0)) } } else { - self.get_standard_resource(address, struct_tag) + let buf = self.get_standard_resource(address, struct_tag)?; + let buf_size = resource_size(&buf); + Ok((buf, buf_size)) } } } @@ -118,13 +157,8 @@ impl<'a, S: StateView> MoveResolverExt for StorageAdapter<'a, S> { fn release_resource_group_cache( &self, - address: &AccountAddress, - resource_group: &StructTag, - ) -> Option>> { - self.resource_group_cache - .borrow_mut() - .get_mut(address)? - .remove(resource_group) + ) -> BTreeMap>>> { + self.resource_group_cache.take() } } @@ -134,7 +168,7 @@ impl<'a, S: StateView> ResourceResolver for StorageAdapter<'a, S> { address: &AccountAddress, struct_tag: &StructTag, metadata: &[Metadata], - ) -> Result>, Error> { + ) -> anyhow::Result<(Option>, usize)> { Ok(self.get_any_resource(address, struct_tag, metadata)?) 
} } @@ -145,13 +179,9 @@ impl<'a, S: StateView> ModuleResolver for StorageAdapter<'a, S> { Ok(Some(bytes)) => bytes, _ => return vec![], }; - let (_, gas_feature_version) = gas_config(self); - let features = Features::fetch_config(self).unwrap_or_default(); - let max_binary_format_version = - get_max_binary_format_version(&features, gas_feature_version); let module = match CompiledModule::deserialize_with_max_version( &module_bytes, - max_binary_format_version, + self.max_binary_format_version, ) { Ok(module) => module, _ => return vec![], diff --git a/aptos-move/aptos-vm/src/errors.rs b/aptos-move/aptos-vm/src/errors.rs index 49dc6796a2329..919748dc28a14 100644 --- a/aptos-move/aptos-vm/src/errors.rs +++ b/aptos-move/aptos-vm/src/errors.rs @@ -132,6 +132,8 @@ pub fn convert_prologue_error( }; VMStatus::Error(new_major_status, None) }, + // Storage error can be a result of speculation failure so throw the error back for caller to handle. + e @ VMStatus::Error(StatusCode::STORAGE_ERROR, _) => e, status @ VMStatus::ExecutionFailure { .. } | status @ VMStatus::Error(..) => { speculative_error!( log_context, @@ -176,7 +178,8 @@ pub fn convert_epilogue_error( VMStatus::Error(StatusCode::UNEXPECTED_ERROR_FROM_KNOWN_MOVE_FUNCTION, None) }, }, - + // Storage error can be a result of speculation failure so throw the error back for caller to handle. + e @ VMStatus::Error(StatusCode::STORAGE_ERROR, _) => e, status => { speculative_error!( log_context, @@ -198,7 +201,8 @@ pub fn expect_only_successful_execution( let status = error.into_vm_status(); Err(match status { VMStatus::Executed => VMStatus::Executed, - + // Storage error can be a result of speculation failure so throw the error back for caller to handle. + e @ VMStatus::Error(StatusCode::STORAGE_ERROR, _) => e, status => { // Only trigger a warning here as some errors could be a result of the speculative parallel execution. // We will report the errors after we obtained the final transaction output in update_counters_for_processed_chunk diff --git a/aptos-move/aptos-vm/src/lib.rs b/aptos-move/aptos-vm/src/lib.rs index 0152b7df53543..67fed2abd7ddc 100644 --- a/aptos-move/aptos-vm/src/lib.rs +++ b/aptos-move/aptos-vm/src/lib.rs @@ -125,6 +125,7 @@ pub use crate::aptos_vm::AptosVM; use crate::sharded_block_executor::ShardedBlockExecutor; use aptos_state_view::StateView; use aptos_types::{ + block_executor::partitioner::SubBlocksForShard, transaction::{SignedTransaction, Transaction, TransactionOutput, VMValidatorResult}, vm_status::VMStatus, }; @@ -152,21 +153,15 @@ pub trait VMExecutor: Send + Sync { fn execute_block( transactions: Vec, state_view: &(impl StateView + Sync), - ) -> Result, VMStatus>; - - /// Executes a block of transactions with per_block_gas_limit - /// and returns output for each one of them. - fn execute_block_with_gas_limit( - transactions: Vec, - state_view: &(impl StateView + Sync), - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Result, VMStatus>; /// Executes a block of transactions using a sharded block executor and returns the results. 
fn execute_block_sharded( sharded_block_executor: &ShardedBlockExecutor, - transactions: Vec, + block: Vec>, state_view: Arc, + maybe_block_gas_limit: Option, ) -> Result, VMStatus>; } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs index 1d22cdadc4493..a386a2f4ec780 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs @@ -1,8 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -///! MoveVM and Session wrapped, to make sure Aptos natives and extensions are always installed and -///! taken care of after session finish. +//! MoveVM and Session wrapped, to make sure Aptos natives and extensions are always installed and +//! taken care of after session finish. mod resolver; mod respawned_session; mod session; diff --git a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs index 1081e9db0decd..f90654d6bc428 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs @@ -29,9 +29,7 @@ pub trait MoveResolverExt: fn release_resource_group_cache( &self, - address: &AccountAddress, - resource_group: &StructTag, - ) -> Option>>; + ) -> BTreeMap>>>; // Move to API does not belong here fn is_resource_group(&self, struct_tag: &StructTag) -> bool { diff --git a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs index 31a0b34b22d01..7f901c03b4839 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs @@ -3,7 +3,7 @@ use crate::{ aptos_vm_impl::AptosVMImpl, - data_cache::{AsMoveResolver, StorageAdapter}, + data_cache::StorageAdapter, move_vm_ext::{SessionExt, SessionId}, }; use anyhow::{bail, Result}; @@ -44,7 +44,13 @@ impl<'r, 'l> RespawnedSession<'r, 'l> { Ok(RespawnedSessionBuilder { state_view, - resolver_builder: |state_view| state_view.as_move_resolver(), + resolver_builder: |state_view| { + StorageAdapter::new_with_cached_config( + state_view, + vm.get_gas_feature_version(), + vm.get_features(), + ) + }, session_builder: |resolver| Some(vm.new_session(resolver, session_id, true)), } .build()) diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session.rs b/aptos-move/aptos-vm/src/move_vm_ext/session.rs index ec64ec725dbb2..63b748f68e458 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session.rs @@ -38,6 +38,7 @@ use move_table_extension::{NativeTableContext, TableChangeSet}; use move_vm_runtime::{move_vm::MoveVM, session::Session}; use serde::{Deserialize, Serialize}; use std::{ + borrow::BorrowMut, collections::BTreeMap, ops::{Deref, DerefMut}, sync::Arc, @@ -199,19 +200,21 @@ impl<'r, 'l> SessionExt<'r, 'l> { let mut change_set_filtered = MoveChangeSet::new(); let mut resource_group_change_set = MoveChangeSet::new(); + let mut resource_group_cache = remote.release_resource_group_cache(); for (addr, account_changeset) in change_set.into_inner() { let mut resource_groups: BTreeMap = BTreeMap::new(); let mut resources_filtered = BTreeMap::new(); let (modules, resources) = account_changeset.into_inner(); for (struct_tag, blob_op) in resources { - let resource_group = runtime.with_module_metadata(&struct_tag.module_id(), |md| { - get_resource_group_from_metadata(&struct_tag, md) - }); + let resource_group_tag = runtime + .with_module_metadata(&struct_tag.module_id(), |md| { + 
get_resource_group_from_metadata(&struct_tag, md) + }); - if let Some(resource_group) = resource_group { + if let Some(resource_group_tag) = resource_group_tag { resource_groups - .entry(resource_group) + .entry(resource_group_tag) .or_insert_with(AccountChangeSet::new) .add_resource_op(struct_tag, blob_op) .map_err(|_| common_error())?; @@ -227,9 +230,11 @@ impl<'r, 'l> SessionExt<'r, 'l> { ) .map_err(|_| common_error())?; - for (resource_tag, resources) in resource_groups { - let mut source_data = remote - .release_resource_group_cache(&addr, &resource_tag) + for (resource_group_tag, resources) in resource_groups { + let mut source_data = resource_group_cache + .borrow_mut() + .get_mut(&addr) + .and_then(|t| t.remove(&resource_group_tag)) .unwrap_or_default(); let create = source_data.is_empty(); @@ -259,7 +264,7 @@ impl<'r, 'l> SessionExt<'r, 'l> { MoveStorageOp::Modify(bcs::to_bytes(&source_data).map_err(|_| common_error())?) }; resource_group_change_set - .add_resource_op(addr, resource_tag, op) + .add_resource_op(addr, resource_group_tag, op) .map_err(|_| common_error())?; } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs index 60c7fce288a62..c4e6df56eeabd 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs @@ -73,6 +73,7 @@ impl MoveVmExt { paranoid_type_checks: crate::AptosVM::get_paranoid_checks(), enable_invariant_violation_check_in_swap_loc, type_size_limit, + max_value_nest_depth: Some(128), }, )?, chain_id, diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/block_executor_client.rs b/aptos-move/aptos-vm/src/sharded_block_executor/block_executor_client.rs new file mode 100644 index 0000000000000..aa31d2dc0f309 --- /dev/null +++ b/aptos-move/aptos-vm/src/sharded_block_executor/block_executor_client.rs @@ -0,0 +1,65 @@ +// Copyright © Aptos Foundation + +use crate::block_executor::BlockAptosVM; +use aptos_state_view::StateView; +use aptos_types::{ + block_executor::partitioner::{BlockExecutorTransactions, SubBlocksForShard}, + transaction::{Transaction, TransactionOutput}, +}; +use move_core_types::vm_status::VMStatus; +use std::sync::Arc; + +pub trait BlockExecutorClient { + fn execute_block( + &self, + transactions: SubBlocksForShard, + state_view: &S, + concurrency_level: usize, + maybe_block_gas_limit: Option, + ) -> Result, VMStatus>; +} + +impl BlockExecutorClient for LocalExecutorClient { + fn execute_block( + &self, + sub_blocks: SubBlocksForShard, + state_view: &S, + concurrency_level: usize, + maybe_block_gas_limit: Option, + ) -> Result, VMStatus> { + BlockAptosVM::execute_block( + self.executor_thread_pool.clone(), + BlockExecutorTransactions::Sharded(sub_blocks), + state_view, + concurrency_level, + maybe_block_gas_limit, + ) + } +} + +pub struct LocalExecutorClient { + executor_thread_pool: Arc, +} + +impl LocalExecutorClient { + pub fn new(num_threads: usize) -> Self { + let executor_thread_pool = Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .build() + .unwrap(), + ); + + Self { + executor_thread_pool, + } + } + + pub fn create_local_clients(num_shards: usize, num_threads: Option) -> Vec { + let num_threads = num_threads + .unwrap_or_else(|| (num_cpus::get() as f64 / num_shards as f64).ceil() as usize); + (0..num_shards) + .map(|_| LocalExecutorClient::new(num_threads)) + .collect() + } +} diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/counters.rs b/aptos-move/aptos-vm/src/sharded_block_executor/counters.rs new 
file mode 100644 index 0000000000000..85d0b2d6aacd4 --- /dev/null +++ b/aptos-move/aptos-vm/src/sharded_block_executor/counters.rs @@ -0,0 +1,14 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use aptos_metrics_core::{register_int_gauge, IntGauge}; +use once_cell::sync::Lazy; + +pub static NUM_EXECUTOR_SHARDS: Lazy = Lazy::new(|| { + register_int_gauge!( + "num_executor_shards", + "Number of shards for the sharded block executor" + ) + .unwrap() +}); diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/executor_shard.rs b/aptos-move/aptos-vm/src/sharded_block_executor/executor_shard.rs index 2206e00c62ae6..b70fc4ec24967 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/executor_shard.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/executor_shard.rs @@ -2,43 +2,33 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{block_executor::BlockAptosVM, sharded_block_executor::ExecutorShardCommand}; +use crate::sharded_block_executor::{ + block_executor_client::BlockExecutorClient, ExecutorShardCommand, +}; use aptos_logger::trace; use aptos_state_view::StateView; use aptos_types::transaction::TransactionOutput; use aptos_vm_logging::disable_speculative_logging; use move_core_types::vm_status::VMStatus; -use std::sync::{ - mpsc::{Receiver, Sender}, - Arc, -}; +use std::sync::mpsc::{Receiver, Sender}; /// A remote block executor that receives transactions from a channel and executes them in parallel. /// Currently it runs in the local machine and it will be further extended to run in a remote machine. -pub struct ExecutorShard { +pub struct ExecutorShard { shard_id: usize, - executor_thread_pool: Arc, + executor_client: E, command_rx: Receiver>, result_tx: Sender, VMStatus>>, - maybe_gas_limit: Option, } -impl ExecutorShard { +impl ExecutorShard { pub fn new( num_executor_shards: usize, + executor_client: E, shard_id: usize, - num_executor_threads: usize, command_rx: Receiver>, result_tx: Sender, VMStatus>>, - maybe_gas_limit: Option, ) -> Self { - let executor_thread_pool = Arc::new( - rayon::ThreadPoolBuilder::new() - .num_threads(num_executor_threads) - .build() - .unwrap(), - ); - if num_executor_shards > 1 { // todo: speculative logging is not yet compatible with sharded block executor. 
disable_speculative_logging(); @@ -46,10 +36,9 @@ impl ExecutorShard { Self { shard_id, - executor_thread_pool, + executor_client, command_rx, result_tx, - maybe_gas_limit, } } @@ -57,22 +46,22 @@ impl ExecutorShard { loop { let command = self.command_rx.recv().unwrap(); match command { - ExecutorShardCommand::ExecuteBlock( + ExecutorShardCommand::ExecuteSubBlocks( state_view, transactions, concurrency_level_per_shard, + maybe_block_gas_limit, ) => { trace!( "Shard {} received ExecuteBlock command of block size {} ", self.shard_id, - transactions.len() + transactions.num_txns() ); - let ret = BlockAptosVM::execute_block( - self.executor_thread_pool.clone(), + let ret = self.executor_client.execute_block( transactions, state_view.as_ref(), concurrency_level_per_shard, - self.maybe_gas_limit, + maybe_block_gas_limit, ); drop(state_view); self.result_tx.send(ret).unwrap(); diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs b/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs index 87e913c230f55..14225c6806fa0 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs @@ -2,13 +2,14 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::sharded_block_executor::{ - block_partitioner::{BlockPartitioner, UniformPartitioner}, - executor_shard::ExecutorShard, -}; +use crate::sharded_block_executor::{counters::NUM_EXECUTOR_SHARDS, executor_shard::ExecutorShard}; use aptos_logger::{error, info, trace}; use aptos_state_view::StateView; -use aptos_types::transaction::{Transaction, TransactionOutput}; +use aptos_types::{ + block_executor::partitioner::SubBlocksForShard, + transaction::{Transaction, TransactionOutput}, +}; +use block_executor_client::BlockExecutorClient; use move_core_types::vm_status::VMStatus; use std::{ marker::PhantomData, @@ -19,58 +20,49 @@ use std::{ thread, }; -mod block_partitioner; +pub mod block_executor_client; +mod counters; mod executor_shard; /// A wrapper around sharded block executors that manages multiple shards and aggregates the results. 
pub struct ShardedBlockExecutor { num_executor_shards: usize, - partitioner: Arc, command_txs: Vec>>, shard_threads: Vec>, result_rxs: Vec, VMStatus>>>, phantom: PhantomData, } -pub enum ExecutorShardCommand { - ExecuteBlock(Arc, Vec, usize), +pub enum ExecutorShardCommand { + ExecuteSubBlocks(Arc, SubBlocksForShard, usize, Option), Stop, } impl ShardedBlockExecutor { - pub fn new( - num_executor_shards: usize, - executor_threads_per_shard: Option, - maybe_gas_limit: Option, - ) -> Self { - assert!(num_executor_shards > 0, "num_executor_shards must be > 0"); - let executor_threads_per_shard = executor_threads_per_shard.unwrap_or_else(|| { - (num_cpus::get() as f64 / num_executor_shards as f64).ceil() as usize - }); + pub fn new(executor_clients: Vec) -> Self { let mut command_txs = vec![]; let mut result_rxs = vec![]; let mut shard_join_handles = vec![]; - for i in 0..num_executor_shards { + let num_executor_shards = executor_clients.len(); + for (i, executor_client) in executor_clients.into_iter().enumerate() { let (transactions_tx, transactions_rx) = std::sync::mpsc::channel(); let (result_tx, result_rx) = std::sync::mpsc::channel(); command_txs.push(transactions_tx); result_rxs.push(result_rx); shard_join_handles.push(spawn_executor_shard( num_executor_shards, + executor_client, i, - executor_threads_per_shard, transactions_rx, result_tx, - maybe_gas_limit, )); } info!( - "Creating a new ShardedBlockExecutor with {} shards and concurrency per shard {}", - num_executor_shards, executor_threads_per_shard + "Creating a new ShardedBlockExecutor with {} shards", + num_executor_shards ); Self { num_executor_shards, - partitioner: Arc::new(UniformPartitioner {}), command_txs, shard_threads: shard_join_handles, result_rxs, @@ -83,26 +75,31 @@ impl ShardedBlockExecutor { pub fn execute_block( &self, state_view: Arc, - block: Vec, + block: Vec>, concurrency_level_per_shard: usize, + maybe_block_gas_limit: Option, ) -> Result, VMStatus> { - let block_partitions = self.partitioner.partition(block, self.num_executor_shards); - // Number of partitions might be smaller than the number of executor shards in case of - // block size is smaller than number of executor shards. 
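`ShardedBlockExecutor` above keeps one command channel and one result channel per shard and now expects the block to arrive pre-partitioned into exactly `num_executor_shards` sub-blocks. A rough, std-only sketch of that fan-out/fan-in pattern follows; `Command`, the `u64` "transactions", and the doubling "execution" are placeholders for the real `SubBlocksForShard` and VM execution, not the production types.

```rust
// Simplified fan-out sketch: one command channel and one result channel per shard,
// with results re-assembled in shard order for a deterministic aggregate.
use std::sync::mpsc;
use std::thread;

enum Command {
    Execute(Vec<u64>), // stand-in for a shard's sub-block of transactions
    Stop,
}

fn main() {
    let num_shards = 4;
    let mut command_txs = Vec::new();
    let mut result_rxs = Vec::new();
    let mut handles = Vec::new();

    for _ in 0..num_shards {
        let (cmd_tx, cmd_rx) = mpsc::channel::<Command>();
        let (res_tx, res_rx) = mpsc::channel::<Vec<u64>>();
        command_txs.push(cmd_tx);
        result_rxs.push(res_rx);
        handles.push(thread::spawn(move || loop {
            match cmd_rx.recv().unwrap() {
                Command::Execute(txns) => {
                    // Pretend "execution" doubles each value; a real shard would run the VM.
                    let out: Vec<u64> = txns.into_iter().map(|t| t * 2).collect();
                    res_tx.send(out).unwrap();
                },
                Command::Stop => break,
            }
        }));
    }

    // The block must already be partitioned into exactly num_shards sub-blocks.
    let sub_blocks: Vec<Vec<u64>> = vec![vec![1, 2], vec![3], vec![4, 5], vec![6]];
    assert_eq!(sub_blocks.len(), num_shards);
    for (i, sub_block) in sub_blocks.into_iter().enumerate() {
        command_txs[i].send(Command::Execute(sub_block)).unwrap();
    }

    // Collect results in shard order.
    let mut aggregated = Vec::new();
    for rx in &result_rxs {
        aggregated.extend(rx.recv().unwrap());
    }
    assert_eq!(aggregated, vec![2, 4, 6, 8, 10, 12]);

    // Shut the shards down.
    for tx in &command_txs {
        tx.send(Command::Stop).unwrap();
    }
    for handle in handles {
        handle.join().unwrap();
    }
}
```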
- let num_partitions = block_partitions.len(); - for (i, transactions) in block_partitions.into_iter().enumerate() { + NUM_EXECUTOR_SHARDS.set(self.num_executor_shards as i64); + assert_eq!( + self.num_executor_shards, + block.len(), + "Block must be partitioned into {} sub-blocks", + self.num_executor_shards + ); + for (i, sub_blocks_for_shard) in block.into_iter().enumerate() { self.command_txs[i] - .send(ExecutorShardCommand::ExecuteBlock( + .send(ExecutorShardCommand::ExecuteSubBlocks( state_view.clone(), - transactions, + sub_blocks_for_shard, concurrency_level_per_shard, + maybe_block_gas_limit, )) .unwrap(); } // wait for all remote executors to send the result back and append them in order by shard id let mut aggregated_results = vec![]; trace!("ShardedBlockExecutor Waiting for results"); - for i in 0..num_partitions { + for i in 0..self.num_executor_shards { let result = self.result_rxs[i].recv().unwrap(); aggregated_results.extend(result?); } @@ -129,13 +126,15 @@ impl Drop for ShardedBlockExecutor { } } -fn spawn_executor_shard( +fn spawn_executor_shard< + S: StateView + Sync + Send + 'static, + E: BlockExecutorClient + Sync + Send + 'static, +>( num_executor_shards: usize, + executor_client: E, shard_id: usize, - concurrency_level: usize, command_rx: Receiver>, result_tx: Sender, VMStatus>>, - maybe_gas_limit: Option, ) -> thread::JoinHandle<()> { // create and start a new executor shard in a separate thread thread::Builder::new() @@ -143,11 +142,10 @@ fn spawn_executor_shard( .spawn(move || { let executor_shard = ExecutorShard::new( num_executor_shards, + executor_client, shard_id, - concurrency_level, command_rx, result_tx, - maybe_gas_limit, ); executor_shard.start(); }) diff --git a/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs b/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs index 3e69987cd8ac9..3807eceb29e4d 100644 --- a/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs +++ b/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs @@ -7,7 +7,11 @@ //! for strings whether they consist of correct characters. use crate::{move_vm_ext::SessionExt, VMStatus}; -use move_binary_format::{errors::VMError, file_format_common::read_uleb128_as_u64}; +use move_binary_format::{ + errors::{Location, PartialVMError}, + file_format::FunctionDefinitionIndex, + file_format_common::read_uleb128_as_u64, +}; use move_core_types::{ account_address::AccountAddress, ident_str, @@ -123,10 +127,9 @@ pub(crate) fn validate_combine_signer_and_txn_args( } let allowed_structs = get_allowed_structs(are_struct_constructors_enabled); - // validate all non_signer params - let mut needs_construction = vec![]; - for (idx, ty) in func.parameters[signer_param_cnt..].iter().enumerate() { - let (valid, construction) = is_valid_txn_arg( + // Need to keep this here to ensure we return the historic correct error code for replay + for ty in func.parameters[signer_param_cnt..].iter() { + let valid = is_valid_txn_arg( session, &ty.subst(&func.type_arguments).unwrap(), allowed_structs, @@ -137,9 +140,6 @@ pub(crate) fn validate_combine_signer_and_txn_args( None, )); } - if construction { - needs_construction.push(idx + signer_param_cnt); - } } if (signer_param_cnt + args.len()) != func.parameters.len() { @@ -148,34 +148,40 @@ pub(crate) fn validate_combine_signer_and_txn_args( None, )); } - // if function doesn't require signer, we reuse txn args - // if the function require signer, we check senders number same as signers - // and then combine senders with txn args. 
- let mut combined_args = if signer_param_cnt == 0 { + + // If the invoked function expects one or more signers, we need to check that the number of + // signers actually passed is matching first to maintain backward compatibility before + // moving on to the validation of non-signer args. + // the number of txn senders should be the same number of signers + if signer_param_cnt > 0 && senders.len() != signer_param_cnt { + return Err(VMStatus::Error( + StatusCode::NUMBER_OF_SIGNER_ARGUMENTS_MISMATCH, + None, + )); + } + + // This also validates that the args are valid. If they are structs, they have to be allowed + // and must be constructed successfully. If construction fails, this would fail with a + // FAILED_TO_DESERIALIZE_ARGUMENT error. + let args = construct_args( + session, + &func.parameters[signer_param_cnt..], + args, + &func.type_arguments, + allowed_structs, + false, + )?; + + // Combine signer and non-signer arguments. + let combined_args = if signer_param_cnt == 0 { args } else { - // the number of txn senders should be the same number of signers - if senders.len() != signer_param_cnt { - return Err(VMStatus::Error( - StatusCode::NUMBER_OF_SIGNER_ARGUMENTS_MISMATCH, - None, - )); - } senders .into_iter() .map(|s| MoveValue::Signer(s).simple_serialize().unwrap()) .chain(args) .collect() }; - if !needs_construction.is_empty() { - construct_args( - session, - &needs_construction, - &mut combined_args, - func, - allowed_structs, - )?; - } Ok(combined_args) } @@ -184,21 +190,21 @@ pub(crate) fn is_valid_txn_arg( session: &SessionExt, typ: &Type, allowed_structs: &ConstructorMap, -) -> (bool, bool) { +) -> bool { use move_vm_types::loaded_data::runtime_types::Type::*; match typ { - Bool | U8 | U16 | U32 | U64 | U128 | U256 | Address => (true, false), + Bool | U8 | U16 | U32 | U64 | U128 | U256 | Address => true, Vector(inner) => is_valid_txn_arg(session, inner, allowed_structs), Struct(idx) | StructInstantiation(idx, _) => { if let Some(st) = session.get_struct_type(*idx) { let full_name = format!("{}::{}", st.module.short_str_lossless(), st.name); - (allowed_structs.contains_key(&full_name), true) + allowed_structs.contains_key(&full_name) } else { - (false, false) + false } }, - Signer | Reference(_) | MutableReference(_) | TyParam(_) => (false, false), + Signer | Reference(_) | MutableReference(_) | TyParam(_) => false, } } @@ -207,41 +213,81 @@ pub(crate) fn is_valid_txn_arg( // TODO: This needs a more solid story and a tighter integration with the VM. 
pub(crate) fn construct_args( session: &mut SessionExt, - idxs: &[usize], - args: &mut [Vec], - func: &LoadedFunctionInstantiation, + types: &[Type], + args: Vec>, + ty_args: &[Type], allowed_structs: &ConstructorMap, -) -> Result<(), VMStatus> { + is_view: bool, +) -> Result>, VMStatus> { // Perhaps in a future we should do proper gas metering here let mut gas_meter = UnmeteredGasMeter; - for (idx, ty) in func.parameters.iter().enumerate() { - if !idxs.contains(&idx) { - continue; - } - let arg = &mut args[idx]; - let mut cursor = Cursor::new(&arg[..]); - let mut new_arg = vec![]; - recursively_construct_arg( + let mut res_args = vec![]; + if types.len() != args.len() { + return Err(invalid_signature()); + } + for (ty, arg) in types.iter().zip(args.into_iter()) { + let arg = construct_arg( session, - &ty.subst(&func.type_arguments).unwrap(), + &ty.subst(ty_args).unwrap(), allowed_structs, - &mut cursor, + arg, &mut gas_meter, - &mut new_arg, + is_view, )?; - // Check cursor has parsed everything - // Unfortunately, is_empty is only enabled in nightly, so we check this way. - if cursor.position() != arg.len() as u64 { - return Err(VMStatus::Error( - StatusCode::FAILED_TO_DESERIALIZE_ARGUMENT, - Some(String::from( - "The serialized arguments to constructor contained extra data", - )), - )); - } - *arg = new_arg; + res_args.push(arg); + } + Ok(res_args) +} + +fn invalid_signature() -> VMStatus { + VMStatus::Error(StatusCode::INVALID_MAIN_FUNCTION_SIGNATURE, None) +} + +fn construct_arg( + session: &mut SessionExt, + ty: &Type, + allowed_structs: &ConstructorMap, + arg: Vec, + gas_meter: &mut impl GasMeter, + is_view: bool, +) -> Result, VMStatus> { + use move_vm_types::loaded_data::runtime_types::Type::*; + match ty { + Bool | U8 | U16 | U32 | U64 | U128 | U256 | Address => Ok(arg), + Vector(_) | Struct(_) | StructInstantiation(_, _) => { + let mut cursor = Cursor::new(&arg[..]); + let mut new_arg = vec![]; + let mut max_invocations = 10; // Read from config in the future + recursively_construct_arg( + session, + ty, + allowed_structs, + &mut cursor, + gas_meter, + &mut max_invocations, + &mut new_arg, + )?; + // Check cursor has parsed everything + // Unfortunately, is_empty is only enabled in nightly, so we check this way. + if cursor.position() != arg.len() as u64 { + return Err(VMStatus::Error( + StatusCode::FAILED_TO_DESERIALIZE_ARGUMENT, + Some(String::from( + "The serialized arguments to constructor contained extra data", + )), + )); + } + Ok(new_arg) + }, + Signer => { + if is_view { + Ok(arg) + } else { + Err(invalid_signature()) + } + }, + Reference(_) | MutableReference(_) | TyParam(_) => Err(invalid_signature()), } - Ok(()) } // A Cursor is used to recursively walk the serialized arg manually and correctly. 
In effect we @@ -253,6 +299,7 @@ pub(crate) fn recursively_construct_arg( allowed_structs: &ConstructorMap, cursor: &mut Cursor<&[u8]>, gas_meter: &mut impl GasMeter, + max_invocations: &mut u64, arg: &mut Vec, ) -> Result<(), VMStatus> { use move_vm_types::loaded_data::runtime_types::Type::*; @@ -263,7 +310,15 @@ pub(crate) fn recursively_construct_arg( let mut len = get_len(cursor)?; serialize_uleb128(len, arg); while len > 0 { - recursively_construct_arg(session, inner, allowed_structs, cursor, gas_meter, arg)?; + recursively_construct_arg( + session, + inner, + allowed_structs, + cursor, + gas_meter, + max_invocations, + arg, + )?; len -= 1; } }, @@ -272,11 +327,11 @@ pub(crate) fn recursively_construct_arg( // performed in `is_valid_txn_arg` let st = session .get_struct_type(*idx) - .expect("unreachable, type must exist"); + .ok_or_else(invalid_signature)?; let full_name = format!("{}::{}", st.module.short_str_lossless(), st.name); let constructor = allowed_structs .get(&full_name) - .expect("unreachable: struct must be allowed"); + .ok_or_else(invalid_signature)?; // By appending the BCS to the output parameter we construct the correct BCS format // of the argument. arg.append(&mut validate_and_construct( @@ -286,6 +341,7 @@ pub(crate) fn recursively_construct_arg( allowed_structs, cursor, gas_meter, + max_invocations, )?); }, Bool | U8 => read_n_bytes(1, cursor, arg)?, @@ -294,11 +350,8 @@ pub(crate) fn recursively_construct_arg( U64 => read_n_bytes(8, cursor, arg)?, U128 => read_n_bytes(16, cursor, arg)?, U256 | Address => read_n_bytes(32, cursor, arg)?, - Signer | Reference(_) | MutableReference(_) | TyParam(_) => { - unreachable!("We already checked for this in is-valid-txn-arg") - }, + Signer | Reference(_) | MutableReference(_) | TyParam(_) => return Err(invalid_signature()), }; - Ok(()) } @@ -313,7 +366,45 @@ fn validate_and_construct( allowed_structs: &ConstructorMap, cursor: &mut Cursor<&[u8]>, gas_meter: &mut impl GasMeter, + max_invocations: &mut u64, ) -> Result, VMStatus> { + if *max_invocations == 0 { + return Err(VMStatus::Error( + StatusCode::FAILED_TO_DESERIALIZE_ARGUMENT, + None, + )); + } + // HACK mitigation of performance attack + // To maintain compatibility with vector or so on, we need to allow unlimited strings. + // So we do not count the string constructor against the max_invocations, instead we + // shortcut the string case to avoid the performance attack. + if constructor.func_name.as_str() == "utf8" { + let constructor_error = || { + // A slight hack, to prevent additional piping of the feature flag through all + // function calls. We know the feature is active when more structs then just strings are + // allowed. 
+ let are_struct_constructors_enabled = allowed_structs.len() > 1; + if are_struct_constructors_enabled { + PartialVMError::new(StatusCode::ABORTED) + .with_sub_status(1) + .at_code_offset(FunctionDefinitionIndex::new(0), 0) + .finish(Location::Module(constructor.module_id.clone())) + .into_vm_status() + } else { + VMStatus::Error(StatusCode::FAILED_TO_DESERIALIZE_ARGUMENT, None) + } + }; + // short cut for the utf8 constructor, which is a special case + let len = get_len(cursor)?; + let mut arg = vec![]; + read_n_bytes(len, cursor, &mut arg)?; + std::str::from_utf8(&arg).map_err(|_| constructor_error())?; + return bcs::to_bytes(&arg) + .map_err(|_| VMStatus::Error(StatusCode::FAILED_TO_DESERIALIZE_ARGUMENT, None)); + } else { + *max_invocations -= 1; + } + let (function, instantiation) = session.load_function_with_type_arg_inference( &constructor.module_id, constructor.func_name, @@ -328,24 +419,13 @@ fn validate_and_construct( allowed_structs, cursor, gas_meter, + max_invocations, &mut arg, )?; args.push(arg); } - let constructor_error = |e: VMError| { - // A slight hack, to prevent additional piping of the feature flag through all - // function calls. We know the feature is active when more structs then just strings are - // allowed. - let are_struct_constructors_enabled = allowed_structs.len() > 1; - if are_struct_constructors_enabled { - e.into_vm_status() - } else { - VMStatus::Error(StatusCode::FAILED_TO_DESERIALIZE_ARGUMENT, None) - } - }; - let serialized_result = session - .execute_instantiated_function(function, instantiation, args, gas_meter) - .map_err(constructor_error)?; + let serialized_result = + session.execute_instantiated_function(function, instantiation, args, gas_meter)?; let mut ret_vals = serialized_result.return_values; // We know ret_vals.len() == 1 let deserialize_error = VMStatus::Error( diff --git a/aptos-move/aptos-vm/src/verifier/view_function.rs b/aptos-move/aptos-vm/src/verifier/view_function.rs index 1870fddcf9ddd..00cc38e0efd3c 100644 --- a/aptos-move/aptos-vm/src/verifier/view_function.rs +++ b/aptos-move/aptos-vm/src/verifier/view_function.rs @@ -9,7 +9,6 @@ use aptos_framework::RuntimeModuleMetadataV1; use move_binary_format::errors::{PartialVMError, PartialVMResult}; use move_core_types::{identifier::IdentStr, vm_status::StatusCode}; use move_vm_runtime::session::LoadedFunctionInstantiation; -use move_vm_types::loaded_data::runtime_types::Type; /// Based on the function attributes in the module metadata, determine whether a /// function is a view function. @@ -31,7 +30,7 @@ pub fn determine_is_view( /// function, and validates the arguments. pub(crate) fn validate_view_function( session: &mut SessionExt, - mut args: Vec>, + args: Vec>, fun_name: &IdentStr, fun_inst: &LoadedFunctionInstantiation, module_metadata: Option<&RuntimeModuleMetadataV1>, @@ -55,43 +54,14 @@ pub(crate) fn validate_view_function( } let allowed_structs = get_allowed_structs(struct_constructors_feature); - // Validate arguments. We allow all what transaction allows, in addition, signers can - // be passed. Some arguments (e.g. utf8 strings) need validation which happens here. 
- let mut needs_construction = vec![]; - for (idx, ty) in fun_inst.parameters.iter().enumerate() { - match ty { - Type::Signer => continue, - Type::Reference(inner_type) if matches!(&**inner_type, Type::Signer) => continue, - _ => { - let (valid, construction) = - transaction_arg_validation::is_valid_txn_arg(session, ty, allowed_structs); - if !valid { - return Err( - PartialVMError::new(StatusCode::INVALID_MAIN_FUNCTION_SIGNATURE) - .with_message("invalid view function argument".to_string()), - ); - } - if construction { - needs_construction.push(idx); - } - }, - } - } - if !needs_construction.is_empty() - && transaction_arg_validation::construct_args( - session, - &needs_construction, - &mut args, - fun_inst, - allowed_structs, - ) - .is_err() - { - return Err( - PartialVMError::new(StatusCode::INVALID_MAIN_FUNCTION_SIGNATURE) - .with_message("invalid view function argument: failed validation".to_string()), - ); - } - + let args = transaction_arg_validation::construct_args( + session, + &fun_inst.parameters, + args, + &fun_inst.type_arguments, + allowed_structs, + true, + ) + .map_err(|e| PartialVMError::new(e.status_code()))?; Ok(args) } diff --git a/aptos-move/block-executor/src/counters.rs b/aptos-move/block-executor/src/counters.rs index b7b0edf6ca412..d8ef23ca3fbe9 100644 --- a/aptos-move/block-executor/src/counters.rs +++ b/aptos-move/block-executor/src/counters.rs @@ -2,10 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_metrics_core::{ - exponential_buckets, register_histogram, register_int_counter, Histogram, IntCounter, + exponential_buckets, register_histogram, register_histogram_vec, register_int_counter, + Histogram, HistogramVec, IntCounter, }; use once_cell::sync::Lazy; +pub struct GasType; + +impl GasType { + pub const EXECUTION_GAS: &'static str = "execution_gas"; + pub const IO_GAS: &'static str = "io_gas"; + pub const NON_STORAGE_GAS: &'static str = "non_storage_gas"; + pub const STORAGE_FEE: &'static str = "storage_fee"; + pub const STORAGE_GAS: &'static str = "storage_gas"; + pub const TOTAL_GAS: &'static str = "total_gas"; +} + +/// Record the block gas during parallel execution. +pub fn observe_parallel_execution_block_gas(cost: u64, gas_type: &'static str) { + PARALLEL_BLOCK_GAS + .with_label_values(&[gas_type]) + .observe(cost as f64); +} + +/// Record the txn gas during parallel execution. +pub fn observe_parallel_execution_txn_gas(cost: u64, gas_type: &'static str) { + PARALLEL_TXN_GAS + .with_label_values(&[gas_type]) + .observe(cost as f64); +} + +/// Record the block gas during sequential execution. +pub fn observe_sequential_execution_block_gas(cost: u64, gas_type: &'static str) { + SEQUENTIAL_BLOCK_GAS + .with_label_values(&[gas_type]) + .observe(cost as f64); +} + +/// Record the txn gas during sequential execution. +pub fn observe_sequential_execution_txn_gas(cost: u64, gas_type: &'static str) { + SEQUENTIAL_TXN_GAS + .with_label_values(&[gas_type]) + .observe(cost as f64); +} + /// Count of times the module publishing fallback was triggered in parallel execution. 
pub static MODULE_PUBLISHING_FALLBACK_COUNT: Lazy = Lazy::new(|| { register_int_counter!( @@ -128,56 +168,56 @@ pub static DEPENDENCY_WAIT_SECONDS: Lazy = Lazy::new(|| { .unwrap() }); -pub static PARALLEL_PER_BLOCK_GAS: Lazy = Lazy::new(|| { - register_histogram!( - "aptos_execution_par_per_block_gas", - "The per-block consumed gas in parallel execution (Block STM)", - exponential_buckets(/*start=*/ 1.0, /*factor=*/ 2.0, /*count=*/ 30).unwrap(), +pub static PARALLEL_BLOCK_GAS: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_execution_parallel_block_gas", + "Histogram for different block gas costs (execution, io, storage, storage fee, non-storage) during parallel execution", + &["stage"] ) .unwrap() }); -pub static SEQUENTIAL_PER_BLOCK_GAS: Lazy = Lazy::new(|| { - register_histogram!( - "aptos_execution_seq_per_block_gas", - "The per-block consumed gas in sequential execution", - exponential_buckets(/*start=*/ 1.0, /*factor=*/ 2.0, /*count=*/ 30).unwrap(), +pub static PARALLEL_TXN_GAS: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_execution_parallel_txn_gas", + "Histogram for different average txn gas costs (execution, io, storage, storage fee, non-storage) during parallel execution", + &["stage"] ) .unwrap() }); -pub static PARALLEL_PER_BLOCK_COMMITTED_TXNS: Lazy = Lazy::new(|| { - register_histogram!( - "aptos_execution_par_per_block_committed_txns", - "The per-block committed txns in parallel execution (Block STM)", - exponential_buckets(/*start=*/ 1.0, /*factor=*/ 2.0, /*count=*/ 30).unwrap(), +pub static SEQUENTIAL_BLOCK_GAS: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_execution_sequential_block_gas", + "Histogram for different block gas costs (execution, io, storage, storage fee, non-storage) during sequential execution", + &["stage"] ) .unwrap() }); -pub static SEQUENTIAL_PER_BLOCK_COMMITTED_TXNS: Lazy = Lazy::new(|| { - register_histogram!( - "aptos_execution_seq_per_block_committed_txns", - "The per-block committed txns in sequential execution", - exponential_buckets(/*start=*/ 1.0, /*factor=*/ 2.0, /*count=*/ 30).unwrap(), +pub static SEQUENTIAL_TXN_GAS: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_execution_sequential_txn_gas", + "Histogram for different average txn gas costs (execution, io, storage, storage fee, non-storage) during sequential execution", + &["stage"] ) .unwrap() }); -pub static PARALLEL_PER_TXN_GAS: Lazy = Lazy::new(|| { +pub static PARALLEL_BLOCK_COMMITTED_TXNS: Lazy = Lazy::new(|| { register_histogram!( - "aptos_execution_par_per_txn_gas", - "The per-txn consumed gas in parallel execution (Block STM)", - exponential_buckets(/*start=*/ 1.0, /*factor=*/ 1.5, /*count=*/ 30).unwrap(), + "aptos_execution_par_block_committed_txns", + "The per-block committed txns in parallel execution (Block STM)", + exponential_buckets(/*start=*/ 1.0, /*factor=*/ 2.0, /*count=*/ 30).unwrap(), ) .unwrap() }); -pub static SEQUENTIAL_PER_TXN_GAS: Lazy = Lazy::new(|| { +pub static SEQUENTIAL_BLOCK_COMMITTED_TXNS: Lazy = Lazy::new(|| { register_histogram!( - "aptos_execution_seq_per_txn_gas", - "The per-txn consumed gas in sequential execution", - exponential_buckets(/*start=*/ 1.0, /*factor=*/ 1.5, /*count=*/ 30).unwrap(), + "aptos_execution_seq_block_committed_txns", + "The per-block committed txns in sequential execution", + exponential_buckets(/*start=*/ 1.0, /*factor=*/ 2.0, /*count=*/ 30).unwrap(), ) .unwrap() }); diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs index 
b0013246a7db5..6f2ad1148fb12 100644 --- a/aptos-move/block-executor/src/executor.rs +++ b/aptos-move/block-executor/src/executor.rs @@ -5,11 +5,11 @@ use crate::{ counters, counters::{ - PARALLEL_EXECUTION_SECONDS, RAYON_EXECUTION_SECONDS, TASK_EXECUTE_SECONDS, + GasType, PARALLEL_EXECUTION_SECONDS, RAYON_EXECUTION_SECONDS, TASK_EXECUTE_SECONDS, TASK_VALIDATE_SECONDS, VM_INIT_SECONDS, WORK_WITH_TASK_SECONDS, }, errors::*, - scheduler::{DependencyStatus, Scheduler, SchedulerTask, Wave}, + scheduler::{DependencyStatus, ExecutionTaskType, Scheduler, SchedulerTask, Wave}, task::{ExecutionStatus, ExecutorTask, Transaction, TransactionOutput}, txn_last_input_output::TxnLastInputOutput, view::{LatestView, MVHashMapView}, @@ -18,18 +18,18 @@ use aptos_aggregator::delta_change_set::{deserialize, serialize}; use aptos_logger::{debug, info}; use aptos_mvhashmap::{ types::{MVDataError, MVDataOutput, TxnIndex, Version}, + unsync_map::UnsyncMap, MVHashMap, }; use aptos_state_view::TStateView; use aptos_types::{ - executable::ExecutableTestType, // TODO: fix up with the proper generics. - write_set::WriteOp, + block_executor::partitioner::BlockExecutorTransactions, executable::Executable, + fee_statement::FeeStatement, write_set::WriteOp, }; use aptos_vm_logging::{clear_speculative_txn_logs, init_speculative_logs}; use num_cpus; use rayon::ThreadPool; use std::{ - collections::btree_map::BTreeMap, marker::PhantomData, sync::{ mpsc, @@ -38,33 +38,59 @@ use std::{ }, }; +struct CommitGuard<'a> { + post_commit_txs: &'a Vec>, + worker_idx: usize, + txn_idx: u32, +} + +impl<'a> CommitGuard<'a> { + fn new(post_commit_txs: &'a Vec>, worker_idx: usize, txn_idx: u32) -> Self { + Self { + post_commit_txs, + worker_idx, + txn_idx, + } + } +} + +impl<'a> Drop for CommitGuard<'a> { + fn drop(&mut self) { + // Send the committed txn to the Worker thread. + self.post_commit_txs[self.worker_idx] + .send(self.txn_idx) + .expect("Worker must be available"); + } +} + #[derive(Debug)] enum CommitRole { Coordinator(Vec>), Worker(Receiver), } -pub struct BlockExecutor { +pub struct BlockExecutor { // number of active concurrent tasks, corresponding to the maximum number of rayon // threads that may be concurrently participating in parallel execution. concurrency_level: usize, executor_thread_pool: Arc, - maybe_gas_limit: Option, - phantom: PhantomData<(T, E, S)>, + maybe_block_gas_limit: Option, + phantom: PhantomData<(T, E, S, X)>, } -impl BlockExecutor +impl BlockExecutor where T: Transaction, E: ExecutorTask, S: TStateView + Sync, + X: Executable + 'static, { /// The caller needs to ensure that concurrency_level > 1 (0 is illegal and 1 should /// be handled by sequential execution) and that concurrency_level <= num_cpus. 
pub fn new( concurrency_level: usize, executor_thread_pool: Arc, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Self { assert!( concurrency_level > 0 && concurrency_level <= num_cpus::get(), @@ -74,17 +100,131 @@ where Self { concurrency_level, executor_thread_pool, - maybe_gas_limit, + maybe_block_gas_limit, phantom: PhantomData, } } + fn update_parallel_block_gas_counters( + &self, + accumulated_fee_statement: &FeeStatement, + num_committed: usize, + ) { + counters::observe_parallel_execution_block_gas( + accumulated_fee_statement.gas_used(), + GasType::TOTAL_GAS, + ); + counters::observe_parallel_execution_block_gas( + accumulated_fee_statement.execution_gas_used(), + GasType::EXECUTION_GAS, + ); + counters::observe_parallel_execution_block_gas( + accumulated_fee_statement.io_gas_used(), + GasType::IO_GAS, + ); + counters::observe_parallel_execution_block_gas( + accumulated_fee_statement.storage_gas_used(), + GasType::STORAGE_GAS, + ); + counters::observe_parallel_execution_block_gas( + accumulated_fee_statement.execution_gas_used() + + accumulated_fee_statement.io_gas_used(), + GasType::NON_STORAGE_GAS, + ); + counters::observe_parallel_execution_block_gas( + accumulated_fee_statement.storage_fee_used(), + GasType::STORAGE_FEE, + ); + counters::PARALLEL_BLOCK_COMMITTED_TXNS.observe(num_committed as f64); + } + + fn update_parallel_txn_gas_counters(&self, fee_statement: &FeeStatement) { + counters::observe_parallel_execution_txn_gas(fee_statement.gas_used(), GasType::TOTAL_GAS); + counters::observe_parallel_execution_txn_gas( + fee_statement.execution_gas_used(), + GasType::EXECUTION_GAS, + ); + counters::observe_parallel_execution_txn_gas(fee_statement.io_gas_used(), GasType::IO_GAS); + counters::observe_parallel_execution_txn_gas( + fee_statement.storage_gas_used(), + GasType::STORAGE_GAS, + ); + counters::observe_parallel_execution_txn_gas( + fee_statement.execution_gas_used() + fee_statement.io_gas_used(), + GasType::NON_STORAGE_GAS, + ); + counters::observe_parallel_execution_txn_gas( + fee_statement.storage_fee_used(), + GasType::STORAGE_FEE, + ); + } + + fn update_sequential_block_gas_counters( + &self, + accumulated_fee_statement: &FeeStatement, + num_committed: usize, + ) { + counters::observe_sequential_execution_block_gas( + accumulated_fee_statement.gas_used(), + GasType::TOTAL_GAS, + ); + counters::observe_sequential_execution_block_gas( + accumulated_fee_statement.execution_gas_used(), + GasType::EXECUTION_GAS, + ); + counters::observe_sequential_execution_block_gas( + accumulated_fee_statement.io_gas_used(), + GasType::IO_GAS, + ); + counters::observe_sequential_execution_block_gas( + accumulated_fee_statement.storage_gas_used(), + GasType::STORAGE_GAS, + ); + counters::observe_sequential_execution_block_gas( + accumulated_fee_statement.execution_gas_used() + + accumulated_fee_statement.io_gas_used(), + GasType::NON_STORAGE_GAS, + ); + counters::observe_sequential_execution_block_gas( + accumulated_fee_statement.storage_fee_used(), + GasType::STORAGE_FEE, + ); + counters::PARALLEL_BLOCK_COMMITTED_TXNS.observe(num_committed as f64); + } + + fn update_sequential_txn_gas_counters(&self, fee_statement: &FeeStatement) { + counters::observe_sequential_execution_txn_gas( + fee_statement.gas_used(), + GasType::TOTAL_GAS, + ); + counters::observe_sequential_execution_txn_gas( + fee_statement.execution_gas_used(), + GasType::EXECUTION_GAS, + ); + counters::observe_sequential_execution_txn_gas( + fee_statement.io_gas_used(), + GasType::IO_GAS, + ); + 
counters::observe_sequential_execution_txn_gas( + fee_statement.storage_gas_used(), + GasType::STORAGE_GAS, + ); + counters::observe_sequential_execution_txn_gas( + fee_statement.execution_gas_used() + fee_statement.io_gas_used(), + GasType::NON_STORAGE_GAS, + ); + counters::observe_sequential_execution_txn_gas( + fee_statement.storage_fee_used(), + GasType::STORAGE_FEE, + ); + } + fn execute( &self, version: Version, signature_verified_block: &[T], last_input_output: &TxnLastInputOutput, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, scheduler: &Scheduler, executor: &E, base_view: &S, @@ -97,7 +237,7 @@ where // VM execution. let execute_result = executor.execute_transaction( - &LatestView::::new_mv_view(base_view, &speculative_view, idx_to_execute), + &LatestView::::new_mv_view(base_view, &speculative_view, idx_to_execute), txn, idx_to_execute, false, @@ -113,7 +253,7 @@ where if !prev_modified_keys.remove(&k) { updates_outside = true; } - versioned_cache.write(&k, write_version, v); + versioned_cache.write(k, write_version, v); } // Then, apply deltas. @@ -121,7 +261,7 @@ where if !prev_modified_keys.remove(&k) { updates_outside = true; } - versioned_cache.add_delta(&k, idx_to_execute, d); + versioned_cache.add_delta(k, idx_to_execute, d); } }; @@ -168,7 +308,7 @@ where version_to_validate: Version, validation_wave: Wave, last_input_output: &TxnLastInputOutput, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, scheduler: &Scheduler, ) -> SchedulerTask { use MVDataError::*; @@ -221,18 +361,18 @@ where fn coordinator_commit_hook( &self, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, scheduler: &Scheduler, post_commit_txs: &Vec>, worker_idx: &mut usize, - accumulated_gas: &mut u64, scheduler_task: &mut SchedulerTask, last_input_output: &TxnLastInputOutput, + accumulated_fee_statement: &mut FeeStatement, ) { while let Some(txn_idx) = scheduler.try_commit() { - post_commit_txs[*worker_idx] - .send(txn_idx) - .expect("Worker must be available"); + // Create a CommitGuard to ensure Coordinator sends the committed txn index to Worker. + let _commit_guard: CommitGuard = + CommitGuard::new(post_commit_txs, *worker_idx, txn_idx); // Iterate round robin over workers to do commit_hook. *worker_idx = (*worker_idx + 1) % post_commit_txs.len(); @@ -240,8 +380,10 @@ where if txn_idx as usize + 1 == scheduler.num_txns() as usize { *scheduler_task = SchedulerTask::Done; - counters::PARALLEL_PER_BLOCK_GAS.observe(*accumulated_gas as f64); - counters::PARALLEL_PER_BLOCK_COMMITTED_TXNS.observe((txn_idx + 1) as f64); + self.update_parallel_block_gas_counters( + accumulated_fee_statement, + (txn_idx + 1) as usize, + ); info!( "[BlockSTM]: Parallel execution completed, all {} txns committed.", txn_idx + 1 @@ -249,52 +391,59 @@ where break; } - // For committed txns with Success status, calculate the accumulated gas. + // For committed txns with Success status, calculate the accumulated gas costs. // For committed txns with Abort or SkipRest status, early halt BlockSTM. 
- match last_input_output.gas_used(txn_idx) { - Some(gas) => { - *accumulated_gas += gas; - counters::PARALLEL_PER_TXN_GAS.observe(gas as f64); + match last_input_output.fee_statement(txn_idx) { + Some(fee_statement) => { + accumulated_fee_statement.add_fee_statement(&fee_statement); + self.update_parallel_txn_gas_counters(&fee_statement); }, None => { scheduler.halt(); - counters::PARALLEL_PER_BLOCK_GAS.observe(*accumulated_gas as f64); - counters::PARALLEL_PER_BLOCK_COMMITTED_TXNS.observe((txn_idx + 1) as f64); + self.update_parallel_block_gas_counters( + accumulated_fee_statement, + (txn_idx + 1) as usize, + ); info!("[BlockSTM]: Parallel execution early halted due to Abort or SkipRest txn, {} txns committed.", txn_idx + 1); break; }, }; - if let Some(per_block_gas_limit) = maybe_gas_limit { - // When the accumulated gas of the committed txns exceeds PER_BLOCK_GAS_LIMIT, early halt BlockSTM. - if *accumulated_gas >= per_block_gas_limit { + if let Some(per_block_gas_limit) = maybe_block_gas_limit { + // When the accumulated execution and io gas of the committed txns exceeds PER_BLOCK_GAS_LIMIT, early halt BlockSTM. + // Storage gas does not count towards the per block gas limit, as we measure execution related cost here. + let accumulated_non_storage_gas = accumulated_fee_statement.execution_gas_used() + + accumulated_fee_statement.io_gas_used(); + if accumulated_non_storage_gas >= per_block_gas_limit { // Set the execution output status to be SkipRest, to skip the rest of the txns. last_input_output.update_to_skip_rest(txn_idx); scheduler.halt(); - counters::PARALLEL_PER_BLOCK_GAS.observe(*accumulated_gas as f64); - counters::PARALLEL_PER_BLOCK_COMMITTED_TXNS.observe((txn_idx + 1) as f64); + self.update_parallel_block_gas_counters( + accumulated_fee_statement, + (txn_idx + 1) as usize, + ); counters::PARALLEL_EXCEED_PER_BLOCK_GAS_LIMIT_COUNT.inc(); - info!("[BlockSTM]: Parallel execution early halted due to accumulated_gas {} >= PER_BLOCK_GAS_LIMIT {}, {} txns committed", *accumulated_gas, per_block_gas_limit, txn_idx); + info!("[BlockSTM]: Parallel execution early halted due to accumulated_non_storage_gas {} >= PER_BLOCK_GAS_LIMIT {}, {} txns committed", accumulated_non_storage_gas, per_block_gas_limit, txn_idx); break; } } // Remark: When early halting the BlockSTM, we have to make sure the current / new tasks // will be properly handled by the threads. For instance, it is possible that the committing - // thread holds an execution task from the last iteration, and then early halts the BlockSTM - // due to a txn execution abort. In this case, we cannot reset the scheduler_task of the - // committing thread (to be Done), otherwise some other pending thread waiting for the execution - // will be pending on read forever (since the halt logic let the execution task to wake up such - // pending task). + // thread holds an execution task of ExecutionTaskType::Wakeup(DependencyCondvar) for some + // other thread pending on the dependency conditional variable from the last iteration. If + // the committing thread early halts BlockSTM and resets its scheduler_task to be Done, the + // pending thread will be pending on read forever. In other words, we rely on the committing + // thread to wake up the pending execution thread, if the committing thread holds the Wakeup task. 
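// The early-halt rule above only counts execution and IO gas against the per-block limit;
// storage gas and the storage fee are deliberately excluded, since the limit is meant to
// bound execution-related cost. A minimal, self-contained sketch of that check (parameter
// names are illustrative; the real code reads execution_gas_used() and io_gas_used() from
// the accumulated FeeStatement):
fn should_halt_early_sketch(
    accumulated_execution_gas: u64,
    accumulated_io_gas: u64,
    maybe_block_gas_limit: Option<u64>,
) -> bool {
    // Only non-storage gas counts towards the per-block gas limit.
    let accumulated_non_storage_gas = accumulated_execution_gas + accumulated_io_gas;
    maybe_block_gas_limit.map_or(false, |limit| accumulated_non_storage_gas >= limit)
}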
} } fn worker_commit_hook( &self, txn_idx: TxnIndex, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, last_input_output: &TxnLastInputOutput, base_view: &S, ) { @@ -339,7 +488,7 @@ where executor_arguments: &E::Argument, block: &[T], last_input_output: &TxnLastInputOutput, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, scheduler: &Scheduler, base_view: &S, role: CommitRole, @@ -353,20 +502,21 @@ where let _timer = WORK_WITH_TASK_SECONDS.start_timer(); let mut scheduler_task = SchedulerTask::NoTask; - let mut accumulated_gas = 0; let mut worker_idx = 0; + + let mut accumulated_fee_statement = FeeStatement::zero(); loop { // Only one thread does try_commit to avoid contention. match &role { CommitRole::Coordinator(post_commit_txs) => { self.coordinator_commit_hook( - self.maybe_gas_limit, + self.maybe_block_gas_limit, scheduler, post_commit_txs, &mut worker_idx, - &mut accumulated_gas, &mut scheduler_task, last_input_output, + &mut accumulated_fee_statement, ); }, CommitRole::Worker(rx) => { @@ -389,16 +539,18 @@ where versioned_cache, scheduler, ), - SchedulerTask::ExecutionTask(version_to_execute, None) => self.execute( - version_to_execute, - block, - last_input_output, - versioned_cache, - scheduler, - &executor, - base_view, - ), - SchedulerTask::ExecutionTask(_, Some(condvar)) => { + SchedulerTask::ExecutionTask(version_to_execute, ExecutionTaskType::Execution) => { + self.execute( + version_to_execute, + block, + last_input_output, + versioned_cache, + scheduler, + &executor, + base_view, + ) + }, + SchedulerTask::ExecutionTask(_, ExecutionTaskType::Wakeup(condvar)) => { let (lock, cvar) = &*condvar; // Mark dependency resolved. *lock.lock() = DependencyStatus::Resolved; @@ -440,7 +592,7 @@ where // w. concurrency_level = 1 for some reason. assert!(self.concurrency_level > 1, "Must use sequential execution"); - let versioned_cache = MVHashMap::new(None); + let versioned_cache = MVHashMap::new(); if signature_verified_block.is_empty() { return Ok(vec![]); @@ -528,18 +680,20 @@ where pub(crate) fn execute_transactions_sequential( &self, executor_arguments: E::Argument, - signature_verified_block: &[T], + signature_verified_block: &Vec, base_view: &S, ) -> Result, E::Error> { let num_txns = signature_verified_block.len(); let executor = E::init(executor_arguments); - let mut data_map = BTreeMap::new(); + let data_map = UnsyncMap::new(); let mut ret = Vec::with_capacity(num_txns); - let mut accumulated_gas = 0; + + let mut accumulated_fee_statement = FeeStatement::zero(); + for (idx, txn) in signature_verified_block.iter().enumerate() { let res = executor.execute_transaction( - &LatestView::::new_btree_view(base_view, &data_map, idx as TxnIndex), + &LatestView::::new_btree_view(base_view, &data_map, idx as TxnIndex), txn, idx as TxnIndex, true, @@ -555,12 +709,12 @@ where ); // Apply the writes. for (ap, write_op) in output.get_writes().into_iter() { - data_map.insert(ap, write_op); + data_map.write(ap, write_op); } - // Calculating the accumulated gas of the committed txns. - let txn_gas = output.gas_used(); - accumulated_gas += txn_gas; - counters::SEQUENTIAL_PER_TXN_GAS.observe(txn_gas as f64); + // Calculating the accumulated gas costs of the committed txns. 
+ let fee_statement = output.fee_statement(); + accumulated_fee_statement.add_fee_statement(&fee_statement); + self.update_sequential_txn_gas_counters(&accumulated_fee_statement); ret.push(output); }, ExecutionStatus::Abort(err) => { @@ -575,12 +729,14 @@ where break; } - if let Some(per_block_gas_limit) = self.maybe_gas_limit { + if let Some(per_block_gas_limit) = self.maybe_block_gas_limit { // When the accumulated gas of the committed txns // exceeds per_block_gas_limit, halt sequential execution. - if accumulated_gas >= per_block_gas_limit { + let accumulated_non_storage_gas = accumulated_fee_statement.execution_gas_used() + + accumulated_fee_statement.io_gas_used(); + if accumulated_non_storage_gas >= per_block_gas_limit { counters::SEQUENTIAL_EXCEED_PER_BLOCK_GAS_LIMIT_COUNT.inc(); - info!("[Execution]: Sequential execution early halted due to accumulated_gas {} >= PER_BLOCK_GAS_LIMIT {}, {} txns committed", accumulated_gas, per_block_gas_limit, ret.len()); + info!("[Execution]: Sequential execution early halted due to accumulated_non_storage_gas {} >= PER_BLOCK_GAS_LIMIT {}, {} txns committed", accumulated_non_storage_gas, per_block_gas_limit, ret.len()); break; } } @@ -593,8 +749,7 @@ where ); } - counters::SEQUENTIAL_PER_BLOCK_GAS.observe(accumulated_gas as f64); - counters::SEQUENTIAL_PER_BLOCK_COMMITTED_TXNS.observe(ret.len() as f64); + self.update_sequential_block_gas_counters(&accumulated_fee_statement, ret.len()); ret.resize_with(num_txns, E::Output::skip_output); Ok(ret) } @@ -602,19 +757,20 @@ where pub fn execute_block( &self, executor_arguments: E::Argument, - signature_verified_block: Vec, + signature_verified_block: BlockExecutorTransactions, base_view: &S, ) -> Result, E::Error> { + let signature_verified_txns = signature_verified_block.into_txns(); let mut ret = if self.concurrency_level > 1 { self.execute_transactions_parallel( executor_arguments, - &signature_verified_block, + &signature_verified_txns, base_view, ) } else { self.execute_transactions_sequential( executor_arguments, - &signature_verified_block, + &signature_verified_txns, base_view, ) }; @@ -624,18 +780,18 @@ where // All logs from the parallel execution should be cleared and not reported. // Clear by re-initializing the speculative logs. - init_speculative_logs(signature_verified_block.len()); + init_speculative_logs(signature_verified_txns.len()); ret = self.execute_transactions_sequential( executor_arguments, - &signature_verified_block, + &signature_verified_txns, base_view, ) } self.executor_thread_pool.spawn(move || { // Explicit async drops. 
- drop(signature_verified_block); + drop(signature_verified_txns); }); ret diff --git a/aptos-move/block-executor/src/proptest_types/bencher.rs b/aptos-move/block-executor/src/proptest_types/bencher.rs index abe5c9d236019..87bb442323e64 100644 --- a/aptos-move/block-executor/src/proptest_types/bencher.rs +++ b/aptos-move/block-executor/src/proptest_types/bencher.rs @@ -9,6 +9,7 @@ use crate::{ TransactionGenParams, ValueType, }, }; +use aptos_types::executable::ExecutableTestType; use criterion::{BatchSize, Bencher as CBencher}; use num_cpus; use proptest::{ @@ -124,6 +125,7 @@ where Transaction, ValueType>, Task, ValueType>, EmptyDataView, ValueType>, + ExecutableTestType, >::new(num_cpus::get(), executor_thread_pool, None) .execute_transactions_parallel((), &self.transactions, &data_view); diff --git a/aptos-move/block-executor/src/proptest_types/tests.rs b/aptos-move/block-executor/src/proptest_types/tests.rs index 93d6cda5fc9af..73e65ff53423a 100644 --- a/aptos-move/block-executor/src/proptest_types/tests.rs +++ b/aptos-move/block-executor/src/proptest_types/tests.rs @@ -10,6 +10,7 @@ use crate::{ TransactionGenParams, ValueType, }, }; +use aptos_types::executable::ExecutableTestType; use claims::assert_ok; use num_cpus; use proptest::{ @@ -29,7 +30,7 @@ fn run_transactions( skip_rest_transactions: Vec, num_repeat: usize, module_access: (bool, bool), - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) where K: Hash + Clone + Debug + Eq + Send + Sync + PartialOrd + Ord + 'static, V: Clone + Eq + Send + Sync + Arbitrary + 'static, @@ -64,10 +65,11 @@ fn run_transactions( Transaction, ValueType>, Task, ValueType>, EmptyDataView, ValueType>, + ExecutableTestType, >::new( num_cpus::get(), executor_thread_pool.clone(), - maybe_gas_limit, + maybe_block_gas_limit, ) .execute_transactions_parallel((), &transactions, &data_view); @@ -76,7 +78,8 @@ fn run_transactions( continue; } - let baseline = ExpectedOutput::generate_baseline(&transactions, None, maybe_gas_limit); + let baseline = + ExpectedOutput::generate_baseline(&transactions, None, maybe_block_gas_limit); baseline.assert_output(&output); } } @@ -134,7 +137,7 @@ proptest! 
{ } } -fn dynamic_read_writes_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) { +fn dynamic_read_writes_with_block_gas_limit(num_txns: usize, maybe_block_gas_limit: Option) { let mut runner = TestRunner::default(); let universe = vec(any::<[u8; 32]>(), 100) @@ -156,11 +159,11 @@ fn dynamic_read_writes_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) { +fn deltas_writes_mixed_with_block_gas_limit(num_txns: usize, maybe_block_gas_limit: Option) { let mut runner = TestRunner::default(); let universe = vec(any::<[u8; 32]>(), 50) @@ -197,19 +200,21 @@ fn deltas_writes_mixed_with_gas_limit(num_txns: usize, maybe_gas_limit: Option, ValueType<[u8; 32]>>, Task, ValueType<[u8; 32]>>, DeltaDataView, ValueType<[u8; 32]>>, + ExecutableTestType, >::new( num_cpus::get(), executor_thread_pool.clone(), - maybe_gas_limit, + maybe_block_gas_limit, ) .execute_transactions_parallel((), &transactions, &data_view); - let baseline = ExpectedOutput::generate_baseline(&transactions, None, maybe_gas_limit); + let baseline = + ExpectedOutput::generate_baseline(&transactions, None, maybe_block_gas_limit); baseline.assert_output(&output); } } -fn deltas_resolver_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) { +fn deltas_resolver_with_block_gas_limit(num_txns: usize, maybe_block_gas_limit: Option) { let mut runner = TestRunner::default(); let universe = vec(any::<[u8; 32]>(), 50) @@ -246,10 +251,11 @@ fn deltas_resolver_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) Transaction, ValueType<[u8; 32]>>, Task, ValueType<[u8; 32]>>, DeltaDataView, ValueType<[u8; 32]>>, + ExecutableTestType, >::new( num_cpus::get(), executor_thread_pool.clone(), - maybe_gas_limit, + maybe_block_gas_limit, ) .execute_transactions_parallel((), &transactions, &data_view); @@ -260,13 +266,19 @@ fn deltas_resolver_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) .map(|out| out.delta_writes()) .collect(); - let baseline = - ExpectedOutput::generate_baseline(&transactions, Some(delta_writes), maybe_gas_limit); + let baseline = ExpectedOutput::generate_baseline( + &transactions, + Some(delta_writes), + maybe_block_gas_limit, + ); baseline.assert_output(&output); } } -fn dynamic_read_writes_contended_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) { +fn dynamic_read_writes_contended_with_block_gas_limit( + num_txns: usize, + maybe_block_gas_limit: Option, +) { let mut runner = TestRunner::default(); let universe = vec(any::<[u8; 32]>(), 10) @@ -289,11 +301,14 @@ fn dynamic_read_writes_contended_with_gas_limit(num_txns: usize, maybe_gas_limit vec![], 100, (false, false), - maybe_gas_limit, + maybe_block_gas_limit, ); } -fn module_publishing_fallback_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) { +fn module_publishing_fallback_with_block_gas_limit( + num_txns: usize, + maybe_block_gas_limit: Option, +) { let mut runner = TestRunner::default(); let universe = vec(any::<[u8; 32]>(), 100) @@ -315,7 +330,7 @@ fn module_publishing_fallback_with_gas_limit(num_txns: usize, maybe_gas_limit: O vec![], 2, (false, true), - maybe_gas_limit, + maybe_block_gas_limit, ); run_transactions( &universe, @@ -324,7 +339,7 @@ fn module_publishing_fallback_with_gas_limit(num_txns: usize, maybe_gas_limit: O vec![], 2, (false, true), - maybe_gas_limit, + maybe_block_gas_limit, ); run_transactions( &universe, @@ -333,11 +348,14 @@ fn module_publishing_fallback_with_gas_limit(num_txns: usize, maybe_gas_limit: O vec![], 2, (true, true), - maybe_gas_limit, + maybe_block_gas_limit, ); } -fn 
publishing_fixed_params_with_gas_limit(num_txns: usize, maybe_gas_limit: Option) { +fn publishing_fixed_params_with_block_gas_limit( + num_txns: usize, + maybe_block_gas_limit: Option, +) { let mut runner = TestRunner::default(); let universe = vec(any::<[u8; 32]>(), 50) @@ -407,7 +425,8 @@ fn publishing_fixed_params_with_gas_limit(num_txns: usize, maybe_gas_limit: Opti Transaction, ValueType<[u8; 32]>>, Task, ValueType<[u8; 32]>>, DeltaDataView, ValueType<[u8; 32]>>, - >::new(num_cpus::get(), executor_thread_pool, maybe_gas_limit) + ExecutableTestType, + >::new(num_cpus::get(), executor_thread_pool, maybe_block_gas_limit) .execute_transactions_parallel((), &transactions, &data_view); assert_ok!(output); @@ -450,6 +469,7 @@ fn publishing_fixed_params_with_gas_limit(num_txns: usize, maybe_gas_limit: Opti Transaction, ValueType<[u8; 32]>>, Task, ValueType<[u8; 32]>>, DeltaDataView, ValueType<[u8; 32]>>, + ExecutableTestType, >::new( num_cpus::get(), executor_thread_pool.clone(), @@ -463,27 +483,27 @@ fn publishing_fixed_params_with_gas_limit(num_txns: usize, maybe_gas_limit: Opti #[test] fn dynamic_read_writes() { - dynamic_read_writes_with_gas_limit(3000, None); + dynamic_read_writes_with_block_gas_limit(3000, None); } #[test] fn deltas_writes_mixed() { - deltas_writes_mixed_with_gas_limit(1000, None); + deltas_writes_mixed_with_block_gas_limit(1000, None); } #[test] fn deltas_resolver() { - deltas_resolver_with_gas_limit(1000, None); + deltas_resolver_with_block_gas_limit(1000, None); } #[test] fn dynamic_read_writes_contended() { - dynamic_read_writes_contended_with_gas_limit(1000, None); + dynamic_read_writes_contended_with_block_gas_limit(1000, None); } #[test] fn module_publishing_fallback() { - module_publishing_fallback_with_gas_limit(3000, None); + module_publishing_fallback_with_block_gas_limit(3000, None); } #[test] @@ -491,7 +511,7 @@ fn module_publishing_fallback() { // not overlapping module r/w keys. fn module_publishing_races() { for _ in 0..5 { - publishing_fixed_params_with_gas_limit(300, None); + publishing_fixed_params_with_block_gas_limit(300, None); } } @@ -550,35 +570,41 @@ proptest! 
{ } #[test] -fn dynamic_read_writes_with_block_gas_limit() { - dynamic_read_writes_with_gas_limit(3000, Some(rand::thread_rng().gen_range(0, 3000) as u64)); - dynamic_read_writes_with_gas_limit(3000, Some(0)); +fn dynamic_read_writes_with_block_gas_limit_test() { + dynamic_read_writes_with_block_gas_limit( + 3000, + Some(rand::thread_rng().gen_range(0, 3000) as u64), + ); + dynamic_read_writes_with_block_gas_limit(3000, Some(0)); } #[test] -fn deltas_writes_mixed_with_block_gas_limit() { - deltas_writes_mixed_with_gas_limit(1000, Some(rand::thread_rng().gen_range(0, 1000) as u64)); - deltas_writes_mixed_with_gas_limit(1000, Some(0)); +fn deltas_writes_mixed_with_block_gas_limit_test() { + deltas_writes_mixed_with_block_gas_limit( + 1000, + Some(rand::thread_rng().gen_range(0, 1000) as u64), + ); + deltas_writes_mixed_with_block_gas_limit(1000, Some(0)); } #[test] -fn deltas_resolver_with_block_gas_limit() { - deltas_resolver_with_gas_limit(1000, Some(rand::thread_rng().gen_range(0, 1000) as u64)); - deltas_resolver_with_gas_limit(1000, Some(0)); +fn deltas_resolver_with_block_gas_limit_test() { + deltas_resolver_with_block_gas_limit(1000, Some(rand::thread_rng().gen_range(0, 1000) as u64)); + deltas_resolver_with_block_gas_limit(1000, Some(0)); } #[test] -fn dynamic_read_writes_contended_with_block_gas_limit() { - dynamic_read_writes_contended_with_gas_limit( +fn dynamic_read_writes_contended_with_block_gas_limit_test() { + dynamic_read_writes_contended_with_block_gas_limit( 1000, Some(rand::thread_rng().gen_range(0, 1000) as u64), ); - dynamic_read_writes_contended_with_gas_limit(1000, Some(0)); + dynamic_read_writes_contended_with_block_gas_limit(1000, Some(0)); } #[test] -fn module_publishing_fallback_with_block_gas_limit() { - module_publishing_fallback_with_gas_limit( +fn module_publishing_fallback_with_block_gas_limit_test() { + module_publishing_fallback_with_block_gas_limit( 3000, // Need to execute at least 2 txns to trigger module publishing fallback Some(rand::thread_rng().gen_range(1, 3000) as u64), @@ -588,9 +614,9 @@ fn module_publishing_fallback_with_block_gas_limit() { #[test] // Test a single transaction intersection interleaves with a lot of dependencies and // not overlapping module r/w keys. 
-fn module_publishing_races_with_block_gas_limit() { +fn module_publishing_races_with_block_gas_limit_test() { for _ in 0..5 { - publishing_fixed_params_with_gas_limit( + publishing_fixed_params_with_block_gas_limit( 300, Some(rand::thread_rng().gen_range(0, 300) as u64), ); diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index c0c8aca22ac88..f9aa9b2d3449b 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -16,6 +16,7 @@ use aptos_types::{ access_path::AccessPath, account_address::AccountAddress, executable::ModulePath, + fee_statement::FeeStatement, state_store::{state_storage_usage::StateStorageUsage, state_value::StateValue}, write_set::{TransactionWrite, WriteOp}, }; @@ -516,6 +517,10 @@ where fn gas_used(&self) -> u64 { 1 } + + fn fee_statement(&self) -> FeeStatement { + FeeStatement::new(1, 1, 0, 0, 0) + } } /////////////////////////////////////////////////////////////////////////// @@ -536,7 +541,7 @@ impl ExpectedOutput { pub fn generate_baseline( txns: &[Transaction], resolved_deltas: Option>>, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Self { let mut current_world = HashMap::new(); // Delta world stores the latest u128 value of delta aggregator. When empty, the @@ -640,7 +645,7 @@ impl ExpectedOutput { // In unit tests, the gas_used of any txn is set to be 1. accumulated_gas += 1; - if let Some(block_gas_limit) = maybe_gas_limit { + if let Some(block_gas_limit) = maybe_block_gas_limit { if accumulated_gas >= block_gas_limit { return Self::ExceedBlockGasLimit(idx, result_vec); } diff --git a/aptos-move/block-executor/src/scheduler.rs b/aptos-move/block-executor/src/scheduler.rs index 4a0c12312c3a8..f13dcbd2bcc33 100644 --- a/aptos-move/block-executor/src/scheduler.rs +++ b/aptos-move/block-executor/src/scheduler.rs @@ -39,13 +39,22 @@ pub enum DependencyResult { ExecutionHalted, } +/// Two types of execution tasks: Execution and Wakeup. +/// Execution is a normal execution task, Wakeup is a task that just wakes up a suspended execution. +/// See explanations for the ExecutionStatus below. +#[derive(Debug, Clone)] +pub enum ExecutionTaskType { + Execution, + Wakeup(DependencyCondvar), +} + /// A holder for potential task returned from the Scheduler. ExecutionTask and ValidationTask /// each contain a version of transaction that must be executed or validated, respectively. /// NoTask holds no task (similar None if we wrapped tasks in Option), and Done implies that /// there are no more tasks and the scheduler is done. #[derive(Debug)] pub enum SchedulerTask { - ExecutionTask(Version, Option), + ExecutionTask(Version, ExecutionTaskType), ValidationTask(Version, Wave), NoTask, Done, @@ -56,21 +65,24 @@ pub enum SchedulerTask { /// 'execution status' as 'status'. Each status contains the latest incarnation number, /// where incarnation = i means it is the i-th execution instance of the transaction. /// -/// 'ReadyToExecute' means that the corresponding incarnation should be executed and the scheduler +/// 'Ready' means that the corresponding incarnation should be executed and the scheduler /// must eventually create a corresponding execution task. The scheduler ensures that exactly one -/// execution task gets created, changing the status to 'Executing' in the process. 
If a dependency -/// condition variable is set, then an execution of a prior incarnation is waiting on it with -/// a read dependency resolved (when dependency was encountered, the status changed to Suspended, -/// and suspended changed to ReadyToExecute when the dependency finished its execution). In this case -/// the caller need not create a new execution task, but just notify the suspended execution. +/// execution task gets created, changing the status to 'Executing' in the process. 'Ready' status +/// contains an ExecutionTaskType, which is either Execution or Wakeup. If it is Execution, then +/// the scheduler creates an execution task for the corresponding incarnation. If it is Wakeup, +/// a dependency condition variable is set in ExecutionTaskType::Wakeup(DependencyCondvar): an execution +/// of a prior incarnation is waiting on it with a read dependency resolved (when dependency was +/// encountered, the status changed to Suspended, and suspended changed to Ready when the dependency +/// finished its execution). In this case the caller need not create a new execution task, but +/// just notify the suspended execution via the dependency condition variable. /// /// 'Executing' status of an incarnation turns into 'Executed' if the execution task finishes, or -/// if a dependency is encountered, it becomes 'ReadyToExecute(incarnation + 1)' once the +/// if a dependency is encountered, it becomes 'Ready(incarnation + 1)' once the /// dependency is resolved. An 'Executed' status allows creation of validation tasks for the /// corresponding incarnation, and a validation failure leads to an abort. The scheduler ensures /// that there is exactly one abort, changing the status to 'Aborting' in the process. Once the /// thread that successfully aborted performs everything that's required, it sets the status -/// to 'ReadyToExecute(incarnation + 1)', allowing the scheduler to create an execution +/// to 'Ready(incarnation + 1)', allowing the scheduler to create an execution /// task for the next incarnation of the transaction. /// /// 'ExecutionHalted' is a transaction status marking that parallel execution is halted, due to @@ -95,7 +107,7 @@ pub enum SchedulerTask { /// #[derive(Debug)] enum ExecutionStatus { - ReadyToExecute(Incarnation, Option), + Ready(Incarnation, ExecutionTaskType), Executing(Incarnation), Suspended(Incarnation, DependencyCondvar), Executed(Incarnation), @@ -108,7 +120,7 @@ impl PartialEq for ExecutionStatus { fn eq(&self, other: &Self) -> bool { use ExecutionStatus::*; match (self, other) { - (&ReadyToExecute(ref a, _), &ReadyToExecute(ref b, _)) + (&Ready(ref a, _), &Ready(ref b, _)) | (&Executing(ref a), &Executing(ref b)) | (&Suspended(ref a, _), &Suspended(ref b, _)) | (&Executed(ref a), &Executed(ref b)) @@ -208,7 +220,7 @@ pub struct Scheduler { // validation/execution preferences stick to the worker threads). /// A shared index that tracks the minimum of all transaction indices that require execution. /// The threads increment the index and attempt to create an execution task for the corresponding - /// transaction, if the status of the txn is 'ReadyToExecute'. This implements a counting-based + /// transaction, if the status of the txn is 'Ready'. This implements a counting-based /// concurrent ordered set. It is reduced as necessary when transactions become ready to be /// executed, in particular, when execution finishes and dependencies are resolved. 
execution_idx: AtomicU32, @@ -242,7 +254,7 @@ impl Scheduler { txn_status: (0..num_txns) .map(|_| { CachePadded::new(( - RwLock::new(ExecutionStatus::ReadyToExecute(0, None)), + RwLock::new(ExecutionStatus::Ready(0, ExecutionTaskType::Execution)), RwLock::new(ValidationStatus::new()), )) }) @@ -368,10 +380,10 @@ impl Scheduler { { return SchedulerTask::ValidationTask(version_to_validate, wave); } - } else if let Some((version_to_execute, maybe_condvar)) = + } else if let Some((version_to_execute, execution_task_type)) = self.try_execute_next_version() { - return SchedulerTask::ExecutionTask(version_to_execute, maybe_condvar); + return SchedulerTask::ExecutionTask(version_to_execute, execution_task_type); } } } @@ -468,7 +480,7 @@ impl Scheduler { let min_dep = txn_deps .into_iter() .map(|dep| { - // Mark the status of dependencies as 'ReadyToExecute' since dependency on + // Mark the status of dependencies as 'Ready' since dependency on // transaction txn_idx is now resolved. self.resume(dep); @@ -536,8 +548,11 @@ impl Scheduler { // re-execution task back to the caller. If incarnation fails, there is // nothing to do, as another thread must have succeeded to incarnate and // obtain the task for re-execution. - if let Some((new_incarnation, maybe_condvar)) = self.try_incarnate(txn_idx) { - return SchedulerTask::ExecutionTask((txn_idx, new_incarnation), maybe_condvar); + if let Some((new_incarnation, execution_task_type)) = self.try_incarnate(txn_idx) { + return SchedulerTask::ExecutionTask( + (txn_idx, new_incarnation), + execution_task_type, + ); } } @@ -573,10 +588,10 @@ impl Scheduler { pub fn resolve_condvar(&self, txn_idx: TxnIndex) { let mut status = self.txn_status[txn_idx as usize].0.write(); { - // Only transactions with status Suspended or ReadyToExecute may have the condition variable of pending threads. + // Only transactions with status Suspended or Ready may have the condition variable of pending threads. match &*status { ExecutionStatus::Suspended(_, condvar) - | ExecutionStatus::ReadyToExecute(_, Some(condvar)) => { + | ExecutionStatus::Ready(_, ExecutionTaskType::Wakeup(condvar)) => { let (lock, cvar) = &*(condvar.clone()); // Mark parallel execution halted due to reasons like module r/w intersection. *lock.lock() = DependencyStatus::ExecutionHalted; @@ -639,11 +654,11 @@ impl Scheduler { } /// Try and incarnate a transaction. Only possible when the status is - /// ReadyToExecute(incarnation), in which case Some(incarnation) is returned and the + /// Ready(incarnation), in which case Some(incarnation) is returned and the /// status is (atomically, due to the mutex) updated to Executing(incarnation). /// An unsuccessful incarnation returns None. Since incarnation numbers never decrease /// for each transaction, incarnate function may not succeed more than once per version. - fn try_incarnate(&self, txn_idx: TxnIndex) -> Option<(Incarnation, Option)> { + fn try_incarnate(&self, txn_idx: TxnIndex) -> Option<(Incarnation, ExecutionTaskType)> { if txn_idx >= self.num_txns { return None; } @@ -652,8 +667,8 @@ impl Scheduler { // However, it is likely an overkill (and overhead to actually upgrade), // while unlikely there would be much contention on a specific index lock. 
let mut status = self.txn_status[txn_idx as usize].0.write(); - if let ExecutionStatus::ReadyToExecute(incarnation, maybe_condvar) = &*status { - let ret = (*incarnation, maybe_condvar.clone()); + if let ExecutionStatus::Ready(incarnation, execution_task_type) = &*status { + let ret: (u32, ExecutionTaskType) = (*incarnation, (*execution_task_type).clone()); *status = ExecutionStatus::Executing(*incarnation); Some(ret) } else { @@ -695,7 +710,7 @@ impl Scheduler { let status = self.txn_status[txn_idx as usize].0.read(); matches!( *status, - ExecutionStatus::ReadyToExecute(0, _) + ExecutionStatus::Ready(0, _) | ExecutionStatus::Executing(0) | ExecutionStatus::Suspended(0, _) ) @@ -744,11 +759,11 @@ impl Scheduler { /// Grab an index to try and execute next (by fetch-and-incrementing execution_idx). /// - If the index is out of bounds, return None (and invoke a check of whether /// all txns can be committed). - /// - If the transaction is ready for execution (ReadyToExecute state), attempt + /// - If the transaction is ready for execution (Ready state), attempt /// to create the next incarnation (should happen exactly once), and if successful, /// return the version to the caller for the corresponding ExecutionTask. /// - Otherwise, return None. - fn try_execute_next_version(&self) -> Option<(Version, Option)> { + fn try_execute_next_version(&self) -> Option<(Version, ExecutionTaskType)> { let idx_to_execute = self.execution_idx.fetch_add(1, Ordering::SeqCst); if idx_to_execute >= self.num_txns { @@ -758,7 +773,9 @@ impl Scheduler { // If successfully incarnated (changed status from ready to executing), // return version for execution task, otherwise None. self.try_incarnate(idx_to_execute) - .map(|(incarnation, maybe_condvar)| ((idx_to_execute, incarnation), maybe_condvar)) + .map(|(incarnation, execution_task_type)| { + ((idx_to_execute, incarnation), execution_task_type) + }) } /// Put a transaction in a suspended state, with a condition variable that can be @@ -767,7 +784,6 @@ impl Scheduler { /// Return false when the execution is halted. fn suspend(&self, txn_idx: TxnIndex, dep_condvar: DependencyCondvar) -> bool { let mut status = self.txn_status[txn_idx as usize].0.write(); - match *status { ExecutionStatus::Executing(incarnation) => { *status = ExecutionStatus::Suspended(incarnation, dep_condvar); @@ -778,7 +794,7 @@ impl Scheduler { } } - /// When a dependency is resolved, mark the transaction as ReadyToExecute with an + /// When a dependency is resolved, mark the transaction as Ready with an /// incremented incarnation number. /// The caller must ensure that the transaction is in the Suspended state. fn resume(&self, txn_idx: TxnIndex) { @@ -789,7 +805,10 @@ impl Scheduler { } if let ExecutionStatus::Suspended(incarnation, dep_condvar) = &*status { - *status = ExecutionStatus::ReadyToExecute(*incarnation, Some(dep_condvar.clone())); + *status = ExecutionStatus::Ready( + *incarnation, + ExecutionTaskType::Wakeup(dep_condvar.clone()), + ); } else { unreachable!(); } @@ -819,7 +838,7 @@ impl Scheduler { // Only makes sense when the current status is 'Aborting'. debug_assert!(*status == ExecutionStatus::Aborting(incarnation)); - *status = ExecutionStatus::ReadyToExecute(incarnation + 1, None); + *status = ExecutionStatus::Ready(incarnation + 1, ExecutionTaskType::Execution); } /// Checks whether the done marker is set. The marker can only be set by 'try_commit'. 
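The Ready/Wakeup split introduced above is, at bottom, the standard condition-variable handoff: a worker that hits an unresolved read dependency parks on a DependencyCondvar stored in the dependent transaction's Suspended status, and the thread that finishes executing the dependency (or halts the block) flips the shared status and notifies it. Below is a minimal, self-contained sketch of that handoff using std::sync primitives; the real scheduler wraps the pair in a DependencyCondvar alias and uses a DependencyStatus enum rather than a plain bool, so the names here are simplifications, not the actual API.

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Simplified stand-in for DependencyCondvar; false = unresolved, true = resolved.
type DepCondvar = Arc<(Mutex<bool>, Condvar)>;

fn main() {
    let dep: DepCondvar = Arc::new((Mutex::new(false), Condvar::new()));

    // The "suspended" incarnation: blocks until the dependency it read is resolved.
    let waiter = {
        let dep = Arc::clone(&dep);
        thread::spawn(move || {
            let (lock, cvar) = &*dep;
            let mut resolved = lock.lock().unwrap();
            while !*resolved {
                resolved = cvar.wait(resolved).unwrap();
            }
            // Here the scheduler would hand back an ExecutionTask for the same
            // incarnation, tagged as a Wakeup rather than a fresh Execution.
        })
    };

    // The thread that finished executing the dependency resolves it and notifies.
    {
        let (lock, cvar) = &*dep;
        *lock.lock().unwrap() = true;
        cvar.notify_one();
    }

    waiter.join().unwrap();
}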
diff --git a/aptos-move/block-executor/src/task.rs b/aptos-move/block-executor/src/task.rs index 7589bef53ded1..a0ee96d32bc85 100644 --- a/aptos-move/block-executor/src/task.rs +++ b/aptos-move/block-executor/src/task.rs @@ -7,6 +7,7 @@ use aptos_mvhashmap::types::TxnIndex; use aptos_state_view::TStateView; use aptos_types::{ executable::ModulePath, + fee_statement::FeeStatement, write_set::{TransactionWrite, WriteOp}, }; use std::{fmt::Debug, hash::Hash}; @@ -24,11 +25,11 @@ pub enum ExecutionStatus { SkipRest(T), } -/// Trait that defines a transaction that could be parallel executed by the scheduler. Each +/// Trait that defines a transaction type that can be executed by the block executor. A transaction /// transaction will write to a key value storage as their side effect. -pub trait Transaction: Sync + Send + 'static { +pub trait Transaction: Sync + Send + Clone + 'static { type Key: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + Debug; - type Value: Send + Sync + TransactionWrite; + type Value: Send + Sync + Clone + TransactionWrite; } /// Inference result of a transaction. @@ -95,4 +96,7 @@ pub trait TransactionOutput: Send + Sync + Debug { /// Return the amount of gas consumed by the transaction. fn gas_used(&self) -> u64; + + /// Return the fee statement of the transaction. + fn fee_statement(&self) -> FeeStatement; } diff --git a/aptos-move/block-executor/src/txn_last_input_output.rs b/aptos-move/block-executor/src/txn_last_input_output.rs index 30fc77f57fb50..520b61d823346 100644 --- a/aptos-move/block-executor/src/txn_last_input_output.rs +++ b/aptos-move/block-executor/src/txn_last_input_output.rs @@ -6,9 +6,11 @@ use crate::{ task::{ExecutionStatus, Transaction, TransactionOutput}, }; use anyhow::anyhow; -use aptos_infallible::Mutex; use aptos_mvhashmap::types::{Incarnation, TxnIndex, Version}; -use aptos_types::{access_path::AccessPath, executable::ModulePath, write_set::WriteOp}; +use aptos_types::{ + access_path::AccessPath, executable::ModulePath, fee_statement::FeeStatement, + write_set::WriteOp, +}; use arc_swap::ArcSwapOption; use crossbeam::utils::CachePadded; use dashmap::DashSet; @@ -130,8 +132,6 @@ pub struct TxnLastInputOutput { module_reads: DashSet, module_read_write_intersection: AtomicBool, - - commit_locks: Vec>, // Shared locks to prevent race during commit } impl TxnLastInputOutput { @@ -146,7 +146,6 @@ impl TxnLastInputO module_writes: DashSet::new(), module_reads: DashSet::new(), module_read_write_intersection: AtomicBool::new(false), - commit_locks: (0..num_txns).map(|_| Mutex::new(())).collect(), } } @@ -222,19 +221,19 @@ impl TxnLastInputO self.inputs[txn_idx as usize].load_full() } - pub fn gas_used(&self, txn_idx: TxnIndex) -> Option { + /// Returns the total gas, execution gas, io gas and storage gas of the transaction. 
+ pub fn fee_statement(&self, txn_idx: TxnIndex) -> Option { match &self.outputs[txn_idx as usize] .load_full() .expect("[BlockSTM]: Execution output must be recorded after execution") .output_status { - ExecutionStatus::Success(output) => Some(output.gas_used()), + ExecutionStatus::Success(output) => Some(output.fee_statement()), _ => None, } } pub fn update_to_skip_rest(&self, txn_idx: TxnIndex) { - let _lock = self.commit_locks[txn_idx as usize].lock(); if let ExecutionStatus::Success(output) = self.take_output(txn_idx) { self.outputs[txn_idx as usize].store(Some(Arc::new(TxnOutput { output_status: ExecutionStatus::SkipRest(output), @@ -268,7 +267,6 @@ impl TxnLastInputO usize, Box::Txn as Transaction>::Key>>, ) { - let _lock = self.commit_locks[txn_idx as usize].lock(); let ret: ( usize, Box::Txn as Transaction>::Key>>, @@ -298,7 +296,6 @@ impl TxnLastInputO txn_idx: TxnIndex, delta_writes: Vec<(<::Txn as Transaction>::Key, WriteOp)>, ) { - let _lock = self.commit_locks[txn_idx as usize].lock(); match &self.outputs[txn_idx as usize] .load_full() .expect("Output must exist") diff --git a/aptos-move/block-executor/src/unit_tests/mod.rs b/aptos-move/block-executor/src/unit_tests/mod.rs index c31b6acafc1b2..443ba3188d5d4 100644 --- a/aptos-move/block-executor/src/unit_tests/mod.rs +++ b/aptos-move/block-executor/src/unit_tests/mod.rs @@ -5,11 +5,14 @@ use crate::{ executor::BlockExecutor, proptest_types::types::{DeltaDataView, ExpectedOutput, KeyType, Task, Transaction, ValueType}, - scheduler::{DependencyResult, Scheduler, SchedulerTask}, + scheduler::{DependencyResult, ExecutionTaskType, Scheduler, SchedulerTask}, }; use aptos_aggregator::delta_change_set::{delta_add, delta_sub, DeltaOp, DeltaUpdate}; use aptos_mvhashmap::types::TxnIndex; -use aptos_types::{executable::ModulePath, write_set::TransactionWrite}; +use aptos_types::{ + executable::{ExecutableTestType, ModulePath}, + write_set::TransactionWrite, +}; use claims::{assert_matches, assert_some_eq}; use rand::{prelude::*, random}; use std::{ @@ -37,11 +40,12 @@ where .unwrap(), ); - let output = BlockExecutor::, Task, DeltaDataView>::new( - num_cpus::get(), - executor_thread_pool, - None, - ) + let output = BlockExecutor::< + Transaction, + Task, + DeltaDataView, + ExecutableTestType, + >::new(num_cpus::get(), executor_thread_pool, None) .execute_transactions_parallel((), &transactions, &data_view); let baseline = ExpectedOutput::generate_baseline(&transactions, None, None); @@ -265,7 +269,7 @@ fn scheduler_tasks() { // No validation tasks. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), None) if i == j + SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if i == j )); } @@ -297,16 +301,16 @@ fn scheduler_tasks() { assert!(matches!( s.finish_abort(4, 0), - SchedulerTask::ExecutionTask((4, 1), None) + SchedulerTask::ExecutionTask((4, 1), ExecutionTaskType::Execution) )); assert!(matches!( s.finish_abort(1, 0), - SchedulerTask::ExecutionTask((1, 1), None) + SchedulerTask::ExecutionTask((1, 1), ExecutionTaskType::Execution) )); // Validation index = 2, wave = 1. assert!(matches!( s.finish_abort(3, 0), - SchedulerTask::ExecutionTask((3, 1), None) + SchedulerTask::ExecutionTask((3, 1), ExecutionTaskType::Execution) )); assert!(matches!( @@ -356,7 +360,7 @@ fn scheduler_first_wave() { // Nothing to validate. 
assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), None) if j == i + SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i )); } @@ -374,7 +378,7 @@ fn scheduler_first_wave() { )); assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((5, 0), None) + SchedulerTask::ExecutionTask((5, 0), ExecutionTaskType::Execution) )); // Since (1, 0) is not EXECUTED, no validation tasks, and execution index // is already at the limit, so no tasks immediately available. @@ -411,7 +415,7 @@ fn scheduler_dependency() { // Nothing to validate. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), None) if j == i + SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i )); } @@ -445,7 +449,7 @@ fn scheduler_dependency() { // resumed task doesn't bump incarnation assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((4, 0), Some(_)) + SchedulerTask::ExecutionTask((4, 0), ExecutionTaskType::Wakeup(_)) )); } @@ -458,7 +462,7 @@ fn incarnation_one_scheduler(num_txns: TxnIndex) -> Scheduler { // Get the first executions out of the way. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), None) if j == i + SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i )); assert!(matches!( s.finish_execution(i, 0, false), @@ -471,7 +475,7 @@ fn incarnation_one_scheduler(num_txns: TxnIndex) -> Scheduler { assert!(s.try_abort(i, 0)); assert!(matches!( s.finish_abort(i, 0), - SchedulerTask::ExecutionTask((j, 1), None) if i == j + SchedulerTask::ExecutionTask((j, 1), ExecutionTaskType::Execution) if i == j )); } s @@ -515,7 +519,7 @@ fn scheduler_incarnation() { assert!(matches!( s.finish_abort(2, 1), - SchedulerTask::ExecutionTask((2, 2), None) + SchedulerTask::ExecutionTask((2, 2), ExecutionTaskType::Execution) )); // wave = 2, validation index = 2. assert!(matches!( @@ -528,15 +532,15 @@ fn scheduler_incarnation() { assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((1, 1), Some(_)) + SchedulerTask::ExecutionTask((1, 1), ExecutionTaskType::Wakeup(_)) )); assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((3, 1), Some(_)) + SchedulerTask::ExecutionTask((3, 1), ExecutionTaskType::Wakeup(_)) )); assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((4, 2), None) + SchedulerTask::ExecutionTask((4, 2), ExecutionTaskType::Execution) )); // execution index = 5 @@ -572,7 +576,7 @@ fn scheduler_basic() { // Nothing to validate. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), None) if j == i + SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i )); } @@ -622,7 +626,7 @@ fn scheduler_drain_idx() { // Nothing to validate. 
assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), None) if j == i + SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i )); } diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index fb5e7e998362d..1451c838d8ae6 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -11,18 +11,19 @@ use anyhow::Result; use aptos_aggregator::delta_change_set::{deserialize, serialize}; use aptos_logger::error; use aptos_mvhashmap::{ - types::{MVCodeError, MVCodeOutput, MVDataError, MVDataOutput, TxnIndex}, + types::{MVDataError, MVDataOutput, MVModulesError, MVModulesOutput, TxnIndex}, + unsync_map::UnsyncMap, MVHashMap, }; use aptos_state_view::{StateViewId, TStateView}; use aptos_types::{ - executable::{ExecutableTestType, ModulePath}, + executable::{Executable, ModulePath}, state_store::{state_storage_usage::StateStorageUsage, state_value::StateValue}, vm_status::{StatusCode, VMStatus}, write_set::TransactionWrite, }; use aptos_vm_logging::{log_schema::AdapterLogSchema, prelude::*}; -use std::{cell::RefCell, collections::BTreeMap, fmt::Debug, hash::Hash, sync::Arc}; +use std::{cell::RefCell, fmt::Debug, hash::Hash, sync::Arc}; /// A struct that is always used by a single thread performing an execution task. The struct is /// passed to the VM and acts as a proxy to resolve reads first in the shared multi-version @@ -31,8 +32,8 @@ use std::{cell::RefCell, collections::BTreeMap, fmt::Debug, hash::Hash, sync::Ar /// TODO(issue 10177): MvHashMapView currently needs to be sync due to trait bounds, but should /// not be. In this case, the read_dependency member can have a RefCell type and the /// captured_reads member can have RefCell>> type. -pub(crate) struct MVHashMapView<'a, K, V: TransactionWrite> { - versioned_map: &'a MVHashMap, // TODO: proper generic type +pub(crate) struct MVHashMapView<'a, K, V: TransactionWrite, X: Executable> { + versioned_map: &'a MVHashMap, scheduler: &'a Scheduler, captured_reads: RefCell>>, } @@ -57,12 +58,10 @@ impl< 'a, K: ModulePath + PartialOrd + Ord + Send + Clone + Debug + Hash + Eq, V: TransactionWrite + Send + Sync, - > MVHashMapView<'a, K, V> + X: Executable, + > MVHashMapView<'a, K, V, X> { - pub(crate) fn new( - versioned_map: &'a MVHashMap, - scheduler: &'a Scheduler, - ) -> Self { + pub(crate) fn new(versioned_map: &'a MVHashMap, scheduler: &'a Scheduler) -> Self { Self { versioned_map, scheduler, @@ -76,18 +75,18 @@ impl< } // TODO: Actually fill in the logic to record fetched executables, etc. - fn fetch_code( + fn fetch_module( &self, key: &K, txn_idx: TxnIndex, - ) -> anyhow::Result, MVCodeError> { + ) -> anyhow::Result, MVModulesError> { // Add a fake read from storage to register in reads for now in order // for the read / write path intersection fallback for modules to still work. 
self.captured_reads .borrow_mut() .push(ReadDescriptor::from_storage(key.clone())); - self.versioned_map.fetch_code(key, txn_idx) + self.versioned_map.fetch_module(key, txn_idx) } fn set_aggregator_base_value(&self, key: &K, value: u128) { @@ -170,23 +169,23 @@ impl< } } -enum ViewMapKind<'a, T: Transaction> { - MultiVersion(&'a MVHashMapView<'a, T::Key, T::Value>), - BTree(&'a BTreeMap), +enum ViewMapKind<'a, T: Transaction, X: Executable> { + MultiVersion(&'a MVHashMapView<'a, T::Key, T::Value, X>), + Unsync(&'a UnsyncMap), } -pub(crate) struct LatestView<'a, T: Transaction, S: TStateView> { +pub(crate) struct LatestView<'a, T: Transaction, S: TStateView, X: Executable> { base_view: &'a S, - latest_view: ViewMapKind<'a, T>, + latest_view: ViewMapKind<'a, T, X>, txn_idx: TxnIndex, } -impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { +impl<'a, T: Transaction, S: TStateView, X: Executable> LatestView<'a, T, S, X> { pub(crate) fn new_mv_view( base_view: &'a S, - map: &'a MVHashMapView<'a, T::Key, T::Value>, + map: &'a MVHashMapView<'a, T::Key, T::Value, X>, txn_idx: TxnIndex, - ) -> LatestView<'a, T, S> { + ) -> LatestView<'a, T, S, X> { LatestView { base_view, latest_view: ViewMapKind::MultiVersion(map), @@ -196,12 +195,12 @@ impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { pub(crate) fn new_btree_view( base_view: &'a S, - map: &'a BTreeMap, + map: &'a UnsyncMap, txn_idx: TxnIndex, - ) -> LatestView<'a, T, S> { + ) -> LatestView<'a, T, S, X> { LatestView { base_view, - latest_view: ViewMapKind::BTree(map), + latest_view: ViewMapKind::Unsync(map), txn_idx, } } @@ -223,17 +222,19 @@ impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { } } -impl<'a, T: Transaction, S: TStateView> TStateView for LatestView<'a, T, S> { +impl<'a, T: Transaction, S: TStateView, X: Executable> TStateView + for LatestView<'a, T, S, X> +{ type Key = T::Key; fn get_state_value(&self, state_key: &T::Key) -> anyhow::Result> { match self.latest_view { ViewMapKind::MultiVersion(map) => match state_key.module_path() { Some(_) => { - use MVCodeError::*; - use MVCodeOutput::*; + use MVModulesError::*; + use MVModulesOutput::*; - match map.fetch_code(state_key, self.txn_idx) { + match map.fetch_module(state_key, self.txn_idx) { Ok(Executable(_)) => unreachable!("Versioned executable not implemented"), Ok(Module((v, _))) => Ok(v.as_state_value()), Err(Dependency(_)) => { @@ -280,7 +281,7 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< } }, }, - ViewMapKind::BTree(map) => map.get(state_key).map_or_else( + ViewMapKind::Unsync(map) => map.fetch_data(state_key).map_or_else( || self.get_base_value(state_key), |v| Ok(v.as_state_value()), ), diff --git a/aptos-move/e2e-move-tests/src/tests/constructor_args.data/pack/sources/args_test.move b/aptos-move/e2e-move-tests/src/tests/constructor_args.data/pack/sources/args_test.move index a5fbaed167e68..0dd613099c3c9 100644 --- a/aptos-move/e2e-move-tests/src/tests/constructor_args.data/pack/sources/args_test.move +++ b/aptos-move/e2e-move-tests/src/tests/constructor_args.data/pack/sources/args_test.move @@ -90,6 +90,25 @@ module 0xCAFE::test { }; } + // Valuable data that should not be able to be fabricated by a malicious tx + struct MyPrecious { + value: u64, + } + + public entry fun ensure_no_fabrication(my_precious: Option) { + if (std::option::is_none(&my_precious)) { + std::option::destroy_none(my_precious) + } else { + let MyPrecious { value : _ } = std::option::destroy_some(my_precious); + } + } + + public entry fun 
ensure_vector_vector_u8(o: Object, _: vector>) acquires ModuleData { + let addr = aptos_std::object::object_address(&o); + // guaranteed to exist + borrow_global_mut(addr).state = std::string::utf8(b"vector>"); + } + fun convert(x: u128): String { let s = std::vector::empty(); let ascii0 = 48; diff --git a/aptos-move/e2e-move-tests/src/tests/constructor_args.rs b/aptos-move/e2e-move-tests/src/tests/constructor_args.rs index 89e5275f063d7..f4fa91804d09d 100644 --- a/aptos-move/e2e-move-tests/src/tests/constructor_args.rs +++ b/aptos-move/e2e-move-tests/src/tests/constructor_args.rs @@ -156,6 +156,14 @@ fn constructor_args_good() { ], "pff vectors of optionals", ), + ( + "0xcafe::test::ensure_vector_vector_u8", + vec![ + bcs::to_bytes(&OBJECT_ADDRESS).unwrap(), // Object + bcs::to_bytes(&vec![vec![1u8], vec![2u8]]).unwrap(), // vector> + ], + "vector>", + ), ]; let mut h = MoveHarness::new_with_features(vec![FeatureFlag::STRUCT_CONSTRUCTORS], vec![]); @@ -228,6 +236,20 @@ fn constructor_args_bad() { ) }), ), + ( + "0xcafe::test::ensure_no_fabrication", + vec![ + bcs::to_bytes(&vec![1u64]).unwrap(), // Option + ], + Box::new(|e| { + matches!( + e, + TransactionStatus::Keep(ExecutionStatus::MiscellaneousError(Some( + StatusCode::INVALID_MAIN_FUNCTION_SIGNATURE + ))) + ) + }), + ), ]; fail(tests); diff --git a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs index be4fb8cdfa1a3..e8142b054482a 100644 --- a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs +++ b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs @@ -23,26 +23,49 @@ fn test_basic_fungible_token() { let mut build_options = aptos_framework::BuildOptions::default(); build_options .named_addresses - .insert("fungible_token".to_string(), *alice.address()); + .insert("example_addr".to_string(), *alice.address()); let result = h.publish_package_with_options( &alice, - &common::test_dir_path("../../../move-examples/fungible_token"), + &common::test_dir_path("../../../move-examples/fungible_asset/managed_fungible_asset"), + build_options.clone(), + ); + + assert_success!(result); + let result = h.publish_package_with_options( + &alice, + &common::test_dir_path("../../../move-examples/fungible_asset/managed_fungible_token"), build_options, ); assert_success!(result); + let metadata = h + .execute_view_function( + str::parse(&format!( + "0x{}::managed_fungible_token::get_metadata", + *alice.address() + )) + .unwrap(), + vec![], + vec![], + ) + .unwrap() + .pop() + .unwrap(); + let metadata = bcs::from_bytes::(metadata.as_slice()).unwrap(); + let result = h.run_entry_function( &alice, str::parse(&format!( - "0x{}::managed_fungible_token::mint", + "0x{}::managed_fungible_asset::mint_to_primary_stores", *alice.address() )) .unwrap(), vec![], vec![ - bcs::to_bytes::(&100).unwrap(), // amount - bcs::to_bytes(alice.address()).unwrap(), + bcs::to_bytes(&metadata).unwrap(), + bcs::to_bytes(&vec![alice.address()]).unwrap(), + bcs::to_bytes(&vec![100u64]).unwrap(), // amount ], ); assert_success!(result); @@ -50,15 +73,16 @@ fn test_basic_fungible_token() { let result = h.run_entry_function( &alice, str::parse(&format!( - "0x{}::managed_fungible_token::transfer", + "0x{}::managed_fungible_asset::transfer_between_primary_stores", *alice.address() )) .unwrap(), vec![], vec![ - bcs::to_bytes(alice.address()).unwrap(), - bcs::to_bytes(bob.address()).unwrap(), - bcs::to_bytes::(&30).unwrap(), // amount + bcs::to_bytes(&metadata).unwrap(), + bcs::to_bytes(&vec![alice.address()]).unwrap(), + 
bcs::to_bytes(&vec![bob.address()]).unwrap(), + bcs::to_bytes(&vec![30u64]).unwrap(), // amount ], ); @@ -66,14 +90,15 @@ fn test_basic_fungible_token() { let result = h.run_entry_function( &alice, str::parse(&format!( - "0x{}::managed_fungible_token::burn", + "0x{}::managed_fungible_asset::burn_from_primary_stores", *alice.address() )) .unwrap(), vec![], vec![ - bcs::to_bytes(bob.address()).unwrap(), - bcs::to_bytes::(&20).unwrap(), // amount + bcs::to_bytes(&metadata).unwrap(), + bcs::to_bytes(&vec![bob.address()]).unwrap(), + bcs::to_bytes(&vec![20u64]).unwrap(), // amount ], ); assert_success!(result); diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__create_account__create_account.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__create_account__create_account.exp index 562502050d9bf..17341c0156fdb 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__create_account__create_account.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__create_account__create_account.exp @@ -16,13 +16,13 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01000000000000000000000000000000000000000000000000000000000000000104636f696e09436f696e53746f7265010700000000000000000000000000000000000000000000000000000000000000010a6170746f735f636f696e094170746f73436f696e00 }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(00000000000000000000000000000000000200000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000300000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, metadata:V0 { payer: 000000000000000000000000000000000000000000000000000000000a550c18, deposit: 0, creation_time_usecs: 0 }), + }: Creation(00000000000000000000000000000000000200000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000300000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(20f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10000000000000000040000000000000001000000000000000000000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000100000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10000, metadata:V0 { payer: 000000000000000000000000000000000000000000000000000000000a550c18, deposit: 0, creation_time_usecs: 0 }), + }: Creation(20f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10000000000000000040000000000000001000000000000000000000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000100000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10000), }, }, ), @@ -30,7 +30,7 @@ Ok( events: [ ContractEvent { key: EventKey { creation_number: 0, account_address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1 }, index: 0, type: Struct(StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("account"), name: Identifier("CoinRegisterEvent"), type_params: [] }), event_data: "00000000000000000000000000000000000000000000000000000000000000010a6170746f735f636f696e094170746f73436f696e" }, ], - gas_used: 16, + 
gas_used: 6, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__borrow_after_move.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__borrow_after_move.exp index 2c06ccd10ae88..57f85b57237ed 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__borrow_after_move.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__borrow_after_move.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b06000000090100040204040308190521140735420877400ab701050cbc014f0d8b020200000101000208000003000100000402010000050001000006000100010800040001060c0002060c03010608000105010708000103014d067369676e657202543109626f72726f775f7431096368616e67655f74310972656d6f76655f74310a7075626c6973685f743101760a616464726573735f6f66f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000000000000000000000000000000000010002010703000100010003050b0011042b000c0102010100010005090b0011042a000c020b010b020f001502020100010006060b0011042c0013000c01020301000001050b0006030000000000000012002d0002000000, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b06000000090100040204040308190521140735420877400ab701050cbc014f0d8b020200000101000208000003000100000402010000050001000006000100010800040001060c0002060c03010608000105010708000103014d067369676e657202543109626f72726f775f7431096368616e67655f74310972656d6f76655f74310a7075626c6973685f743101760a616464726573735f6f66f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000000000000000000000000000000000010002010703000100010003050b0011042b000c0102010100010005090b0011042a000c020b010b020f001502020100010006060b0011042c0013000c01020301000001050b0006030000000000000012002d0002000000), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), @@ -76,13 +76,13 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(0300000000000000, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(0300000000000000), }, }, ), ), events: [], - gas_used: 7, + gas_used: 3, status: Keep( Success, ), @@ -132,7 +132,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: DeletionWithMetadata(metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Deletion, }, }, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__change_after_move.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__change_after_move.exp index 
0631757ebbff4..07efb5e5c47cb 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__change_after_move.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__change_after_move.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b06000000090100040204040308190521140735420877400ab701050cbc014f0d8b020200000101000208000003000100000402010000050001000006000100010800040001060c0002060c03010608000105010708000103014d067369676e657202543109626f72726f775f7431096368616e67655f74310972656d6f76655f74310a7075626c6973685f743101760a616464726573735f6f66f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000000000000000000000000000000000010002010703000100010003050b0011042b000c0102010100010005090b0011042a000c020b010b020f001502020100010006060b0011042c0013000c01020301000001050b0006030000000000000012002d0002000000, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b06000000090100040204040308190521140735420877400ab701050cbc014f0d8b020200000101000208000003000100000402010000050001000006000100010800040001060c0002060c03010608000105010708000103014d067369676e657202543109626f72726f775f7431096368616e67655f74310972656d6f76655f74310a7075626c6973685f743101760a616464726573735f6f66f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000000000000000000000000000000000010002010703000100010003050b0011042b000c0102010100010005090b0011042a000c020b010b020f001502020100010006060b0011042c0013000c01020301000001050b0006030000000000000012002d0002000000), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), @@ -76,13 +76,13 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(0300000000000000, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(0300000000000000), }, }, ), ), events: [], - gas_used: 7, + gas_used: 3, status: Keep( Success, ), @@ -132,7 +132,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: DeletionWithMetadata(metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Deletion, }, }, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__move_from_across_blocks.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__move_from_across_blocks.exp index 93307a8dc18fb..1bec780feaaed 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__move_from_across_blocks.exp +++ 
b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__data_store__move_from_across_blocks.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b06000000090100040204040308190521140735420877400ab701050cbc014f0d8b020200000101000208000003000100000402010000050001000006000100010800040001060c0002060c03010608000105010708000103014d067369676e657202543109626f72726f775f7431096368616e67655f74310972656d6f76655f74310a7075626c6973685f743101760a616464726573735f6f66f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000000000000000000000000000000000010002010703000100010003050b0011042b000c0102010100010005090b0011042a000c020b010b020f001502020100010006060b0011042c0013000c01020301000001050b0006030000000000000012002d0002000000, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b06000000090100040204040308190521140735420877400ab701050cbc014f0d8b020200000101000208000003000100000402010000050001000006000100010800040001060c0002060c03010608000105010708000103014d067369676e657202543109626f72726f775f7431096368616e67655f74310972656d6f76655f74310a7075626c6973685f743101760a616464726573735f6f66f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000000000000000000000000000000000010002010703000100010003050b0011042b000c0102010100010005090b0011042a000c020b010b020f001502020100010006060b0011042c0013000c01020301000001050b0006030000000000000012002d0002000000), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), @@ -76,13 +76,13 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(0300000000000000, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(0300000000000000), }, }, ), ), events: [], - gas_used: 7, + gas_used: 3, status: Keep( Success, ), @@ -132,7 +132,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: DeletionWithMetadata(metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Deletion, }, }, ), @@ -221,13 +221,13 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(0300000000000000, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(0300000000000000), }, }, ), ), events: [], - gas_used: 7, + gas_used: 3, status: Keep( Success, ), @@ -252,7 +252,7 @@ Ok( AccessPath { address: 
f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d02543100 }, ), hash: OnceCell(Uninit), - }: DeletionWithMetadata(metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Deletion, }, }, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__duplicate_module.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__duplicate_module.exp index 1714537199d03..805da66d61fab 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__duplicate_module.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__duplicate_module.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000008010002020204030605050b01070c060812200a32050c3707000000010000000200000000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100020102030001000000010200, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b0600000008010002020204030605050b01070c060812200a32050c3707000000010000000200000000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100020102030001000000010200), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_compatible_module.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_compatible_module.exp index ee51e8228631b..e706de32425ce 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_compatible_module.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_compatible_module.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b06000000030100020702020804200000014df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b06000000030100020702020804200000014df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_changed_field.exp 
b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_changed_field.exp index 3d6a6852dc42e..8548cf8bda8f5 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_changed_field.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_changed_field.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_new_field.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_new_field.exp index 3d6a6852dc42e..8548cf8bda8f5 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_new_field.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_new_field.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_field.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_field.exp index 3d6a6852dc42e..8548cf8bda8f5 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_field.exp +++ 
b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_field.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_struct.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_struct.exp index 3d6a6852dc42e..8548cf8bda8f5 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_struct.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__layout_incompatible_module_with_removed_struct.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b0600000005010002020204070606080c200a2c05000000010000014d01540166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1000201020300), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_compatible_module.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_compatible_module.exp index ee51e8228631b..e706de32425ce 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_compatible_module.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_compatible_module.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: 
CreationWithMetadata(a11ceb0b06000000030100020702020804200000014df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b06000000030100020702020804200000014df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_added_param.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_added_param.exp index afa912978f8de..161b4225a8fb9 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_added_param.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_added_param.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000006010002030205050701070804080c200c2c070000000100000000014d0166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000000010200, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b0600000006010002030205050701070804080c200c2c070000000100000000014d0166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000000010200), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_changed_param.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_changed_param.exp index 62125bf1c6b61..2ade87987f340 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_changed_param.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_changed_param.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000006010002030205050703070a04080e200c2e0700000001000100010300014d0166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000001010200, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: 
Creation(a11ceb0b0600000006010002030205050703070a04080e200c2e0700000001000100010300014d0166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000001010200), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_removed_pub_fn.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_removed_pub_fn.exp index afa912978f8de..161b4225a8fb9 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_removed_pub_fn.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__linking_incompatible_module_with_removed_pub_fn.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b0600000006010002030205050701070804080c200c2c070000000100000000014d0166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000000010200, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b0600000006010002030205050701070804080c200c2c070000000100000000014d0166f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000000010200), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_allow_modules.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_allow_modules.exp index 450e0a735b5a2..bcf854c1f7b71 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_allow_modules.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_allow_modules.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b06000000030100020702020804200000014df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b06000000030100020702020804200000014df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: 
Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_modules_proper_sender.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_modules_proper_sender.exp index 514bd8a87900e..db6061ec50a87 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_modules_proper_sender.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__module_publishing__test_publishing_modules_proper_sender.exp @@ -10,7 +10,7 @@ Ok( AccessPath { address: 000000000000000000000000000000000000000000000000000000000a550c18, path: 00000000000000000000000000000000000000000000000000000000000a550c18014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b06000000030100020702020804200000014d000000000000000000000000000000000000000000000000000000000a550c1800, metadata:V0 { payer: 000000000000000000000000000000000000000000000000000000000a550c18, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b06000000030100020702020804200000014d000000000000000000000000000000000000000000000000000000000a550c1800), StateKey { inner: AccessPath( AccessPath { address: 000000000000000000000000000000000000000000000000000000000a550c18, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -22,7 +22,7 @@ Ok( ), ), events: [], - gas_used: 7, + gas_used: 2, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__verify_txn__test_open_publishing.exp b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__verify_txn__test_open_publishing.exp index 8a6c83e9dbd9f..1598d1d9730bd 100644 --- a/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__verify_txn__test_open_publishing.exp +++ b/aptos-move/e2e-tests/goldens/language_e2e_testsuite__tests__verify_txn__test_open_publishing.exp @@ -10,13 +10,13 @@ Ok( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 00f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1014d }, ), hash: OnceCell(Uninit), - }: CreationWithMetadata(a11ceb0b060000000601000203020a050c0607120a081c200c3c23000000010001000002000100020303010300014d036d61780373756df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000002080a000a012403060a01020a00020101000001060a000a01160c020a020200, metadata:V0 { payer: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, deposit: 0, creation_time_usecs: 0 }), + }: Creation(a11ceb0b060000000601000203020a050c0607120a081c200c3c23000000010001000002000100020303010300014d036d61780373756df5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe10001000002080a000a012403060a01020a00020101000001060a000a01160c020a020200), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 01000000000000000000000000000000000000000000000000000000000000000104636f696e09436f696e53746f7265010700000000000000000000000000000000000000000000000000000000000000010a6170746f735f636f696e094170746f73436f696e00 }, ), hash: OnceCell(Uninit), - }: Modification(f4039a3b000000000000000000000000000000000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1), + }: 
Modification(e8059a3b000000000000000000000000000000000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe100000000000000000000000000000000f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1), StateKey { inner: AccessPath( AccessPath { address: f5b9d6f01a99e74c790e2f330c092fa05455a8193f1dfc1b113ecc54d067afe1, path: 010000000000000000000000000000000000000000000000000000000000000001076163636f756e74074163636f756e7400 }, @@ -64,13 +64,13 @@ Ok( ], }, hash: OnceCell(Uninit), - }: Modification(f31af505000000000100000000000000), + }: Modification(e71cf505000000000100000000000000), }, }, ), ), events: [], - gas_used: 507, + gas_used: 502, status: Keep( Success, ), diff --git a/aptos-move/e2e-tests/src/data_store.rs b/aptos-move/e2e-tests/src/data_store.rs index b0571e3daaabd..d68f2cd6a9b4f 100644 --- a/aptos-move/e2e-tests/src/data_store.rs +++ b/aptos-move/e2e-tests/src/data_store.rs @@ -6,7 +6,7 @@ use crate::account::AccountData; use anyhow::Result; -use aptos_state_view::TStateView; +use aptos_state_view::{in_memory_state_view::InMemoryStateView, TStateView}; use aptos_types::{ access_path::AccessPath, account_config::CoinInfoResource, @@ -133,6 +133,10 @@ impl TStateView for FakeDataStore { } Ok(usage) } + + fn as_in_memory_state_view(&self) -> InMemoryStateView { + InMemoryStateView::new(self.state_data.clone()) + } } // This is used by aggregator tests. diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index c1edbf84c862c..fac4a714fcc17 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -28,6 +28,7 @@ use aptos_types::{ new_block_event_key, AccountResource, CoinInfoResource, CoinStoreResource, NewBlockEvent, CORE_CODE_ADDRESS, }, + block_executor::partitioner::BlockExecutorTransactions, block_metadata::BlockMetadata, chain_id::ChainId, on_chain_config::{ @@ -162,6 +163,10 @@ impl FakeExecutor { ) } + pub fn data_store(&self) -> &FakeDataStore { + &self.data_store + } + /// Creates an executor in which no genesis state has been applied yet. pub fn no_genesis() -> Self { let executor_thread_pool = Arc::new( @@ -412,7 +417,7 @@ impl FakeExecutor { ) -> Result, VMStatus> { BlockAptosVM::execute_block( self.executor_thread_pool.clone(), - txn_block, + BlockExecutorTransactions::Unsharded(txn_block), &self.data_store, usize::min(4, num_cpus::get()), None, @@ -436,7 +441,7 @@ impl FakeExecutor { } } - let output = AptosVM::execute_block(txn_block.clone(), &self.data_store); + let output = AptosVM::execute_block(txn_block.clone(), &self.data_store, None); if !self.no_parallel_exec { let parallel_output = self.execute_transaction_block_parallel(txn_block); assert_eq!(output, parallel_output); diff --git a/aptos-move/framework/aptos-framework/doc/account.md b/aptos-move/framework/aptos-framework/doc/account.md index 7c64b4cf70f47..e196457309bd9 100644 --- a/aptos-move/framework/aptos-framework/doc/account.md +++ b/aptos-move/framework/aptos-framework/doc/account.md @@ -936,7 +936,8 @@ is returned. This way, the caller of this function can publish additional resour -
public fun exists_at(addr: address): bool
+
#[view]
+public fun exists_at(addr: address): bool
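Several account getters in this hunk and the ones that follow (`exists_at`, `get_guid_next_creation_num`, `get_sequence_number`, `get_authentication_key`, and the signer-capability-offer helpers) are annotated `#[view]` by this change, so they can be queried without submitting a transaction. As a rough illustration only, the e2e `MoveHarness` used elsewhere in this diff exposes `execute_view_function`; a hedged sketch of reading a sequence number that way, with the harness `h`, the account `alice`, and the exact result shape all assumed rather than taken from this change:

```rust
// Hedged sketch: query a #[view] function through the e2e MoveHarness,
// mirroring the execute_view_function call in the fungible_asset test above.
let raw = h
    .execute_view_function(
        str::parse("0x1::account::get_sequence_number").unwrap(),
        vec![],                                        // no type arguments
        vec![bcs::to_bytes(alice.address()).unwrap()], // addr: address
    )
    .unwrap()
    .pop()
    .unwrap();
let sequence_number: u64 = bcs::from_bytes(raw.as_slice()).unwrap();
```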
 
@@ -960,7 +961,8 @@ is returned. This way, the caller of this function can publish additional resour -
public fun get_guid_next_creation_num(addr: address): u64
+
#[view]
+public fun get_guid_next_creation_num(addr: address): u64
 
@@ -984,7 +986,8 @@ is returned. This way, the caller of this function can publish additional resour -
public fun get_sequence_number(addr: address): u64
+
#[view]
+public fun get_sequence_number(addr: address): u64
 
@@ -1039,7 +1042,8 @@ is returned. This way, the caller of this function can publish additional resour -
public fun get_authentication_key(addr: address): vector<u8>
+
#[view]
+public fun get_authentication_key(addr: address): vector<u8>
 
@@ -1425,7 +1429,8 @@ to the account owner's signer capability). Returns true if the account at account_addr has a signer capability offer. -
public fun is_signer_capability_offered(account_addr: address): bool
+
#[view]
+public fun is_signer_capability_offered(account_addr: address): bool
 
@@ -1451,7 +1456,8 @@ Returns true if the account at account_addr has a signer capability Returns the address of the account that has a signer capability offer from the account at account_addr. -
public fun get_signer_capability_offer_for(account_addr: address): address
+
#[view]
+public fun get_signer_capability_offer_for(account_addr: address): address
 
@@ -2076,7 +2082,8 @@ The Account does not exist under the new address before creating the account. ### Function `get_guid_next_creation_num` -
public fun get_guid_next_creation_num(addr: address): u64
+
#[view]
+public fun get_guid_next_creation_num(addr: address): u64
 
@@ -2093,7 +2100,8 @@ The Account does not exist under the new address before creating the account. ### Function `get_sequence_number` -
public fun get_sequence_number(addr: address): u64
+
#[view]
+public fun get_sequence_number(addr: address): u64
 
@@ -2133,7 +2141,8 @@ The sequence_number of the Account is up to MAX_U64. ### Function `get_authentication_key` -
public fun get_authentication_key(addr: address): vector<u8>
+
#[view]
+public fun get_authentication_key(addr: address): vector<u8>
 
@@ -2224,9 +2233,16 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME signature: cap_update_table, challenge: challenge, }; -pragma aborts_if_is_partial; -modifies global<Account>(addr); -modifies global<OriginatingAddress>(@aptos_framework); +let originating_addr = addr; +let new_auth_key_vector = spec_assert_valid_rotation_proof_signature_and_get_auth_key(to_scheme, to_public_key_bytes, cap_update_table, challenge); +let address_map = global<OriginatingAddress>(@aptos_framework).address_map; +let new_auth_key = from_bcs::deserialize<address>(new_auth_key_vector); +aborts_if !exists<OriginatingAddress>(@aptos_framework); +aborts_if !from_bcs::deserializable<address>(account_resource.authentication_key); +aborts_if table::spec_contains(address_map, curr_auth_key) && + table::spec_get(address_map, curr_auth_key) != originating_addr; +aborts_if !from_bcs::deserializable<address>(new_auth_key_vector); +aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key);
@@ -2261,7 +2277,15 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME signature: cap_update_table, challenge: challenge, }; -pragma aborts_if_is_partial; +let new_auth_key_vector = spec_assert_valid_rotation_proof_signature_and_get_auth_key(new_scheme, new_public_key_bytes, cap_update_table, challenge); +let address_map = global<OriginatingAddress>(@aptos_framework).address_map; +aborts_if !exists<OriginatingAddress>(@aptos_framework); +aborts_if !from_bcs::deserializable<address>(offerer_account_resource.authentication_key); +aborts_if table::spec_contains(address_map, curr_auth_key) && + table::spec_get(address_map, curr_auth_key) != rotation_cap_offerer_address; +aborts_if !from_bcs::deserializable<address>(new_auth_key_vector); +let new_auth_key = from_bcs::deserialize<address>(new_auth_key_vector); +aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key);
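The two spec blocks above replace `pragma aborts_if_is_partial` with explicit abort conditions: the `OriginatingAddress` table must exist at `@aptos_framework`, the stored and newly derived authentication keys must deserialize into addresses, and the new key must not already map to a different account. For intuition about why the derived key is expected to deserialize as an `address`, here is a hedged sketch of the conventional Ed25519 authentication-key derivation (SHA3-256 over the public key plus a one-byte scheme id); the constant and crate usage are assumptions, not part of this diff:

```rust
use sha3::{Digest, Sha3_256};

/// Hedged sketch: derive a 32-byte authentication key from an Ed25519 public
/// key by hashing the key bytes followed by a one-byte scheme identifier.
/// The 32-byte result is exactly what the spec requires to deserialize into
/// an on-chain `address`.
fn ed25519_auth_key(public_key: &[u8; 32]) -> [u8; 32] {
    const ED25519_SCHEME: u8 = 0; // assumed single-signer Ed25519 scheme id
    let mut hasher = Sha3_256::new();
    hasher.update(public_key);
    hasher.update([ED25519_SCHEME]);
    hasher.finalize().into()
}
```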
@@ -2412,7 +2436,8 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME. ### Function `is_signer_capability_offered` -
public fun is_signer_capability_offered(account_addr: address): bool
+
#[view]
+public fun is_signer_capability_offered(account_addr: address): bool
 
@@ -2428,7 +2453,8 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME. ### Function `get_signer_capability_offer_for` -
public fun get_signer_capability_offer_for(account_addr: address): address
+
#[view]
+public fun get_signer_capability_offer_for(account_addr: address): address
 
diff --git a/aptos-move/framework/aptos-framework/doc/aptos_account.md b/aptos-move/framework/aptos-framework/doc/aptos_account.md index 25e16aad93962..f77fdac11e573 100644 --- a/aptos-move/framework/aptos-framework/doc/aptos_account.md +++ b/aptos-move/framework/aptos-framework/doc/aptos_account.md @@ -210,13 +210,10 @@ Batch version of APT transfer. error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH), ); - let i = 0; - while (i < recipients_len) { - let to = *vector::borrow(&recipients, i); + vector::enumerate_ref(&recipients, |i, to| { let amount = *vector::borrow(&amounts, i); - transfer(source, to, amount); - i = i + 1; - }; + transfer(source, *to, amount); + }); }
@@ -282,13 +279,10 @@ Batch version of transfer_coins. error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH), ); - let i = 0; - while (i < recipients_len) { - let to = *vector::borrow(&recipients, i); + vector::enumerate_ref(&recipients, |i, to| { let amount = *vector::borrow(&amounts, i); - transfer_coins<CoinType>(from, to, amount); - i = i + 1; - }; + transfer_coins<CoinType>(from, *to, amount); + }); }
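Both batch entry points now iterate with `vector::enumerate_ref`, which hands the closure the index and a reference to each recipient, instead of a hand-rolled `while` loop over indices. The spec added further down in this file spells out the abort conditions (mismatched vector lengths, missing or frozen `CoinStore`s, insufficient balance, and so on). A hedged client-side pre-check in Rust that mirrors the cheapest of those conditions before submitting such a transaction might look like this; the types and the balance argument are assumptions for illustration:

```rust
/// Hedged sketch: sanity-check a batch-transfer payload off-chain, mirroring
/// the length and balance abort conditions in the Move spec. `sender_balance`
/// stands in for however the caller fetches the current balance.
fn precheck_batch_transfer(
    recipients: &[[u8; 32]], // account addresses as raw bytes, for illustration
    amounts: &[u64],
    sender_balance: u64,
) -> Result<(), String> {
    if recipients.len() != amounts.len() {
        return Err("recipients and amounts must have the same length".into());
    }
    let total = amounts
        .iter()
        .try_fold(0u64, |acc, &a| acc.checked_add(a))
        .ok_or_else(|| "amount overflow".to_string())?;
    if total > sender_balance {
        return Err("insufficient balance for batch transfer".into());
    }
    Ok(())
}
```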
@@ -342,6 +336,11 @@ This would create the recipient account first and register it to receive the Coi
public fun deposit_coins<CoinType>(to: address, coins: Coin<CoinType>) acquires DirectTransferConfig {
     if (!account::exists_at(to)) {
         create_account(to);
+        spec {
+            assert coin::is_account_registered<AptosCoin>(to);
+            assume aptos_std::type_info::type_of<CoinType>() == aptos_std::type_info::type_of<AptosCoin>() ==>
+                coin::is_account_registered<CoinType>(to);
+        };
     };
     if (!coin::is_account_registered<CoinType>(to)) {
         assert!(
@@ -463,7 +462,8 @@ receive.
By default, this returns true if an account has not explicitly set whether they can receive direct transfers.
 
 
-public fun can_receive_direct_coin_transfers(account: address): bool
+#[view]
+public fun can_receive_direct_coin_transfers(account: address): bool
 
@@ -488,8 +488,7 @@ By default, this returns true if an account has not explicitly set whether the c

-pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_strict;
 
@@ -554,7 +553,34 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to

-pragma verify=false;
+pragma verify = false;
+let account_addr_source = signer::address_of(source);
+let coin_store_source = global<coin::CoinStore<AptosCoin>>(account_addr_source);
+let balance_source = coin_store_source.coin.value;
+requires forall i in 0..len(recipients):
+    recipients[i] != account_addr_source;
+requires exists i in 0..len(recipients):
+    amounts[i] > 0;
+aborts_if len(recipients) != len(amounts);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && length_judgment(recipients[i]);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token);
+ensures forall i in 0..len(recipients):
+        (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) &&
+            (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token));
+aborts_if exists i in 0..len(recipients):
+    !exists<coin::CoinStore<AptosCoin>>(account_addr_source);
+aborts_if exists i in 0..len(recipients):
+    coin_store_source.frozen;
+aborts_if exists i in 0..len(recipients):
+    global<coin::CoinStore<AptosCoin>>(account_addr_source).coin.value < amounts[i];
+aborts_if exists i in 0..len(recipients):
+    exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<coin::CoinStore<AptosCoin>>(recipients[i]).frozen;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64;
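The added conditions quantify over every recipient with `exists i in 0..len(recipients): ...`, so the call is specified to abort if any single transfer would fail. A small illustrative pairing of code and spec using the same quantifier style (hypothetical module, not from the patch):

```move
module 0xcafe::quantifier_example {
    use std::vector;

    /// Aborts if any element is zero.
    fun assert_all_nonzero(xs: &vector<u64>) {
        vector::for_each_ref(xs, |x| {
            assert!(*x > 0, 1);
        });
    }

    spec assert_all_nonzero {
        // Mirrors the per-recipient abort conditions: the function aborts
        // exactly when some index violates the requirement.
        aborts_if exists i in 0..len(xs): xs[i] == 0;
        ensures forall i in 0..len(xs): xs[i] > 0;
    }
}
```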
 
@@ -570,7 +596,13 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to

-pragma verify = false;
+let account_addr_source = signer::address_of(source);
+let coin_store_to = global<coin::CoinStore<AptosCoin>>(to);
+requires account_addr_source != to;
+include CreateAccountTransferAbortsIf;
+include GuidAbortsIf<AptosCoin>;
+include WithdrawAbortsIf<AptosCoin>{from: source};
+aborts_if exists<coin::CoinStore<AptosCoin>>(to) && global<coin::CoinStore<AptosCoin>>(to).frozen;
 
@@ -586,7 +618,38 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to

-pragma verify=false;
+pragma verify = false;
+let account_addr_source = signer::address_of(from);
+let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
+let balance_source = coin_store_source.coin.value;
+requires forall i in 0..len(recipients):
+    recipients[i] != account_addr_source;
+requires exists i in 0..len(recipients):
+    amounts[i] > 0;
+aborts_if len(recipients) != len(amounts);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && length_judgment(recipients[i]);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token);
+ensures forall i in 0..len(recipients):
+        (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) &&
+            (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token));
+aborts_if exists i in 0..len(recipients):
+    !exists<coin::CoinStore<CoinType>>(account_addr_source);
+aborts_if exists i in 0..len(recipients):
+    coin_store_source.frozen;
+aborts_if exists i in 0..len(recipients):
+    global<coin::CoinStore<CoinType>>(account_addr_source).coin.value < amounts[i];
+aborts_if exists i in 0..len(recipients):
+    exists<coin::CoinStore<CoinType>>(recipients[i]) && global<coin::CoinStore<CoinType>>(recipients[i]).frozen;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64;
+aborts_if exists i in 0..len(recipients):
+    !coin::is_account_registered<CoinType>(recipients[i]) && !type_info::spec_is_struct<CoinType>();
+aborts_if exists i in 0..len(recipients):
+    !coin::is_account_registered<CoinType>(recipients[i]) && !can_receive_direct_coin_transfers(recipients[i]);
 
@@ -602,7 +665,75 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to

-pragma verify=false;
+let account_addr_source = signer::address_of(from);
+let coin_store_to = global<coin::CoinStore<CoinType>>(to);
+requires account_addr_source != to;
+include CreateAccountTransferAbortsIf;
+include WithdrawAbortsIf<CoinType>;
+include GuidAbortsIf<CoinType>;
+include RegistCoinAbortsIf<CoinType>;
+aborts_if exists<coin::CoinStore<CoinType>>(to) && global<coin::CoinStore<CoinType>>(to).frozen;
+
+schema CreateAccountTransferAbortsIf {
+    to: address;
+    aborts_if !account::exists_at(to) && length_judgment(to);
+    aborts_if !account::exists_at(to) && (to == @vm_reserved || to == @aptos_framework || to == @aptos_token);
+}
+
+schema WithdrawAbortsIf<CoinType> {
+    from: &signer;
+    amount: u64;
+    let account_addr_source = signer::address_of(from);
+    let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
+    let balance_source = coin_store_source.coin.value;
+    aborts_if !exists<coin::CoinStore<CoinType>>(account_addr_source);
+    aborts_if coin_store_source.frozen;
+    aborts_if balance_source < amount;
+}
+
+schema GuidAbortsIf<CoinType> {
+    to: address;
+    let acc = global<account::Account>(to);
+    aborts_if account::exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if account::exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 > MAX_U64;
+}
+
+schema RegistCoinAbortsIf<CoinType> {
+    to: address;
+    aborts_if !coin::is_account_registered<CoinType>(to) && !type_info::spec_is_struct<CoinType>();
+    aborts_if exists<aptos_framework::account::Account>(to)
+        && !coin::is_account_registered<CoinType>(to) && !can_receive_direct_coin_transfers(to);
+    aborts_if type_info::type_of<CoinType>() != type_info::type_of<AptosCoin>()
+        && !coin::is_account_registered<CoinType>(to) && !can_receive_direct_coin_transfers(to);
+}
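The patch groups the repeated abort conditions into named spec schemas (CreateAccountTransferAbortsIf, WithdrawAbortsIf, GuidAbortsIf, RegistCoinAbortsIf) that individual function specs pull in with `include`. A toy sketch of the same mechanism, with hypothetical names:

```move
module 0xcafe::schema_example {
    /// Aborts when amount is zero; used only to illustrate schema reuse.
    public fun check(amount: u64) {
        assert!(amount > 0, 1);
    }

    spec schema NonZeroAmount {
        amount: u64;
        aborts_if amount == 0;
    }

    spec check {
        // The schema's conditions become part of this function's spec;
        // the same schema can be included by any number of functions.
        include NonZeroAmount;
    }
}
```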
 
@@ -618,7 +749,10 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to

-pragma verify=false;
+include CreateAccountTransferAbortsIf;
+include GuidAbortsIf<CoinType>;
+include RegistCoinAbortsIf<CoinType>;
+aborts_if exists<coin::CoinStore<CoinType>>(to) && global<coin::CoinStore<CoinType>>(to).frozen;
 
@@ -680,7 +814,8 @@ Check if the AptosCoin under the address existed.
 ### Function `can_receive_direct_coin_transfers`

-public fun can_receive_direct_coin_transfers(account: address): bool
+#[view]
+public fun can_receive_direct_coin_transfers(account: address): bool
 
diff --git a/aptos-move/framework/aptos-framework/doc/aptos_coin.md b/aptos-move/framework/aptos-framework/doc/aptos_coin.md
index 3fc8267a11470..f6834ab4e1830 100644
--- a/aptos-move/framework/aptos-framework/doc/aptos_coin.md
+++ b/aptos-move/framework/aptos-framework/doc/aptos_coin.md
@@ -378,12 +378,10 @@ Create delegated token for the address so the account could claim MintCapability
public entry fun delegate_mint_capability(account: signer, to: address) acquires Delegations {
     system_addresses::assert_core_resource(&account);
     let delegations = &mut borrow_global_mut<Delegations>(@core_resources).inner;
-    let i = 0;
-    while (i < vector::length(delegations)) {
-        let element = vector::borrow(delegations, i);
+    vector::for_each_ref(delegations, |element| {
+        let element: &DelegatedMintCapability = element;
         assert!(element.to != to, error::invalid_argument(EALREADY_DELEGATED));
-        i = i + 1;
-    };
+    });
     vector::push_back(delegations, DelegatedMintCapability { to });
 }
 
diff --git a/aptos-move/framework/aptos-framework/doc/aptos_governance.md b/aptos-move/framework/aptos-framework/doc/aptos_governance.md
index d78f3b61a221c..c19ff2462a55e 100644
--- a/aptos-move/framework/aptos-framework/doc/aptos_governance.md
+++ b/aptos-move/framework/aptos-framework/doc/aptos_governance.md
@@ -707,7 +708,8 @@ AptosGovernance.

-public fun get_voting_duration_secs(): u64
+#[view]
+public fun get_voting_duration_secs(): u64
 
@@ -731,7 +732,8 @@ AptosGovernance.

-public fun get_min_voting_threshold(): u128
+#[view]
+public fun get_min_voting_threshold(): u128
 
@@ -755,7 +757,8 @@ AptosGovernance.

-public fun get_required_proposer_stake(): u64
+#[view]
+public fun get_required_proposer_stake(): u64
 
@@ -1289,7 +1292,8 @@ Return a signer for making changes to 0x1 as part of on-chain governance proposa

-public fun initialize_for_verification(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
+#[verify_only]
+public fun initialize_for_verification(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
 
@@ -1432,7 +1436,8 @@ Address @aptos_framework must exist GovernanceConfig and GovernanceEvents.
 ### Function `get_voting_duration_secs`

-public fun get_voting_duration_secs(): u64
+#[view]
+public fun get_voting_duration_secs(): u64
 
@@ -1448,7 +1453,8 @@ Address @aptos_framework must exist GovernanceConfig and GovernanceEvents.
 ### Function `get_min_voting_threshold`

-public fun get_min_voting_threshold(): u128
+#[view]
+public fun get_min_voting_threshold(): u128
 
@@ -1464,7 +1470,8 @@ Address @aptos_framework must exist GovernanceConfig and GovernanceEvents.
 ### Function `get_required_proposer_stake`

-public fun get_required_proposer_stake(): u64
+#[view]
+public fun get_required_proposer_stake(): u64
 
@@ -1487,8 +1494,7 @@ Address @aptos_framework must exist GovernanceConfig and GovernanceEvents.
 The same as spec of create_proposal_v2().

-pragma aborts_if_is_partial;
-requires chain_status::is_operating();
+requires chain_status::is_operating();
 include CreateProposalAbortsIf;
 
@@ -1505,43 +1511,11 @@ The same as spec of chain_status::is_operating(); +
requires chain_status::is_operating();
 include CreateProposalAbortsIf;
 
-stake_pool must exist StakePool.
-The delegated voter under the resource StakePool of the stake_pool must be the proposer address.
-Address @aptos_framework must exist GovernanceEvents.
-
-schema CreateProposalAbortsIf {
-    proposer: &signer;
-    stake_pool: address;
-    execution_hash: vector<u8>;
-    metadata_location: vector<u8>;
-    metadata_hash: vector<u8>;
-    let proposer_address = signer::address_of(proposer);
-    let governance_config = global<GovernanceConfig>(@aptos_framework);
-    let stake_pool_res = global<stake::StakePool>(stake_pool);
-    aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
-    aborts_if !exists<stake::StakePool>(stake_pool);
-    aborts_if global<stake::StakePool>(stake_pool).delegated_voter != proposer_address;
-    include AbortsIfNotGovernanceConfig;
-    let current_time = timestamp::now_seconds();
-    let proposal_expiration = current_time + governance_config.voting_duration_secs;
-    aborts_if stake_pool_res.locked_until_secs < proposal_expiration;
-    aborts_if !exists<GovernanceEvents>(@aptos_framework);
-    let allow_validator_set_change = global<staking_config::StakingConfig>(@aptos_framework).allow_validator_set_change;
-    aborts_if !allow_validator_set_change && !exists<stake::ValidatorSet>(@aptos_framework);
-}
-
-
@@ -1557,21 +1531,105 @@ The delegated voter under the resource StakePool of the stake_pool must be the v
 Address @aptos_framework must exist VotingRecords and GovernanceProposal.

-pragma aborts_if_is_partial;
-requires chain_status::is_operating();
-let voter_address = signer::address_of(voter);
-let stake_pool_res = global<stake::StakePool>(stake_pool);
-aborts_if !exists<stake::StakePool>(stake_pool);
-aborts_if stake_pool_res.delegated_voter != voter_address;
+requires chain_status::is_operating();
+include VotingGetDelegatedVoterAbortsIf { sign: voter };
 aborts_if !exists<VotingRecords>(@aptos_framework);
-aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let voting_records = global<VotingRecords>(@aptos_framework);
+let record_key = RecordKey {
+    stake_pool,
+    proposal_id,
+};
+let post post_voting_records = global<VotingRecords>(@aptos_framework);
+aborts_if table::spec_contains(voting_records.votes, record_key);
+ensures table::spec_get(post_voting_records.votes, record_key) == true;
+include GetVotingPowerAbortsIf { pool_address: stake_pool };
 let allow_validator_set_change = global<staking_config::StakingConfig>(@aptos_framework).allow_validator_set_change;
-aborts_if !allow_validator_set_change && !exists<stake::ValidatorSet>(@aptos_framework);
+let stake_pool_res = global<stake::StakePool>(stake_pool);
+let voting_power_0 = stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value;
+let voting_power_1 = stake_pool_res.active.value + stake_pool_res.pending_inactive.value;
+aborts_if allow_validator_set_change && voting_power_0 <= 0;
+aborts_if !allow_validator_set_change && stake::spec_is_current_epoch_validator(stake_pool) && voting_power_1 <= 0;
+aborts_if !allow_validator_set_change && !stake::spec_is_current_epoch_validator(stake_pool);
+aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
 let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
 let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
 let proposal_expiration = proposal.expiration_secs;
 let locked_until_secs = global<stake::StakePool>(stake_pool).locked_until_secs;
 aborts_if proposal_expiration > locked_until_secs;
+aborts_if timestamp::now_seconds() > proposal_expiration;
+aborts_if proposal.is_resolved;
+aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+let execution_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+aborts_if simple_map::spec_contains_key(proposal.metadata, execution_key) &&
+          simple_map::spec_get(proposal.metadata, execution_key) != std::bcs::to_bytes(false);
+aborts_if allow_validator_set_change &&
+    if (should_pass) { proposal.yes_votes + voting_power_0 > MAX_U128 } else { proposal.no_votes + voting_power_0 > MAX_U128 };
+aborts_if !allow_validator_set_change &&
+    if (should_pass) { proposal.yes_votes + voting_power_1 > MAX_U128 } else { proposal.no_votes + voting_power_1 > MAX_U128 };
+let post post_voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+ensures allow_validator_set_change ==>
+    if (should_pass) { post_proposal.yes_votes == proposal.yes_votes + voting_power_0 } else { post_proposal.no_votes == proposal.no_votes + voting_power_0 };
+ensures !allow_validator_set_change ==>
+    if (should_pass) { post_proposal.yes_votes == proposal.yes_votes + voting_power_1 } else { post_proposal.no_votes == proposal.no_votes + voting_power_1 };
+aborts_if !string::spec_internal_check_utf8(voting::RESOLVABLE_TIME_METADATA_KEY);
+let key = utf8(voting::RESOLVABLE_TIME_METADATA_KEY);
+ensures simple_map::spec_contains_key(post_proposal.metadata, key);
+ensures simple_map::spec_get(post_proposal.metadata, key) == std::bcs::to_bytes(timestamp::now_seconds());
+aborts_if !exists<GovernanceEvents>(@aptos_framework);
+let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+let is_voting_period_over = timestamp::now_seconds() > proposal_expiration;
+let new_proposal_yes_votes_0 = proposal.yes_votes + voting_power_0;
+let can_be_resolved_early_0 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                            (new_proposal_yes_votes_0 >= early_resolution_threshold ||
+                             proposal.no_votes >= early_resolution_threshold);
+let is_voting_closed_0 = is_voting_period_over || can_be_resolved_early_0;
+let proposal_state_successed_0 = is_voting_closed_0 && new_proposal_yes_votes_0 > proposal.no_votes &&
+                                 new_proposal_yes_votes_0 + proposal.no_votes >= proposal.min_vote_threshold;
+let new_proposal_no_votes_0 = proposal.no_votes + voting_power_0;
+let can_be_resolved_early_1 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                            (proposal.yes_votes >= early_resolution_threshold ||
+                             new_proposal_no_votes_0 >= early_resolution_threshold);
+let is_voting_closed_1 = is_voting_period_over || can_be_resolved_early_1;
+let proposal_state_successed_1 = is_voting_closed_1 && proposal.yes_votes > new_proposal_no_votes_0 &&
+                                 proposal.yes_votes + new_proposal_no_votes_0 >= proposal.min_vote_threshold;
+let new_proposal_yes_votes_1 = proposal.yes_votes + voting_power_1;
+let can_be_resolved_early_2 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                            (new_proposal_yes_votes_1 >= early_resolution_threshold ||
+                             proposal.no_votes >= early_resolution_threshold);
+let is_voting_closed_2 = is_voting_period_over || can_be_resolved_early_2;
+let proposal_state_successed_2 = is_voting_closed_2 && new_proposal_yes_votes_1 > proposal.no_votes &&
+                                 new_proposal_yes_votes_1 + proposal.no_votes >= proposal.min_vote_threshold;
+let new_proposal_no_votes_1 = proposal.no_votes + voting_power_1;
+let can_be_resolved_early_3 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                            (proposal.yes_votes >= early_resolution_threshold ||
+                             new_proposal_no_votes_1 >= early_resolution_threshold);
+let is_voting_closed_3 = is_voting_period_over || can_be_resolved_early_3;
+let proposal_state_successed_3 = is_voting_closed_3 && proposal.yes_votes > new_proposal_no_votes_1 &&
+                                 proposal.yes_votes + new_proposal_no_votes_1 >= proposal.min_vote_threshold;
+let post can_be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                            (post_proposal.yes_votes >= early_resolution_threshold ||
+                             post_proposal.no_votes >= early_resolution_threshold);
+let post is_voting_closed = is_voting_period_over || can_be_resolved_early;
+let post proposal_state_successed = is_voting_closed && post_proposal.yes_votes > post_proposal.no_votes &&
+                                 post_proposal.yes_votes + post_proposal.no_votes >= proposal.min_vote_threshold;
+let execution_hash = proposal.execution_hash;
+let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework);
+aborts_if allow_validator_set_change &&
+    if (should_pass) {
+        proposal_state_successed_0 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+    } else {
+        proposal_state_successed_1 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+    };
+aborts_if !allow_validator_set_change &&
+    if (should_pass) {
+        proposal_state_successed_2 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+    } else {
+        proposal_state_successed_3 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+    };
+ensures proposal_state_successed ==> simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) &&
+                                     simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == execution_hash;
 
@@ -1587,7 +1645,35 @@ Address @aptos_framework must exist VotingRecords and GovernanceProposal.

-pragma verify = false;
+requires chain_status::is_operating();
+include AddApprovedScriptHash;
+
+schema AddApprovedScriptHash {
+    proposal_id: u64;
+    aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
+    aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+    aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+    let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+    aborts_if timestamp::now_seconds() <= proposal.expiration_secs &&
+        (option::spec_is_none(proposal.early_resolution_vote_threshold) ||
+        proposal.yes_votes < early_resolution_threshold && proposal.no_votes < early_resolution_threshold);
+    aborts_if (timestamp::now_seconds() > proposal.expiration_secs ||
+        option::spec_is_some(proposal.early_resolution_vote_threshold) && (proposal.yes_votes >= early_resolution_threshold ||
+                                                                           proposal.no_votes >= early_resolution_threshold)) &&
+        (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold);
+    let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework);
+    ensures simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) &&
+        simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == proposal.execution_hash;
+}
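AddApprovedScriptHash uses `let post` bindings so its `ensures` clauses can speak about the state of ApprovedExecutionHashes after the call. A compact sketch of pre/post bindings in a function spec (hypothetical module and resource, not from the patch):

```move
module 0xcafe::post_example {
    /// Hypothetical counter resource, for illustration only.
    struct Counter has key {
        value: u64,
    }

    public fun bump(addr: address) acquires Counter {
        let c = borrow_global_mut<Counter>(addr);
        c.value = c.value + 1;
    }

    spec bump {
        let pre_value = global<Counter>(addr).value;
        // `let post` evaluates in the state after the function returns.
        let post post_value = global<Counter>(addr).value;
        aborts_if !exists<Counter>(addr);
        aborts_if pre_value + 1 > MAX_U64;
        ensures post_value == pre_value + 1;
    }
}
```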
 
@@ -1603,9 +1689,8 @@ Address @aptos_framework must exist VotingRecords and GovernanceProposal.

-pragma aborts_if_is_partial;
-requires chain_status::is_operating();
-aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
+requires chain_status::is_operating();
+include AddApprovedScriptHash;
 
@@ -1622,11 +1707,28 @@ Address @aptos_framework must exist VotingRecords and GovernanceProposal.
 Address @aptos_framework must exist ApprovedExecutionHashes and GovernanceProposal and GovernanceResponsbility.

-pragma aborts_if_is_partial;
-requires chain_status::is_operating();
-aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+requires chain_status::is_operating();
+include VotingIsProposalResolvableAbortsif;
+let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+let multi_step_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+let has_multi_step_key = simple_map::spec_contains_key(proposal.metadata, multi_step_key);
+let is_multi_step_proposal = aptos_std::from_bcs::deserialize<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+aborts_if has_multi_step_key && !aptos_std::from_bcs::deserializable<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+aborts_if has_multi_step_key && is_multi_step_proposal;
+let post post_voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+ensures post_proposal.is_resolved == true && post_proposal.resolution_time_secs == timestamp::now_seconds();
+aborts_if option::spec_is_none(proposal.execution_content);
 aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
+let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework).hashes;
+ensures !simple_map::spec_contains_key(post_approved_hashes, proposal_id);
 include GetSignerAbortsIf;
+let governance_responsibility = global<GovernanceResponsbility>(@aptos_framework);
+let signer_cap = simple_map::spec_get(governance_responsibility.signer_caps, signer_address);
+let addr = signer_cap.account;
+ensures signer::address_of(result) == addr;
 
@@ -1642,16 +1744,76 @@ Address @aptos_framework must exist ApprovedExecutionHashes and GovernancePropos

-pragma aborts_if_is_partial;
-let voting_forum = borrow_global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+requires chain_status::is_operating();
+include VotingIsProposalResolvableAbortsif;
+let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
 let proposal = table::spec_get(voting_forum.proposals, proposal_id);
-requires chain_status::is_operating();
-aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
-aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
-aborts_if !table::spec_contains(voting_forum.proposals,proposal_id);
-aborts_if !string::spec_internal_check_utf8(b"IS_MULTI_STEP_PROPOSAL_IN_EXECUTION");
-aborts_if aptos_framework::transaction_context::spec_get_script_hash() != proposal.execution_hash;
+let post post_voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+let multi_step_in_execution_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+let post is_multi_step_proposal_in_execution_value = simple_map::spec_get(post_proposal.metadata, multi_step_in_execution_key);
+ensures simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) ==>
+    is_multi_step_proposal_in_execution_value == std::bcs::serialize(true);
+aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+let multi_step_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+aborts_if simple_map::spec_contains_key(proposal.metadata, multi_step_key) &&
+                    aptos_std::from_bcs::deserializable<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+let is_multi_step = simple_map::spec_contains_key(proposal.metadata, multi_step_key) &&
+                    aptos_std::from_bcs::deserialize<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+let next_execution_hash_is_empty = len(next_execution_hash) == 0;
+aborts_if !is_multi_step && !next_execution_hash_is_empty;
+aborts_if next_execution_hash_is_empty && is_multi_step && !simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key);
+ensures next_execution_hash_is_empty ==> post_proposal.is_resolved == true && post_proposal.resolution_time_secs == timestamp::spec_now_seconds() &&
+    if (is_multi_step) {
+        is_multi_step_proposal_in_execution_value == std::bcs::serialize(false)
+    } else {
+        simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) ==>
+            is_multi_step_proposal_in_execution_value == std::bcs::serialize(true)
+    };
+ensures !next_execution_hash_is_empty ==> post_proposal.execution_hash == next_execution_hash &&
+    simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) ==>
+        is_multi_step_proposal_in_execution_value == std::bcs::serialize(true);
+aborts_if next_execution_hash_is_empty && !exists<ApprovedExecutionHashes>(@aptos_framework);
+let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework).hashes;
+ensures next_execution_hash_is_empty ==> !simple_map::spec_contains_key(post_approved_hashes, proposal_id);
+ensures !next_execution_hash_is_empty ==>
+    simple_map::spec_get(post_approved_hashes, proposal_id) == next_execution_hash;
 include GetSignerAbortsIf;
+let governance_responsibility = global<GovernanceResponsbility>(@aptos_framework);
+let signer_cap = simple_map::spec_get(governance_responsibility.signer_caps, signer_address);
+let addr = signer_cap.account;
+ensures signer::address_of(result) == addr;
+
+schema VotingIsProposalResolvableAbortsif {
+    proposal_id: u64;
+    aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+    aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+    let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+    let voting_period_over = timestamp::now_seconds() > proposal.expiration_secs;
+    let be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                                (proposal.yes_votes >= early_resolution_threshold ||
+                                 proposal.no_votes >= early_resolution_threshold);
+    let voting_closed = voting_period_over || be_resolved_early;
+    aborts_if voting_closed && (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold);
+    aborts_if !voting_closed;
+    aborts_if proposal.is_resolved;
+    aborts_if !string::spec_internal_check_utf8(voting::RESOLVABLE_TIME_METADATA_KEY);
+    aborts_if !simple_map::spec_contains_key(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY));
+    let resolvable_time = aptos_std::from_bcs::deserialize<u64>(simple_map::spec_get(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY)));
+    aborts_if !aptos_std::from_bcs::deserializable<u64>(simple_map::spec_get(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY)));
+    aborts_if timestamp::now_seconds() <= resolvable_time;
+    aborts_if aptos_framework::transaction_context::spec_get_script_hash() != proposal.execution_hash;
+}
 
@@ -1690,14 +1852,13 @@ Address @aptos_framework must exist ApprovedExecutionHashes and GovernancePropos

-pragma verify_duration_estimate = 120;
-aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
+aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
 include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
-include staking_config::StakingRewardsConfigRequirement;
 requires chain_status::is_operating();
-requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time();
 requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
+include staking_config::StakingRewardsConfigRequirement;
 
@@ -1737,15 +1898,35 @@ limit addition overflow.
 pool_address must exist in StakePool.

-pragma aborts_if_is_partial;
+include GetVotingPowerAbortsIf;
 let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
-aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
 let allow_validator_set_change = staking_config.allow_validator_set_change;
-let stake_pool = global<stake::StakePool>(pool_address);
-aborts_if allow_validator_set_change && (stake_pool.active.value + stake_pool.pending_active.value + stake_pool.pending_inactive.value) > MAX_U64;
-aborts_if !exists<stake::StakePool>(pool_address);
-aborts_if !allow_validator_set_change && !exists<stake::ValidatorSet>(@aptos_framework);
-ensures allow_validator_set_change ==> result == stake_pool.active.value + stake_pool.pending_active.value + stake_pool.pending_inactive.value;
+let stake_pool_res = global<stake::StakePool>(pool_address);
+ensures allow_validator_set_change ==> result == stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value;
+ensures !allow_validator_set_change ==> if (stake::spec_is_current_epoch_validator(pool_address)) {
+    result == stake_pool_res.active.value + stake_pool_res.pending_inactive.value
+} else {
+    result == 0
+};
+
+schema GetVotingPowerAbortsIf {
+    pool_address: address;
+    let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
+    aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
+    let allow_validator_set_change = staking_config.allow_validator_set_change;
+    let stake_pool_res = global<stake::StakePool>(pool_address);
+    aborts_if allow_validator_set_change && (stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value) > MAX_U64;
+    aborts_if !exists<stake::StakePool>(pool_address);
+    aborts_if !allow_validator_set_change && !exists<stake::ValidatorSet>(@aptos_framework);
+    aborts_if !allow_validator_set_change && stake::spec_is_current_epoch_validator(pool_address) && stake_pool_res.active.value + stake_pool_res.pending_inactive.value > MAX_U64;
+}
 
@@ -1791,12 +1972,25 @@ pool_address must exist in StakePool.

-aborts_if string::length(utf8(metadata_location)) > 256;
-aborts_if string::length(utf8(metadata_hash)) > 256;
-aborts_if !string::spec_internal_check_utf8(metadata_location);
-aborts_if !string::spec_internal_check_utf8(metadata_hash);
-aborts_if !string::spec_internal_check_utf8(METADATA_LOCATION_KEY);
-aborts_if !string::spec_internal_check_utf8(METADATA_HASH_KEY);
+include CreateProposalMetadataAbortsIf;
+
+schema CreateProposalMetadataAbortsIf {
+    metadata_location: vector<u8>;
+    metadata_hash: vector<u8>;
+    aborts_if string::length(utf8(metadata_location)) > 256;
+    aborts_if string::length(utf8(metadata_hash)) > 256;
+    aborts_if !string::spec_internal_check_utf8(metadata_location);
+    aborts_if !string::spec_internal_check_utf8(metadata_hash);
+    aborts_if !string::spec_internal_check_utf8(METADATA_LOCATION_KEY);
+    aborts_if !string::spec_internal_check_utf8(METADATA_HASH_KEY);
+}
 
@@ -1806,7 +2000,8 @@ pool_address must exist in StakePool.
 ### Function `initialize_for_verification`

-public fun initialize_for_verification(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
+#[verify_only]
+public fun initialize_for_verification(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
 
diff --git a/aptos-move/framework/aptos-framework/doc/block.md b/aptos-move/framework/aptos-framework/doc/block.md
index fd19a25fc2763..5ed52b69e46a1 100644
--- a/aptos-move/framework/aptos-framework/doc/block.md
+++ b/aptos-move/framework/aptos-framework/doc/block.md
@@ -320,7 +320,8 @@ Can only be called as part of the Aptos governance proposal process established
 Return epoch interval in seconds.

-public fun get_epoch_interval_secs(): u64
+#[view]
+public fun get_epoch_interval_secs(): u64
 
@@ -425,7 +426,8 @@ The runtime always runs this before executing the transactions in a block.
 Get the current block height

-public fun get_current_block_height(): u64
+#[view]
+public fun get_current_block_height(): u64
 
@@ -667,7 +669,8 @@ The BlockResource existed under the @aptos_framework.
 ### Function `get_epoch_interval_secs`

-public fun get_epoch_interval_secs(): u64
+#[view]
+public fun get_epoch_interval_secs(): u64
 
@@ -710,7 +713,8 @@ The BlockResource existed under the @aptos_framework.
 ### Function `get_current_block_height`

-public fun get_current_block_height(): u64
+#[view]
+public fun get_current_block_height(): u64
 
diff --git a/aptos-move/framework/aptos-framework/doc/chain_id.md b/aptos-move/framework/aptos-framework/doc/chain_id.md
index 797c714576d7f..3cd22f09abc3f 100644
--- a/aptos-move/framework/aptos-framework/doc/chain_id.md
+++ b/aptos-move/framework/aptos-framework/doc/chain_id.md
@@ -82,7 +82,8 @@ Publish the chain ID id of this instance under the SystemAddresses
 Return the chain ID of this instance.

-public fun get(): u8
+#[view]
+public fun get(): u8
 
@@ -135,7 +136,8 @@ Return the chain ID of this instance.
 ### Function `get`

-public fun get(): u8
+#[view]
+public fun get(): u8
 
diff --git a/aptos-move/framework/aptos-framework/doc/chain_status.md b/aptos-move/framework/aptos-framework/doc/chain_status.md
index 46219dbe67bdd..4b91cf10f580a 100644
--- a/aptos-move/framework/aptos-framework/doc/chain_status.md
+++ b/aptos-move/framework/aptos-framework/doc/chain_status.md
@@ -114,7 +114,8 @@ Marks that genesis has finished.
 Helper function to determine if Aptos is in genesis state.

-public fun is_genesis(): bool
+#[view]
+public fun is_genesis(): bool
 
@@ -141,7 +142,8 @@ the same as !is_gene
 Testing is_operating() is more frequent than is_genesis().

-public fun is_operating(): bool
+#[view]
+public fun is_operating(): bool
 
diff --git a/aptos-move/framework/aptos-framework/doc/code.md b/aptos-move/framework/aptos-framework/doc/code.md
index 35eaac3bfc69b..e849ecef6b9ff 100644
--- a/aptos-move/framework/aptos-framework/doc/code.md
+++ b/aptos-move/framework/aptos-framework/doc/code.md
@@ -34,6 +34,7 @@ This module supports functionality related to code management.
     - [Function `check_upgradability`](#@Specification_1_check_upgradability)
     - [Function `check_coexistence`](#@Specification_1_check_coexistence)
     - [Function `check_dependencies`](#@Specification_1_check_dependencies)
+    - [Function `get_module_names`](#@Specification_1_get_module_names)
     - [Function `request_publish`](#@Specification_1_request_publish)
     - [Function `request_publish_with_allowed_deps`](#@Specification_1_request_publish_with_allowed_deps)
@@ -554,10 +555,9 @@ package.
     let packages = &mut borrow_global_mut<PackageRegistry>(addr).packages;
     let len = vector::length(packages);
     let index = len;
-    let i = 0;
     let upgrade_number = 0;
-    while (i < len) {
-        let old = vector::borrow(packages, i);
+    vector::enumerate_ref(packages, |i, old| {
+        let old: &PackageMetadata = old;
         if (old.name == pack.name) {
             upgrade_number = old.upgrade_number + 1;
             check_upgradability(old, &pack, &module_names);
@@ -565,8 +565,7 @@ package.
         } else {
             check_coexistence(old, &module_names)
         };
-        i = i + 1;
-    };
+    });

     // Assign the upgrade counter.
     pack.upgrade_number = upgrade_number;
@@ -643,14 +642,13 @@ Checks whether the given package is upgradable, and returns true if a compatibil
     assert!(can_change_upgrade_policy_to(old_pack.upgrade_policy, new_pack.upgrade_policy),
         error::invalid_argument(EUPGRADE_WEAKER_POLICY));
     let old_modules = get_module_names(old_pack);
-    let i = 0;
-    while (i < vector::length(&old_modules)) {
+
+    vector::for_each_ref(&old_modules, |old_module| {
         assert!(
-            vector::contains(new_modules, vector::borrow(&old_modules, i)),
+            vector::contains(new_modules, old_module),
             EMODULE_MISSING
         );
-        i = i + 1;
-    }
+    });
 }
@@ -676,17 +674,15 @@ Checks whether a new package with given names can co-exist with old package.
fun check_coexistence(old_pack: &PackageMetadata, new_modules: &vector<String>) {
     // The modules introduced by each package must not overlap with `names`.
-    let i = 0;
-    while (i < vector::length(&old_pack.modules)) {
-        let old_mod = vector::borrow(&old_pack.modules, i);
+    vector::for_each_ref(&old_pack.modules, |old_mod| {
+        let old_mod: &ModuleMetadata = old_mod;
         let j = 0;
         while (j < vector::length(new_modules)) {
             let name = vector::borrow(new_modules, j);
             assert!(&old_mod.name != name, error::already_exists(EMODULE_NAME_CLASH));
             j = j + 1;
         };
-        i = i + 1;
-    }
+    });
 }
 
@@ -716,54 +712,47 @@ is passed on to the native layer to verify that bytecode dependencies are actual
 acquires PackageRegistry {
     let allowed_module_deps = vector::empty();
     let deps = &pack.deps;
-    let i = 0;
-    let n = vector::length(deps);
-    while (i < n) {
-        let dep = vector::borrow(deps, i);
+    vector::for_each_ref(deps, |dep| {
+        let dep: &PackageDep = dep;
         assert!(exists<PackageRegistry>(dep.account), error::not_found(EPACKAGE_DEP_MISSING));
         if (is_policy_exempted_address(dep.account)) {
             // Allow all modules from this address, by using "" as a wildcard in the AllowedDep
-            let account = dep.account;
+            let account: address = dep.account;
             let module_name = string::utf8(b"");
             vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name });
-            i = i + 1;
-            continue
-        };
-        let registry = borrow_global<PackageRegistry>(dep.account);
-        let j = 0;
-        let m = vector::length(&registry.packages);
-        let found = false;
-        while (j < m) {
-            let dep_pack = vector::borrow(&registry.packages, j);
-            if (dep_pack.name == dep.package_name) {
-                found = true;
-                // Check policy
-                assert!(
-                    dep_pack.upgrade_policy.policy >= pack.upgrade_policy.policy,
-                    error::invalid_argument(EDEP_WEAKER_POLICY)
-                );
-                if (dep_pack.upgrade_policy == upgrade_policy_arbitrary()) {
+        } else {
+            let registry = borrow_global<PackageRegistry>(dep.account);
+            let found = vector::any(&registry.packages, |dep_pack| {
+                let dep_pack: &PackageMetadata = dep_pack;
+                if (dep_pack.name == dep.package_name) {
+                    // Check policy
                     assert!(
-                        dep.account == publish_address,
-                        error::invalid_argument(EDEP_ARBITRARY_NOT_SAME_ADDRESS)
-                    )
-                };
-                // Add allowed deps
-                let k = 0;
-                let r = vector::length(&dep_pack.modules);
-                while (k < r) {
+                        dep_pack.upgrade_policy.policy >= pack.upgrade_policy.policy,
+                        error::invalid_argument(EDEP_WEAKER_POLICY)
+                    );
+                    if (dep_pack.upgrade_policy == upgrade_policy_arbitrary()) {
+                        assert!(
+                            dep.account == publish_address,
+                            error::invalid_argument(EDEP_ARBITRARY_NOT_SAME_ADDRESS)
+                        )
+                    };
+                    // Add allowed deps
                     let account = dep.account;
-                    let module_name = vector::borrow(&dep_pack.modules, k).name;
-                    vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name });
-                    k = k + 1;
-                };
-                break
-            };
-            j = j + 1;
+                    let k = 0;
+                    let r = vector::length(&dep_pack.modules);
+                    while (k < r) {
+                        let module_name = vector::borrow(&dep_pack.modules, k).name;
+                        vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name });
+                        k = k + 1;
+                    };
+                    true
+                } else {
+                    false
+                }
+            });
+            assert!(found, error::not_found(EPACKAGE_DEP_MISSING));
         };
-        assert!(found, error::not_found(EPACKAGE_DEP_MISSING));
-        i = i + 1;
-    };
+    });
     allowed_module_deps
 }
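The check_dependencies rewrite above drops the manual `found` flag, `break`, and index bookkeeping in favour of `vector::any`, which stops at the first element for which the closure returns true. A minimal sketch of that helper (hypothetical module, not the patch's code):

```move
module 0xcafe::any_example {
    use std::vector;

    /// True if some element is even; vector::any short-circuits on the
    /// first match, replacing a while loop with a `found` flag and `break`.
    fun contains_even(xs: &vector<u64>): bool {
        vector::any(xs, |x| *x % 2 == 0)
    }
}
```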
@@ -818,11 +807,10 @@ Get the names of the modules in a package.
fun get_module_names(pack: &PackageMetadata): vector<String> {
     let module_names = vector::empty();
-    let i = 0;
-    while (i < vector::length(&pack.modules)) {
-        vector::push_back(&mut module_names, vector::borrow(&pack.modules, i).name);
-        i = i + 1
-    };
+    vector::for_each_ref(&pack.modules, |pack_module| {
+        let pack_module: &ModuleMetadata = pack_module;
+        vector::push_back(&mut module_names, pack_module.name);
+    });
     module_names
 }
 
@@ -999,6 +987,25 @@ Native function to initiate module loading, including a list of allowed dependen
+
+### Function `get_module_names`
+
+fun get_module_names(pack: &code::PackageMetadata): vector<string::String>
+
+pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] len(result) == len(pack.modules);
+ensures [abstract] forall i in 0..len(result): result[i] == pack.modules[i].name;
+
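The new get_module_names spec is marked `pragma opaque` with `[abstract]` properties, so callers reason about this summary instead of the function body. A small illustrative pairing (hypothetical module; keeping the abstraction sound is the spec author's obligation):

```move
module 0xcafe::opaque_example {
    use std::vector;

    /// Halves every element; cannot abort.
    fun halve_all(xs: vector<u64>): vector<u64> {
        let out = vector::empty<u64>();
        vector::for_each_ref(&xs, |x| {
            vector::push_back(&mut out, *x / 2);
        });
        out
    }

    spec halve_all {
        // Callers see only this summary: never aborts, preserves length.
        pragma opaque;
        aborts_if [abstract] false;
        ensures [abstract] len(result) == len(xs);
    }
}
```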
+
 ### Function `request_publish`
diff --git a/aptos-move/framework/aptos-framework/doc/coin.md b/aptos-move/framework/aptos-framework/doc/coin.md
index 2e511bee0c848..7edc1cf7b7261 100644
--- a/aptos-move/framework/aptos-framework/doc/coin.md
+++ b/aptos-move/framework/aptos-framework/doc/coin.md
@@ -16,6 +16,8 @@ This module provides the foundation for typesafe Coins.
 - [Struct `MintCapability`](#0x1_coin_MintCapability)
 - [Struct `FreezeCapability`](#0x1_coin_FreezeCapability)
 - [Struct `BurnCapability`](#0x1_coin_BurnCapability)
+- [Resource `Ghost$supply`](#0x1_coin_Ghost$supply)
+- [Resource `Ghost$aggregate_supply`](#0x1_coin_Ghost$aggregate_supply)
 - [Constants](#@Constants_0)
 - [Function `initialize_supply_config`](#0x1_coin_initialize_supply_config)
 - [Function `allow_supply_upgrades`](#0x1_coin_allow_supply_upgrades)
@@ -278,7 +280,7 @@ Information about a specific coin type. Stored on the creator of the coin's acco
 be displayed to a user as 5.05 (505 / 10 ** 2).
-supply: option::Option<optional_aggregator::OptionalAggregator>
+supply: option::Option<optional_aggregator::OptionalAggregator>
 Amount of this coin type in existence.
@@ -426,6 +428,60 @@ Capability required to burn coins.
+
+## Resource `Ghost$supply`
+
+struct Ghost$supply<CoinType> has copy, drop, store, key
+
+Fields
+v: num
+
+## Resource `Ghost$aggregate_supply`
+
+struct Ghost$aggregate_supply<CoinType> has copy, drop, store, key
+
+Fields
+v: num
+
@@ -732,8 +788,13 @@ Drains the aggregatable coin, setting it to zero and returning a standard coin.
     };
     let amount = aggregator::read(&coin.value);
     assert!(amount <= MAX_U64, error::out_of_range(EAGGREGATABLE_COIN_VALUE_TOO_LARGE));
-
+    spec {
+        update aggregate_supply<CoinType> = aggregate_supply<CoinType> - amount;
+    };
     aggregator::sub(&mut coin.value, amount);
+    spec {
+        update supply<CoinType> = supply<CoinType> + amount;
+    };
     Coin<CoinType> {
         value: (amount as u64),
     }
@@ -761,8 +822,14 @@ Merges coin into aggregatable coin (
public(friend) fun merge_aggregatable_coin<CoinType>(dst_coin: &mut AggregatableCoin<CoinType>, coin: Coin<CoinType>) {
+    spec {
+        update supply<CoinType> = supply<CoinType> - coin.value;
+    };
     let Coin { value } = coin;
     let amount = (value as u128);
+    spec {
+        update aggregate_supply<CoinType> = aggregate_supply<CoinType> + amount;
+    };
     aggregator::add(&mut dst_coin.value, amount);
 }
 
@@ -840,7 +907,8 @@ A helper function that returns the address of CoinType.
 Returns the balance of owner for provided CoinType.

-public fun balance<CoinType>(owner: address): u64
+#[view]
+public fun balance<CoinType>(owner: address): u64
 
@@ -869,7 +937,8 @@ Returns the balance of owner for provided CoinType.
 Returns true if the type CoinType is an initialized coin.

-public fun is_coin_initialized<CoinType>(): bool
+#[view]
+public fun is_coin_initialized<CoinType>(): bool
 
@@ -894,7 +963,8 @@ Returns true if the type CoinType is an initial
 Returns true if account_addr is registered to receive CoinType.

-public fun is_account_registered<CoinType>(account_addr: address): bool
+#[view]
+public fun is_account_registered<CoinType>(account_addr: address): bool
 
@@ -919,7 +989,8 @@ Returns true if account_addr is registered to r
 Returns the name of the coin.

-public fun name<CoinType>(): string::String
+#[view]
+public fun name<CoinType>(): string::String
 
@@ -944,7 +1015,8 @@ Returns the name of the coin.
 Returns the symbol of the coin, usually a shorter version of the name.

-public fun symbol<CoinType>(): string::String
+#[view]
+public fun symbol<CoinType>(): string::String
 
@@ -971,7 +1043,8 @@ For example, if decimals equals 2, a balance of
 be displayed to a user as 5.05 (505 / 10 ** 2).

-public fun decimals<CoinType>(): u8
+#[view]
+public fun decimals<CoinType>(): u8
 
@@ -996,7 +1069,8 @@ be displayed to a user as 5.05 (505 / 10 ** 2).
 Returns the amount of coin in existence.

-public fun supply<CoinType>(): option::Option<u128>
+#[view]
+public fun supply<CoinType>(): option::Option<u128>
 
@@ -1006,11 +1080,11 @@ Returns the amount of coin in existence.
public fun supply<CoinType>(): Option<u128> acquires CoinInfo {
-    let maybe_supply = &borrow_global<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
+    let maybe_supply = &borrow_global<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
     if (option::is_some(maybe_supply)) {
-        // We do track supply, in this case read from optional aggregator.
-        let supply = option::borrow(maybe_supply);
-        let value = optional_aggregator::read(supply);
+        // We do track supply, in this case read from optional aggregator.
+        let supply = option::borrow(maybe_supply);
+        let value = optional_aggregator::read(supply);
         option::some(value)
     } else {
         option::none()
@@ -1043,13 +1117,16 @@ The capability _cap should be passed as a reference to coin: Coin<CoinType>,
     _cap: &BurnCapability<CoinType>,
 ) acquires CoinInfo {
+    spec {
+        update supply<CoinType> = supply<CoinType> - coin.value;
+    };
     let Coin { value: amount } = coin;
     assert!(amount > 0, error::invalid_argument(EZERO_COIN_AMOUNT));
 
-    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
+    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
     if (option::is_some(maybe_supply)) {
-        let supply = option::borrow_mut(maybe_supply);
-        optional_aggregator::sub(supply, (amount as u128));
+        let supply = option::borrow_mut(maybe_supply);
+        optional_aggregator::sub(supply, (amount as u128));
     }
 }
 
@@ -1158,6 +1235,9 @@ a BurnCapability for
public fun destroy_zero<CoinType>(zero_coin: Coin<CoinType>) {
+    spec {
+        update supply<CoinType> = supply<CoinType> - zero_coin.value;
+    };
     let Coin { value } = zero_coin;
     assert!(value == 0, error::invalid_argument(EDESTRUCTION_OF_NONZERO_TOKEN))
 }
@@ -1185,7 +1265,13 @@ Extracts amount from the passed-in public fun extract<CoinType>(coin: &mut Coin<CoinType>, amount: u64): Coin<CoinType> {
     assert!(coin.value >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE));
+    spec {
+        update supply<CoinType> = supply<CoinType> - amount;
+    };
     coin.value = coin.value - amount;
+    spec {
+        update supply<CoinType> = supply<CoinType> + amount;
+    };
     Coin { value: amount }
 }
 
@@ -1212,7 +1298,13 @@ Extracts the entire amount from the passed-in c
public fun extract_all<CoinType>(coin: &mut Coin<CoinType>): Coin<CoinType> {
     let total_value = coin.value;
+    spec {
+        update supply<CoinType> = supply<CoinType> - coin.value;
+    };
     coin.value = 0;
+    spec {
+        update supply<CoinType> = supply<CoinType> + total_value;
+    };
     Coin { value: total_value }
 }
 
@@ -1228,7 +1320,8 @@ Extracts the entire amount from the passed-in c
 Freeze a CoinStore to prevent transfers

-public entry fun freeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+#[legacy_entry_fun]
+public entry fun freeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
 
@@ -1257,7 +1350,8 @@ Freeze a CoinStore to prevent transfers
 Unfreeze a CoinStore to allow transfers

-public entry fun unfreeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+#[legacy_entry_fun]
+public entry fun unfreeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
 
@@ -1299,7 +1393,7 @@ available.
public entry fun upgrade_supply<CoinType>(account: &signer) acquires CoinInfo, SupplyConfig {
     let account_addr = signer::address_of(account);
 
-    // Only coin creators can upgrade total supply.
+    // Only coin creators can upgrade total supply.
     assert!(
         coin_address<CoinType>() == account_addr,
         error::invalid_argument(ECOIN_INFO_ADDRESS_MISMATCH),
@@ -1311,13 +1405,13 @@ available.
         error::permission_denied(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED)
     );
 
-    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(account_addr).supply;
+    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(account_addr).supply;
     if (option::is_some(maybe_supply)) {
-        let supply = option::borrow_mut(maybe_supply);
+        let supply = option::borrow_mut(maybe_supply);
 
-        // If supply is tracked and the current implementation uses an integer - upgrade.
-        if (!optional_aggregator::is_parallelizable(supply)) {
-            optional_aggregator::switch(supply);
+        // If supply is tracked and the current implementation uses an integer - upgrade.
+        if (!optional_aggregator::is_parallelizable(supply)) {
+            optional_aggregator::switch(supply);
         }
     }
 }
@@ -1434,7 +1528,7 @@ Same as initialize but supply can be initialized to parallelizable
         name,
         symbol,
         decimals,
-        supply: if (monitor_supply) { option::some(optional_aggregator::new(MAX_U128, parallelizable)) } else { option::none() },
+        supply: if (monitor_supply) { option::some(optional_aggregator::new(MAX_U128, parallelizable)) } else { option::none() },
     };
     move_to(account, coin_info);
 
@@ -1467,7 +1561,13 @@ to the sum of the two tokens (dst_coin and source_coin
     spec {
         assume dst_coin.value + source_coin.value <= MAX_U64;
     };
+    spec {
+        update supply<CoinType> = supply<CoinType> - source_coin.value;
+    };
     let Coin { value } = source_coin;
+    spec {
+        update supply<CoinType> = supply<CoinType> + value;
+    };
     dst_coin.value = dst_coin.value + value;
 }
 
@@ -1499,15 +1599,19 @@ Returns minted Coin.
     _cap: &MintCapability<CoinType>,
 ): Coin<CoinType> acquires CoinInfo {
     if (amount == 0) {
-        return zero<CoinType>()
+        return Coin<CoinType> {
+            value: 0
+        }
     };
-    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
+    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
     if (option::is_some(maybe_supply)) {
-        let supply = option::borrow_mut(maybe_supply);
-        optional_aggregator::add(supply, (amount as u128));
+        let supply = option::borrow_mut(maybe_supply);
+        optional_aggregator::add(supply, (amount as u128));
+    };
+    spec {
+        update supply<CoinType> = supply<CoinType> + amount;
     };
-
     Coin<CoinType> { value: amount }
 }
@@ -1670,6 +1774,9 @@ Create a new Coin<CoinType>
 public fun zero<CoinType>(): Coin<CoinType> {
+    spec {
+        update supply<CoinType> = supply<CoinType> + 0;
+    };
     Coin<CoinType> {
         value: 0
     }
@@ -1762,6 +1869,67 @@ Destroy a burn capability.
pragma verify = true;
+
+global supply<CoinType>: num;
+
+global aggregate_supply<CoinType>: num;
+apply TotalSupplyTracked<CoinType> to *<CoinType> except
+    initialize, initialize_internal, initialize_with_parallelizable_supply;
+apply TotalSupplyNoChange<CoinType> to *<CoinType> except mint,
+    burn, burn_from, initialize, initialize_internal, initialize_with_parallelizable_supply;
+
+fun spec_fun_supply_tracked<CoinType>(val: u64, supply: Option<OptionalAggregator>): bool {
+   option::spec_is_some(supply) ==> val == optional_aggregator::optional_aggregator_value
+           (option::spec_borrow(supply))
+}
+
+schema TotalSupplyTracked<CoinType> {
+    ensures old(spec_fun_supply_tracked<CoinType>(supply<CoinType> + aggregate_supply<CoinType>,
+        global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply)) ==>
+        spec_fun_supply_tracked<CoinType>(supply<CoinType> + aggregate_supply<CoinType>,
+            global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply);
+}
+
+fun spec_fun_supply_no_change<CoinType>(old_supply: Option<OptionalAggregator>,
+                                            supply: Option<OptionalAggregator>): bool {
+   option::spec_is_some(old_supply) ==> optional_aggregator::optional_aggregator_value
+       (option::spec_borrow(old_supply)) == optional_aggregator::optional_aggregator_value
+       (option::spec_borrow(supply))
+}
+
schema TotalSupplyNoChange<CoinType> {
+    let old_supply = global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply;
+    let post supply = global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply;
+    ensures spec_fun_supply_no_change<CoinType>(old_supply, supply);
+}
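For orientation: the ghost variables declared above are driven by the inline `spec { update ... }` blocks added to the functions in this change (see `merge`, `mint` and `zero` earlier in this diff), and the two `apply` directives check the net effect of those updates against the supply stored in `CoinInfo`. A minimal sketch of the pattern, using a hypothetical helper that is not part of `coin.move`:

```move
// Hypothetical sketch only: a function that takes a coin out of circulation
// and mirrors that change in the spec-level ghost variable, so the applied
// TotalSupplyTracked schema keeps holding after it runs.
fun retire<CoinType>(coin: Coin<CoinType>): u64 {
    let Coin { value } = coin;
    spec {
        // Ghost update: only the prover sees this; it does not execute.
        update supply<CoinType> = supply<CoinType> - value;
    };
    // ... the on-chain supply stored in CoinInfo<CoinType> would be
    // decremented by `value` here ...
    value
}
```

The functions exempted from `TotalSupplyNoChange` (`mint`, `burn`, `burn_from`) are exactly the ones whose ghost updates move `supply<CoinType>` by a non-zero amount.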
 
@@ -1954,7 +2122,8 @@ Get address by reflection. ### Function `balance` -
public fun balance<CoinType>(owner: address): u64
+
#[view]
+public fun balance<CoinType>(owner: address): u64
 
@@ -1971,7 +2140,8 @@ Get address by reflection. ### Function `is_coin_initialized` -
public fun is_coin_initialized<CoinType>(): bool
+
#[view]
+public fun is_coin_initialized<CoinType>(): bool
 
@@ -1987,7 +2157,7 @@ Get address by reflection.
fun get_coin_supply_opt<CoinType>(): Option<OptionalAggregator> {
-   global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply
+   global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply
 }
 
@@ -2000,7 +2170,7 @@ Get address by reflection.
schema AbortsIfAggregator<CoinType> {
     coin: Coin<CoinType>;
     let addr =  type_info::type_of<CoinType>().account_address;
-    let maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
+    let maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
     aborts_if option::is_some(maybe_supply) && optional_aggregator::is_parallelizable(option::borrow(maybe_supply))
         && aggregator::spec_aggregator_get_val(option::borrow(option::borrow(maybe_supply).aggregator)) <
         coin.value;
@@ -2029,7 +2199,8 @@ Get address by reflection.
 ### Function `name`
 
 
-
public fun name<CoinType>(): string::String
+
#[view]
+public fun name<CoinType>(): string::String
 
@@ -2045,7 +2216,8 @@ Get address by reflection. ### Function `symbol` -
public fun symbol<CoinType>(): string::String
+
#[view]
+public fun symbol<CoinType>(): string::String
 
@@ -2061,7 +2233,8 @@ Get address by reflection. ### Function `decimals` -
public fun decimals<CoinType>(): u8
+
#[view]
+public fun decimals<CoinType>(): u8
 
@@ -2077,7 +2250,8 @@ Get address by reflection. ### Function `supply` -
public fun supply<CoinType>(): option::Option<u128>
+
#[view]
+public fun supply<CoinType>(): option::Option<u128>
 
@@ -2085,9 +2259,9 @@ Get address by reflection.
let coin_addr = type_info::type_of<CoinType>().account_address;
 aborts_if !exists<CoinInfo<CoinType>>(coin_addr);
-let maybe_supply = global<CoinInfo<CoinType>>(coin_addr).supply;
-let supply = option::spec_borrow(maybe_supply);
-let value = optional_aggregator::optional_aggregator_value(supply);
+let maybe_supply = global<CoinInfo<CoinType>>(coin_addr).supply;
+let supply = option::spec_borrow(maybe_supply);
+let value = optional_aggregator::optional_aggregator_value(supply);
 ensures if (option::spec_is_some(maybe_supply)) {
     result == option::spec_some(value)
 } else {
@@ -2137,10 +2311,10 @@ Get address by reflection.
 aborts_if amount != 0 && !exists<CoinInfo<CoinType>>(addr);
 aborts_if amount != 0 && !exists<CoinStore<CoinType>>(account_addr);
 aborts_if coin_store.coin.value < amount;
-let maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
-let supply = option::spec_borrow(maybe_supply);
-let value = optional_aggregator::optional_aggregator_value(supply);
-let post post_maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
+let maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
+let supply = option::spec_borrow(maybe_supply);
+let value = optional_aggregator::optional_aggregator_value(supply);
+let post post_maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
 let post post_supply = option::spec_borrow(post_maybe_supply);
 let post post_value = optional_aggregator::optional_aggregator_value(post_supply);
 aborts_if option::spec_is_some(maybe_supply) && value < amount;
@@ -2244,7 +2418,8 @@ The value of zero_coin must be 0.
 ### Function `freeze_coin_store`
 
 
-
public entry fun freeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+
#[legacy_entry_fun]
+public entry fun freeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
 
@@ -2264,7 +2439,8 @@ The value of zero_coin must be 0. ### Function `unfreeze_coin_store` -
public entry fun unfreeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+
#[legacy_entry_fun]
+public entry fun unfreeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
 
@@ -2300,14 +2476,14 @@ The creator of CoinType must be @aptos_framework. let supply_config = global<SupplyConfig>(@aptos_framework); aborts_if !supply_config.allow_upgrades; modifies global<CoinInfo<CoinType>>(account_addr); -let maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply; -let supply = option::spec_borrow(maybe_supply); -let value = optional_aggregator::optional_aggregator_value(supply); -let post post_maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply; +let maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply; +let supply = option::spec_borrow(maybe_supply); +let value = optional_aggregator::optional_aggregator_value(supply); +let post post_maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply; let post post_supply = option::spec_borrow(post_maybe_supply); let post post_value = optional_aggregator::optional_aggregator_value(post_supply); let supply_no_parallel = option::spec_is_some(maybe_supply) && - !optional_aggregator::is_parallelizable(supply); + !optional_aggregator::is_parallelizable(supply); aborts_if supply_no_parallel && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework); ensures supply_no_parallel ==> optional_aggregator::is_parallelizable(post_supply) && post_value == value; @@ -2395,9 +2571,9 @@ Only the creator of CoinType can initialize. }; let account_addr = signer::address_of(account); let post coin_info = global<CoinInfo<CoinType>>(account_addr); -let post supply = option::spec_borrow(coin_info.supply); -let post value = optional_aggregator::optional_aggregator_value(supply); -let post limit = optional_aggregator::optional_aggregator_limit(supply); +let post supply = option::spec_borrow(coin_info.supply); +let post value = optional_aggregator::optional_aggregator_value(supply); +let post limit = optional_aggregator::optional_aggregator_limit(supply); modifies global<CoinInfo<CoinType>>(account_addr); aborts_if monitor_supply && parallelizable && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework); @@ -2407,9 +2583,9 @@ Only the creator of CoinType can initialize. && coin_info.decimals == decimals; ensures if (monitor_supply) { value == 0 && limit == MAX_U128 - && (parallelizable == optional_aggregator::is_parallelizable(supply)) + && (parallelizable == optional_aggregator::is_parallelizable(supply)) } else { - option::spec_is_none(coin_info.supply) + option::spec_is_none(coin_info.supply) }; ensures result_1 == BurnCapability<CoinType> {}; ensures result_2 == FreezeCapability<CoinType> {}; diff --git a/aptos-move/framework/aptos-framework/doc/delegation_pool.md b/aptos-move/framework/aptos-framework/doc/delegation_pool.md index 205790b4df7e8..b969232722c7f 100644 --- a/aptos-move/framework/aptos-framework/doc/delegation_pool.md +++ b/aptos-move/framework/aptos-framework/doc/delegation_pool.md @@ -738,7 +738,8 @@ Scaling factor of shares pools used within the delegation pool Return whether supplied address addr is owner of a delegation pool. -
public fun owner_cap_exists(addr: address): bool
+
#[view]
+public fun owner_cap_exists(addr: address): bool
 
@@ -763,7 +764,8 @@ Return whether supplied address addr is owner of a delegation pool. Return address of the delegation pool owned by owner or fail if there is none. -
public fun get_owned_pool_address(owner: address): address
+
#[view]
+public fun get_owned_pool_address(owner: address): address
 
@@ -789,7 +791,8 @@ Return address of the delegation pool owned by owner or fail if the Return whether a delegation pool exists at supplied address addr. -
public fun delegation_pool_exists(addr: address): bool
+
#[view]
+public fun delegation_pool_exists(addr: address): bool
 
@@ -814,7 +817,8 @@ Return whether a delegation pool exists at supplied address addr. Return the index of current observed lockup cycle on delegation pool pool_address. -
public fun observed_lockup_cycle(pool_address: address): u64
+
#[view]
+public fun observed_lockup_cycle(pool_address: address): u64
 
@@ -840,7 +844,8 @@ Return the index of current observed lockup cycle on delegation pool pool_ Return the operator commission percentage set on the delegation pool pool_address. -
public fun operator_commission_percentage(pool_address: address): u64
+
#[view]
+public fun operator_commission_percentage(pool_address: address): u64
 
@@ -866,7 +871,8 @@ Return the operator commission percentage set on the delegation pool pool_ Return the number of delegators owning active stake within pool_address. -
public fun shareholders_count_active_pool(pool_address: address): u64
+
#[view]
+public fun shareholders_count_active_pool(pool_address: address): u64
 
@@ -893,7 +899,8 @@ Return the stake amounts on pool_address in the different states: (active,inactive,pending_active,pending_inactive) -
public fun get_delegation_pool_stake(pool_address: address): (u64, u64, u64, u64)
+
#[view]
+public fun get_delegation_pool_stake(pool_address: address): (u64, u64, u64, u64)
 
@@ -920,7 +927,8 @@ Return whether the given delegator has any withdrawable stake. If they recently some stake and the stake pool's lockup cycle has not ended, their coins are not withdrawable yet. -
public fun get_pending_withdrawal(pool_address: address, delegator_address: address): (bool, u64)
+
#[view]
+public fun get_pending_withdrawal(pool_address: address, delegator_address: address): (bool, u64)
 
@@ -979,7 +987,8 @@ Return total stake owned by delegator_address within delegation poo in each of its individual states: (active,inactive,pending_inactive) -
public fun get_stake(pool_address: address, delegator_address: address): (u64, u64, u64)
+
#[view]
+public fun get_stake(pool_address: address, delegator_address: address): (u64, u64, u64)
 
@@ -1060,7 +1069,8 @@ for the rewards the remaining stake would have earned if active:
 extracted-fee = (amount - extracted-fee) * reward-rate% * (100% - operator-commission%)
 
-public fun get_add_stake_fee(pool_address: address, amount: u64): u64
+#[view]
+public fun get_add_stake_fee(pool_address: address, amount: u64): u64
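The `extracted-fee` in the formula above is defined implicitly, since it also appears on the right-hand side. Solving that relation for the fee gives a closed form; this is only algebra on the documented equation (with `r` as shorthand for the effective rate), not a claim about the exact integer arithmetic used on-chain:

```latex
\text{fee} = (\text{amount} - \text{fee})\cdot r,\qquad
r = \text{reward\_rate}\cdot(1 - \text{operator\_commission})
\;\Longrightarrow\;
\text{fee} = \frac{\text{amount}\cdot r}{1 + r}
```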
 
@@ -1097,7 +1107,8 @@ the delegation pool, implicitly its stake pool, in the special case the validator had gone inactive before its lockup expired. -
public fun can_withdraw_pending_inactive(pool_address: address): bool
+
#[view]
+public fun can_withdraw_pending_inactive(pool_address: address): bool
 
diff --git a/aptos-move/framework/aptos-framework/doc/fungible_asset.md b/aptos-move/framework/aptos-framework/doc/fungible_asset.md index 4116601a5ac6f..55018f1719eb8 100644 --- a/aptos-move/framework/aptos-framework/doc/fungible_asset.md +++ b/aptos-move/framework/aptos-framework/doc/fungible_asset.md @@ -81,7 +81,8 @@ metadata object can be any object that equipped with Supply has key +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Supply has key
 
@@ -115,7 +116,8 @@ metadata object can be any object that equipped with Metadata has key +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Metadata has key
 
@@ -171,7 +173,8 @@ Metadata of a Fungible asset The store object that holds fungible assets of a specific type associated with an account. -
struct FungibleStore has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct FungibleStore has key
 
@@ -210,7 +213,8 @@ The store object that holds fungible assets of a specific type associated with a -
struct FungibleAssetEvents has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct FungibleAssetEvents has key
 
@@ -854,7 +858,8 @@ This can only be called at object creation time as constructor_ref is only avail Get the current supply from the metadata object. -
public fun supply<T: key>(metadata: object::Object<T>): option::Option<u128>
+
#[view]
+public fun supply<T: key>(metadata: object::Object<T>): option::Option<u128>
 
@@ -885,7 +890,8 @@ Get the current supply from the metadata object. Get the maximum supply from the metadata object. -
public fun maximum<T: key>(metadata: object::Object<T>): option::Option<u128>
+
#[view]
+public fun maximum<T: key>(metadata: object::Object<T>): option::Option<u128>
 
@@ -916,7 +922,8 @@ Get the maximum supply from the metadata object. Get the name of the fungible asset from the metadata object. -
public fun name<T: key>(metadata: object::Object<T>): string::String
+
#[view]
+public fun name<T: key>(metadata: object::Object<T>): string::String
 
@@ -941,7 +948,8 @@ Get the name of the fungible asset from the metadata object. Get the symbol of the fungible asset from the metadata object. -
public fun symbol<T: key>(metadata: object::Object<T>): string::String
+
#[view]
+public fun symbol<T: key>(metadata: object::Object<T>): string::String
 
@@ -966,7 +974,8 @@ Get the symbol of the fungible asset from the metadata object. Get the decimals from the metadata object. -
public fun decimals<T: key>(metadata: object::Object<T>): u8
+
#[view]
+public fun decimals<T: key>(metadata: object::Object<T>): u8
 
@@ -991,7 +1000,8 @@ Get the decimals from the metadata object. Return whether the provided address has a store initialized. -
public fun store_exists(store: address): bool
+
#[view]
+public fun store_exists(store: address): bool
 
@@ -1041,7 +1051,8 @@ Return the underlying metadata object Return the underlying metadata object. -
public fun store_metadata<T: key>(store: object::Object<T>): object::Object<fungible_asset::Metadata>
+
#[view]
+public fun store_metadata<T: key>(store: object::Object<T>): object::Object<fungible_asset::Metadata>
 
@@ -1091,7 +1102,8 @@ Return the amount of a given fungible asset. Get the balance of a given store. -
public fun balance<T: key>(store: object::Object<T>): u64
+
#[view]
+public fun balance<T: key>(store: object::Object<T>): u64
 
@@ -1122,7 +1134,8 @@ Return whether a store is frozen. If the store has not been created, we default to returning false so deposits can be sent to it. -
public fun is_frozen<T: key>(store: object::Object<T>): bool
+
#[view]
+public fun is_frozen<T: key>(store: object::Object<T>): bool
 
diff --git a/aptos-move/framework/aptos-framework/doc/gas_schedule.md b/aptos-move/framework/aptos-framework/doc/gas_schedule.md index 40d4c1bf1873b..4880dc4be03e6 100644 --- a/aptos-move/framework/aptos-framework/doc/gas_schedule.md +++ b/aptos-move/framework/aptos-framework/doc/gas_schedule.md @@ -318,7 +318,7 @@ This can be called by on-chain governance to update the gas schedule. -
pragma timeout = 100;
+
pragma verify_duration_estimate = 200;
 requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
 include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
diff --git a/aptos-move/framework/aptos-framework/doc/genesis.md b/aptos-move/framework/aptos-framework/doc/genesis.md
index 75043570e080b..38011baa60573 100644
--- a/aptos-move/framework/aptos-framework/doc/genesis.md
+++ b/aptos-move/framework/aptos-framework/doc/genesis.md
@@ -444,12 +444,9 @@ Only called for testnets and e2e tests.
 
 
 
fun create_accounts(aptos_framework: &signer, accounts: vector<AccountMap>) {
-    let i = 0;
-    let num_accounts = vector::length(&accounts);
     let unique_accounts = vector::empty();
-
-    while (i < num_accounts) {
-        let account_map = vector::borrow(&accounts, i);
+    vector::for_each_ref(&accounts, |account_map| {
+        let account_map: &AccountMap = account_map;
         assert!(
             !vector::contains(&unique_accounts, &account_map.account_address),
             error::already_exists(EDUPLICATE_ACCOUNT),
@@ -461,9 +458,7 @@ Only called for testnets and e2e tests.
             account_map.account_address,
             account_map.balance,
         );
-
-        i = i + 1;
-    };
+    });
 }
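This is the iteration pattern used throughout the rest of this change: the index-based `while` loop is replaced by `vector::for_each_ref`, and the closure parameter is immediately re-bound with an explicit reference type so that field accesses type-check. A standalone sketch with a hypothetical helper, assumed to live in a module where `std::vector` and `AccountMap` are in scope (it is not part of `genesis.move`):

```move
// Sketch of the for_each_ref pattern: iterate by reference, re-bind the
// lambda parameter with an explicit type, and accumulate into a local that
// the inlined closure borrows mutably.
fun collect_addresses(accounts: &vector<AccountMap>): vector<address> {
    let addresses = vector::empty<address>();
    vector::for_each_ref(accounts, |account_map| {
        let account_map: &AccountMap = account_map;
        vector::push_back(&mut addresses, account_map.account_address);
    });
    addresses
}
```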
 
@@ -524,13 +519,11 @@ If it exists, it just returns the signer.
     employee_vesting_period_duration: u64,
     employees: vector<EmployeeAccountMap>,
 ) {
-    let i = 0;
-    let num_employee_groups = vector::length(&employees);
     let unique_accounts = vector::empty();
-    while (i < num_employee_groups) {
+    vector::for_each_ref(&employees, |employee_group| {
         let j = 0;
-        let employee_group = vector::borrow(&employees, i);
+        let employee_group: &EmployeeAccountMap = employee_group;
         let num_employees_in_group = vector::length(&employee_group.accounts);
         let buy_ins = simple_map::create();
@@ -604,9 +597,7 @@ If it exists, it just returns the signer.
         if (employee_group.validator.join_during_genesis) {
             initialize_validator(pool_address, validator);
         };
-
-        i = i + 1;
-    }
+    });
 }
@@ -634,14 +625,10 @@ If it exists, it just returns the signer.
     use_staking_contract: bool,
     validators: vector<ValidatorConfigurationWithCommission>,
 ) {
-    let i = 0;
-    let num_validators = vector::length(&validators);
-    while (i < num_validators) {
-        let validator = vector::borrow(&validators, i);
+    vector::for_each_ref(&validators, |validator| {
+        let validator: &ValidatorConfigurationWithCommission = validator;
         create_initialize_validator(aptos_framework, validator, use_staking_contract);
-
-        i = i + 1;
-    };
+    });
 
     // Destroy the aptos framework account's ability to mint coins now that we're done with setting up the initial
     // validators.
@@ -681,21 +668,15 @@ encoded in a single BCS byte array.
fun create_initialize_validators(aptos_framework: &signer, validators: vector<ValidatorConfiguration>) {
-    let i = 0;
-    let num_validators = vector::length(&validators);
-
     let validators_with_commission = vector::empty();
-
-    while (i < num_validators) {
+    vector::for_each_reverse(validators, |validator| {
         let validator_with_commission = ValidatorConfigurationWithCommission {
-            validator_config: vector::pop_back(&mut validators),
+            validator_config: validator,
             commission_percentage: 0,
             join_during_genesis: true,
         };
         vector::push_back(&mut validators_with_commission, validator_with_commission);
-
-        i = i + 1;
-    };
+    });
 
     create_initialize_validators_with_commission(aptos_framework, false, validators_with_commission);
 }
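Note that this last rewrite uses `vector::for_each_reverse`, which consumes the vector from the back, because the loop it replaces drained `validators` with `vector::pop_back`; iterating in reverse keeps `validators_with_commission` in the same order as before. A minimal sketch of that ordering property, with a hypothetical helper not taken from this module:

```move
// Sketch: for_each_reverse yields elements in the same order a pop_back
// loop would have, i.e. last element first.
fun pop_order(xs: vector<u64>): vector<u64> {
    let out = vector::empty<u64>();
    vector::for_each_reverse(xs, |x| {
        vector::push_back(&mut out, x);
    });
    out
}
```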
@@ -831,7 +812,8 @@ The last step of genesis.
 
 
 
-
fun initialize_for_verification(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64, aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64, accounts: vector<genesis::AccountMap>, employee_vesting_start: u64, employee_vesting_period_duration: u64, employees: vector<genesis::EmployeeAccountMap>, validators: vector<genesis::ValidatorConfigurationWithCommission>)
+
#[verify_only]
+fun initialize_for_verification(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64, aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64, accounts: vector<genesis::AccountMap>, employee_vesting_start: u64, employee_vesting_period_duration: u64, employees: vector<genesis::EmployeeAccountMap>, validators: vector<genesis::ValidatorConfigurationWithCommission>)
 
@@ -914,7 +896,8 @@ The last step of genesis. ### Function `initialize_for_verification` -
fun initialize_for_verification(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64, aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64, accounts: vector<genesis::AccountMap>, employee_vesting_start: u64, employee_vesting_period_duration: u64, employees: vector<genesis::EmployeeAccountMap>, validators: vector<genesis::ValidatorConfigurationWithCommission>)
+
#[verify_only]
+fun initialize_for_verification(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64, aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64, accounts: vector<genesis::AccountMap>, employee_vesting_start: u64, employee_vesting_period_duration: u64, employees: vector<genesis::EmployeeAccountMap>, validators: vector<genesis::ValidatorConfigurationWithCommission>)
 
diff --git a/aptos-move/framework/aptos-framework/doc/multisig_account.md b/aptos-move/framework/aptos-framework/doc/multisig_account.md
index b4639d3dafd53..8d890f7668f64 100644
--- a/aptos-move/framework/aptos-framework/doc/multisig_account.md
+++ b/aptos-move/framework/aptos-framework/doc/multisig_account.md
@@ -71,13 +71,16 @@ and implement the governance voting logic on top.
 - [Function `create_with_existing_account`](#0x1_multisig_account_create_with_existing_account)
 - [Function `create`](#0x1_multisig_account_create)
 - [Function `create_with_owners`](#0x1_multisig_account_create_with_owners)
+- [Function `create_with_owners_then_remove_bootstrapper`](#0x1_multisig_account_create_with_owners_then_remove_bootstrapper)
 - [Function `create_with_owners_internal`](#0x1_multisig_account_create_with_owners_internal)
 - [Function `add_owner`](#0x1_multisig_account_add_owner)
 - [Function `add_owners`](#0x1_multisig_account_add_owners)
 - [Function `add_owners_and_update_signatures_required`](#0x1_multisig_account_add_owners_and_update_signatures_required)
 - [Function `remove_owner`](#0x1_multisig_account_remove_owner)
 - [Function `remove_owners`](#0x1_multisig_account_remove_owners)
-- [Function `remove_owners_and_update_signatures_required`](#0x1_multisig_account_remove_owners_and_update_signatures_required)
+- [Function `swap_owner`](#0x1_multisig_account_swap_owner)
+- [Function `swap_owners`](#0x1_multisig_account_swap_owners)
+- [Function `swap_owners_and_update_signatures_required`](#0x1_multisig_account_swap_owners_and_update_signatures_required)
 - [Function `update_signatures_required`](#0x1_multisig_account_update_signatures_required)
 - [Function `update_metadata`](#0x1_multisig_account_update_metadata)
 - [Function `update_metadata_internal`](#0x1_multisig_account_update_metadata_internal)
@@ -98,6 +101,7 @@ and implement the governance voting logic on top.
 - [Function `assert_is_owner`](#0x1_multisig_account_assert_is_owner)
 - [Function `num_approvals_and_rejections`](#0x1_multisig_account_num_approvals_and_rejections)
 - [Function `assert_multisig_account_exists`](#0x1_multisig_account_assert_multisig_account_exists)
+- [Function `update_owner_schema`](#0x1_multisig_account_update_owner_schema)
use 0x1::account;
@@ -865,6 +869,16 @@ The number of metadata keys and values don't match.
 
 
 
+
+
+Provided owners to remove and new owners overlap.
+
+
+
const EOWNERS_TO_REMOVE_NEW_OWNERS_OVERLAP: u64 = 18;
+
+
 The multisig account itself cannot be an owner.
@@ -912,7 +926,8 @@ Transaction with specified id cannot be found.
 Return the multisig account's metadata.
 
-public fun metadata(multisig_account: address): simple_map::SimpleMap<string::String, vector<u8>>
+#[view]
+public fun metadata(multisig_account: address): simple_map::SimpleMap<string::String, vector<u8>>
 
@@ -938,7 +953,8 @@ Return the number of signatures required to execute or execute-reject a transact multisig account. -
public fun num_signatures_required(multisig_account: address): u64
+
#[view]
+public fun num_signatures_required(multisig_account: address): u64
 
@@ -963,7 +979,8 @@ multisig account. Return a vector of all of the provided multisig account's owners. -
public fun owners(multisig_account: address): vector<address>
+
#[view]
+public fun owners(multisig_account: address): vector<address>
 
@@ -988,7 +1005,8 @@ Return a vector of all of the provided multisig account's owners. Return the transaction with the given transaction id. -
public fun get_transaction(multisig_account: address, sequence_number: u64): multisig_account::MultisigTransaction
+
#[view]
+public fun get_transaction(multisig_account: address, sequence_number: u64): multisig_account::MultisigTransaction
 
@@ -1021,7 +1039,8 @@ Return the transaction with the given transaction id. Return all pending transactions. -
public fun get_pending_transactions(multisig_account: address): vector<multisig_account::MultisigTransaction>
+
#[view]
+public fun get_pending_transactions(multisig_account: address): vector<multisig_account::MultisigTransaction>
 
@@ -1054,7 +1073,8 @@ Return all pending transactions. Return the payload for the next transaction in the queue. -
public fun get_next_transaction_payload(multisig_account: address, provided_payload: vector<u8>): vector<u8>
+
#[view]
+public fun get_next_transaction_payload(multisig_account: address, provided_payload: vector<u8>): vector<u8>
 
@@ -1088,7 +1108,8 @@ Return the payload for the next transaction in the queue. Return true if the transaction with given transaction id can be executed now. -
public fun can_be_executed(multisig_account: address, sequence_number: u64): bool
+
#[view]
+public fun can_be_executed(multisig_account: address, sequence_number: u64): bool
 
@@ -1122,7 +1143,8 @@ Return true if the transaction with given transaction id can be executed now. Return true if the transaction with given transaction id can be officially rejected. -
public fun can_be_rejected(multisig_account: address, sequence_number: u64): bool
+
#[view]
+public fun can_be_rejected(multisig_account: address, sequence_number: u64): bool
 
@@ -1156,7 +1178,8 @@ Return true if the transaction with given transaction id can be officially rejec Return the predicted address for the next multisig account if created from the given creator address. -
public fun get_next_multisig_account_address(creator: address): address
+
#[view]
+public fun get_next_multisig_account_address(creator: address): address
 
@@ -1182,7 +1205,8 @@ Return the predicted address for the next multisig account if created from the g Return the id of the last transaction that was executed (successful or failed) or removed. -
public fun last_resolved_sequence_number(multisig_account: address): u64
+
#[view]
+public fun last_resolved_sequence_number(multisig_account: address): u64
 
@@ -1208,7 +1232,8 @@ Return the id of the last transaction that was executed (successful or failed) o Return the id of the next transaction created. -
public fun next_sequence_number(multisig_account: address): u64
+
#[view]
+public fun next_sequence_number(multisig_account: address): u64
 
@@ -1234,7 +1259,8 @@ Return the id of the next transaction created. Return a bool tuple indicating whether an owner has voted and if so, whether they voted yes or no. -
public fun vote(multisig_account: address, sequence_number: u64, owner: address): (bool, bool)
+
#[view]
+public fun vote(multisig_account: address, sequence_number: u64, owner: address): (bool, bool)
 
@@ -1401,6 +1427,53 @@ at most the total number of owners.
+
+
+
+## Function `create_with_owners_then_remove_bootstrapper`
+
+Like create_with_owners, but removes the calling account after creation.
+
+This is for creating a vanity multisig account from a bootstrapping account that should not
+be an owner after the vanity multisig address has been secured.
+
+
public entry fun create_with_owners_then_remove_bootstrapper(bootstrapper: &signer, owners: vector<address>, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+
+
+Implementation
+
public entry fun create_with_owners_then_remove_bootstrapper(
+    bootstrapper: &signer,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    let bootstrapper_address = address_of(bootstrapper);
+    create_with_owners(
+        bootstrapper,
+        owners,
+        num_signatures_required,
+        metadata_keys,
+        metadata_values
+    );
+    update_owner_schema(
+        get_next_multisig_account_address(bootstrapper_address),
+        vector[],
+        vector[bootstrapper_address],
+        option::none()
+    );
+}
+
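A usage sketch for the new entry point, with placeholder addresses; a real invocation additionally needs funded accounts and the framework deployed, so this is illustrative only:

```move
script {
    use aptos_framework::multisig_account;

    // Hypothetical bootstrap flow: the bootstrapper signs the creation but
    // is removed immediately, leaving a 2-of-3 multisig among the listed
    // owners at the derived (vanity) address.
    fun bootstrap_vanity_multisig(bootstrapper: &signer) {
        multisig_account::create_with_owners_then_remove_bootstrapper(
            bootstrapper,
            vector[@0xa11ce, @0xb0b, @0xc0ffee], // placeholder owner addresses
            2,                                   // signatures required
            vector[],                            // metadata keys
            vector[]                             // metadata values
        );
    }
}
```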
@@ -1510,22 +1583,12 @@ maliciously alter the owners list.
entry fun add_owners(
     multisig_account: &signer, new_owners: vector<address>) acquires MultisigAccount {
-    // Short circuit if new owners list is empty.
-    // This avoids emitting an event if no changes happen, which is confusing to off-chain components.
-    if (vector::length(&new_owners) == 0) {
-        return
-    };
-
-    let multisig_address = address_of(multisig_account);
-    assert_multisig_account_exists(multisig_address);
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_address);
-
-    vector::append(&mut multisig_account_resource.owners, new_owners);
-    // This will fail if an existing owner is added again.
-    validate_owners(&multisig_account_resource.owners, multisig_address);
-    emit_event(&mut multisig_account_resource.add_owners_events, AddOwnersEvent {
-        owners_added: new_owners,
-    });
+    update_owner_schema(
+        address_of(multisig_account),
+        new_owners,
+        vector[],
+        option::none()
+    );
 }
 
@@ -1554,8 +1617,12 @@ Add owners then update number of signatures required, in a single operation.
     new_owners: vector<address>,
     new_num_signatures_required: u64
 ) acquires MultisigAccount {
-    add_owners(multisig_account, new_owners);
-    update_signatures_required(multisig_account, new_num_signatures_required);
+    update_owner_schema(
+        address_of(multisig_account),
+        new_owners,
+        vector[],
+        option::some(new_num_signatures_required)
+    );
 }
@@ -1613,36 +1680,80 @@ maliciously alter the owners list.
entry fun remove_owners(
     multisig_account: &signer, owners_to_remove: vector<address>) acquires MultisigAccount {
-    // Short circuit if the list of owners to remove is empty.
-    // This avoids emitting an event if no changes happen, which is confusing to off-chain components.
-    if (vector::length(&owners_to_remove) == 0) {
-        return
-    };
+    update_owner_schema(
+        address_of(multisig_account),
+        vector[],
+        owners_to_remove,
+        option::none()
+    );
+}
+
-    let multisig_address = address_of(multisig_account);
-    assert_multisig_account_exists(multisig_address);
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_address);
-    let owners = &mut multisig_account_resource.owners;
-    let owners_removed = vector::empty<address>();
-    vector::for_each_ref(&owners_to_remove, |owner_to_remove| {
-        let owner_to_remove = *owner_to_remove;
-        let (found, index) = vector::index_of(owners, &owner_to_remove);
-        // Only remove an owner if they're present in the owners list.
-        if (found) {
-            vector::push_back(&mut owners_removed, owner_to_remove);
-            vector::swap_remove(owners, index);
-        };
-    });
-    // Make sure there's still at least as many owners as the number of signatures required.
-    // This also ensures that there's at least one owner left as signature threshold must be > 0.
-    assert!(
-        vector::length(owners) >= multisig_account_resource.num_signatures_required,
-        error::invalid_state(ENOT_ENOUGH_OWNERS),
+
+
+## Function `swap_owner`
+
+Swap an owner in for an old one, without changing required signatures.
+
+
entry fun swap_owner(multisig_account: &signer, to_swap_in: address, to_swap_out: address)
+
+
+
+Implementation
+
entry fun swap_owner(
+    multisig_account: &signer,
+    to_swap_in: address,
+    to_swap_out: address
+) acquires MultisigAccount {
+    update_owner_schema(
+        address_of(multisig_account),
+        vector[to_swap_in],
+        vector[to_swap_out],
+        option::none()
     );
+}
+
+
+
+
+## Function `swap_owners`
+
+Swap owners in and out, without changing required signatures.
+
-    emit_event(&mut multisig_account_resource.remove_owners_events, RemoveOwnersEvent { owners_removed });
+
entry fun swap_owners(multisig_account: &signer, to_swap_in: vector<address>, to_swap_out: vector<address>)
+
+
+
+Implementation
+
entry fun swap_owners(
+    multisig_account: &signer,
+    to_swap_in: vector<address>,
+    to_swap_out: vector<address>
+) acquires MultisigAccount {
+    update_owner_schema(
+        address_of(multisig_account),
+        to_swap_in,
+        to_swap_out,
+        option::none()
+    );
 }
 
@@ -1650,14 +1761,14 @@ maliciously alter the owners list.
-
+
-## Function `remove_owners_and_update_signatures_required`
+## Function `swap_owners_and_update_signatures_required`
 
-Update the number of signatures required then remove owners, in a single operation.
+Swap owners in and out, updating number of required signatures.
 
-
entry fun remove_owners_and_update_signatures_required(multisig_account: &signer, owners_to_remove: vector<address>, new_num_signatures_required: u64)
+
entry fun swap_owners_and_update_signatures_required(multisig_account: &signer, new_owners: vector<address>, owners_to_remove: vector<address>, new_num_signatures_required: u64)
 
@@ -1666,13 +1777,18 @@ Update the number of signatures required then remove owners, in a single operation.
 Implementation
 
-
entry fun remove_owners_and_update_signatures_required(
+
entry fun swap_owners_and_update_signatures_required(
     multisig_account: &signer,
+    new_owners: vector<address>,
     owners_to_remove: vector<address>,
     new_num_signatures_required: u64
 ) acquires MultisigAccount {
-    update_signatures_required(multisig_account, new_num_signatures_required);
-    remove_owners(multisig_account, owners_to_remove);
+    update_owner_schema(
+        address_of(multisig_account),
+        new_owners,
+        owners_to_remove,
+        option::some(new_num_signatures_required)
+    );
 }
 
@@ -1703,28 +1819,11 @@ maliciously alter the number of signatures required.
entry fun update_signatures_required(
     multisig_account: &signer, new_num_signatures_required: u64) acquires MultisigAccount {
-    let multisig_address = address_of(multisig_account);
-    assert_multisig_account_exists(multisig_address);
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_address);
-    // Short-circuit if the new number of signatures required is the same as before.
-    // This avoids emitting an event.
-    if (multisig_account_resource.num_signatures_required == new_num_signatures_required) {
-        return
-    };
-    let num_owners = vector::length(&multisig_account_resource.owners);
-    assert!(
-        new_num_signatures_required > 0 && new_num_signatures_required <= num_owners,
-        error::invalid_argument(EINVALID_SIGNATURES_REQUIRED),
-    );
-
-    let old_num_signatures_required = multisig_account_resource.num_signatures_required;
-    multisig_account_resource.num_signatures_required = new_num_signatures_required;
-    emit_event(
-        &mut multisig_account_resource.update_signature_required_events,
-        UpdateSignaturesRequiredEvent {
-            old_num_signatures_required,
-            new_num_signatures_required,
-        }
+    update_owner_schema(
+        address_of(multisig_account),
+        vector[],
+        vector[],
+        option::some(new_num_signatures_required)
     );
 }
 
@@ -2442,6 +2541,108 @@ This function is private so no other code can call this beside the VM itself as
+
+
+
+## Function `update_owner_schema`
+
+Add new owners, remove owners to remove, update signatures required.
+
+
fun update_owner_schema(multisig_address: address, new_owners: vector<address>, owners_to_remove: vector<address>, optional_new_num_signatures_required: option::Option<u64>)
+
+
+
+Implementation
+
fun update_owner_schema(
+    multisig_address: address,
+    new_owners: vector<address>,
+    owners_to_remove: vector<address>,
+    optional_new_num_signatures_required: Option<u64>,
+) acquires MultisigAccount {
+    assert_multisig_account_exists(multisig_address);
+    let multisig_account_ref_mut =
+        borrow_global_mut<MultisigAccount>(multisig_address);
+    // Verify no overlap between new owners and owners to remove.
+    vector::for_each_ref(&new_owners, |new_owner_ref| {
+        assert!(
+            !vector::contains(&owners_to_remove, new_owner_ref),
+            error::invalid_argument(EOWNERS_TO_REMOVE_NEW_OWNERS_OVERLAP)
+        )
+    });
+    // If new owners provided, try to add them and emit an event.
+    if (vector::length(&new_owners) > 0) {
+        vector::append(&mut multisig_account_ref_mut.owners, new_owners);
+        validate_owners(
+            &multisig_account_ref_mut.owners,
+            multisig_address
+        );
+        emit_event(
+            &mut multisig_account_ref_mut.add_owners_events,
+            AddOwnersEvent { owners_added: new_owners }
+        );
+    };
+    // If owners to remove provided, try to remove them.
+    if (vector::length(&owners_to_remove) > 0) {
+        let owners_ref_mut = &mut multisig_account_ref_mut.owners;
+        let owners_removed = vector[];
+        vector::for_each_ref(&owners_to_remove, |owner_to_remove_ref| {
+            let (found, index) =
+                vector::index_of(owners_ref_mut, owner_to_remove_ref);
+            if (found) {
+                vector::push_back(
+                    &mut owners_removed,
+                    vector::swap_remove(owners_ref_mut, index)
+                );
+            }
+        });
+        // Only emit event if owner(s) actually removed.
+        if (vector::length(&owners_removed) > 0) {
+            emit_event(
+                &mut multisig_account_ref_mut.remove_owners_events,
+                RemoveOwnersEvent { owners_removed }
+            );
+        }
+    };
+    // If new signature count provided, try to update count.
+    if (option::is_some(&optional_new_num_signatures_required)) {
+        let new_num_signatures_required =
+            option::extract(&mut optional_new_num_signatures_required);
+        assert!(
+            new_num_signatures_required > 0,
+            error::invalid_argument(EINVALID_SIGNATURES_REQUIRED)
+        );
+        let old_num_signatures_required =
+            multisig_account_ref_mut.num_signatures_required;
+        // Only apply update and emit event if a change indicated.
+        if (new_num_signatures_required != old_num_signatures_required) {
+            multisig_account_ref_mut.num_signatures_required =
+                new_num_signatures_required;
+            emit_event(
+                &mut multisig_account_ref_mut.update_signature_required_events,
+                UpdateSignaturesRequiredEvent {
+                    old_num_signatures_required,
+                    new_num_signatures_required,
+                }
+            );
+        }
+    };
+    // Verify number of owners.
+    let num_owners = vector::length(&multisig_account_ref_mut.owners);
+    assert!(
+        num_owners >= multisig_account_ref_mut.num_signatures_required,
+        error::invalid_state(ENOT_ENOUGH_OWNERS)
+    );
+}
+
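Every owner-mutating entry point in this change now funnels through this single helper. The mapping below is a summary in comment form (argument names abbreviated; `addr` stands for `address_of(multisig_account)`), not code from the module:

```move
// add_owners(ms, add)                        -> update_owner_schema(addr, add, vector[], none)
// remove_owners(ms, rem)                     -> update_owner_schema(addr, vector[], rem, none)
// swap_owner(ms, in, out)                    -> update_owner_schema(addr, vector[in], vector[out], none)
// swap_owners(ms, add, rem)                  -> update_owner_schema(addr, add, rem, none)
// update_signatures_required(ms, n)          -> update_owner_schema(addr, vector[], vector[], some(n))
// swap_owners_and_update_signatures_required -> update_owner_schema(addr, add, rem, some(n))
```

Because the validation (no overlap between added and removed owners, and owners remaining >= signatures required) lives in one place, the individual entry functions can no longer drift apart in their checks.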
diff --git a/aptos-move/framework/aptos-framework/doc/object.md b/aptos-move/framework/aptos-framework/doc/object.md index 520ed1c935a9c..99ceda65c242d 100644 --- a/aptos-move/framework/aptos-framework/doc/object.md +++ b/aptos-move/framework/aptos-framework/doc/object.md @@ -99,7 +99,8 @@ make it so that a reference to a global object can be returned from a function. The core of the object model that defines ownership, transferability, and events. -
struct ObjectCore has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct ObjectCore has key
 
@@ -146,7 +147,8 @@ The core of the object model that defines ownership, transferability, and events A shared resource group for storing object resources together in storage. -
struct ObjectGroup
+
#[resource_group(#[scope = global])]
+struct ObjectGroup
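The generated docs render these attributes with extra bracket nesting; in Move source they are written as plain key/value attribute arguments. A hypothetical member of the group (not part of this change, with a made-up named address), to show the source-level syntax:

```move
// Hypothetical module, for illustration only.
module example::object_marker {
    // Stored in the same resource group (and therefore under the same
    // storage slot) as the other ObjectGroup members at a given address.
    #[resource_group_member(group = aptos_framework::object::ObjectGroup)]
    struct Marker has key {
        label: vector<u8>,
    }
}
```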
 
diff --git a/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md b/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md index d7b91809831e3..b3666cc5065ee 100644 --- a/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md +++ b/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md @@ -51,7 +51,8 @@ stores for users with deterministic addresses so that users can easily deposit/w assets. -
struct DeriveRefPod has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct DeriveRefPod has key
 
@@ -195,7 +196,8 @@ Create a primary store object to hold fungible asset for the given address. Get the address of the primary store for the given account. -
public fun primary_store_address<T: key>(owner: address, metadata: object::Object<T>): address
+
#[view]
+public fun primary_store_address<T: key>(owner: address, metadata: object::Object<T>): address
 
@@ -221,7 +223,8 @@ Get the address of the primary store for the given account. Get the primary store object for the given account. -
public fun primary_store<T: key>(owner: address, metadata: object::Object<T>): object::Object<fungible_asset::FungibleStore>
+
#[view]
+public fun primary_store<T: key>(owner: address, metadata: object::Object<T>): object::Object<fungible_asset::FungibleStore>
 
@@ -247,7 +250,8 @@ Get the primary store object for the given account. Return whether the given account's primary store exists. -
public fun primary_store_exists<T: key>(account: address, metadata: object::Object<T>): bool
+
#[view]
+public fun primary_store_exists<T: key>(account: address, metadata: object::Object<T>): bool
 
@@ -272,7 +276,8 @@ Return whether the given account's primary store exists. Get the balance of account's primary store. -
public fun balance<T: key>(account: address, metadata: object::Object<T>): u64
+
#[view]
+public fun balance<T: key>(account: address, metadata: object::Object<T>): u64
 
@@ -301,7 +306,8 @@ Get the balance of account's p Return whether the given account's primary store is frozen. -
public fun is_frozen<T: key>(account: address, metadata: object::Object<T>): bool
+
#[view]
+public fun is_frozen<T: key>(account: address, metadata: object::Object<T>): bool
 
diff --git a/aptos-move/framework/aptos-framework/doc/stake.md b/aptos-move/framework/aptos-framework/doc/stake.md index 2e32dfd15cda9..f829d2a58aceb 100644 --- a/aptos-move/framework/aptos-framework/doc/stake.md +++ b/aptos-move/framework/aptos-framework/doc/stake.md @@ -105,6 +105,7 @@ or if their stake drops below the min required, they would get removed at the en - [Function `is_allowed`](#0x1_stake_is_allowed) - [Function `assert_owner_cap_exists`](#0x1_stake_assert_owner_cap_exists) - [Specification](#@Specification_1) + - [Function `initialize_validator_fees`](#@Specification_1_initialize_validator_fees) - [Function `add_transaction_fee`](#@Specification_1_add_transaction_fee) - [Function `get_validator_state`](#@Specification_1_get_validator_state) - [Function `initialize`](#@Specification_1_initialize) @@ -1372,7 +1373,8 @@ Return the lockup expiration of the stake pool at pool_address. This will throw an error if there's no stake pool at pool_address. -
public fun get_lockup_secs(pool_address: address): u64
+
#[view]
+public fun get_lockup_secs(pool_address: address): u64
 
@@ -1399,7 +1401,8 @@ Return the remaining lockup of the stake pool at pool_address. This will throw an error if there's no stake pool at pool_address. -
public fun get_remaining_lockup_secs(pool_address: address): u64
+
#[view]
+public fun get_remaining_lockup_secs(pool_address: address): u64
 
@@ -1431,7 +1434,8 @@ Return the different stake amounts for pool_address (whether the va The returned amounts are for (active, inactive, pending_active, pending_inactive) stake respectively. -
public fun get_stake(pool_address: address): (u64, u64, u64, u64)
+
#[view]
+public fun get_stake(pool_address: address): (u64, u64, u64, u64)
 
@@ -1463,7 +1467,8 @@ The returned amounts are for (active, inactive, pending_active, pending_inactive Returns the validator's state. -
public fun get_validator_state(pool_address: address): u64
+
#[view]
+public fun get_validator_state(pool_address: address): u64
 
@@ -1498,7 +1503,8 @@ Return the voting power of the validator in the current epoch. This is the same as the validator's total active and pending_inactive stake. -
public fun get_current_epoch_voting_power(pool_address: address): u64
+
#[view]
+public fun get_current_epoch_voting_power(pool_address: address): u64
 
@@ -1532,7 +1538,8 @@ This is the same as the validator's total active and pending_inactive stake. Return the delegated voter of the validator at pool_address. -
public fun get_delegated_voter(pool_address: address): address
+
#[view]
+public fun get_delegated_voter(pool_address: address): address
 
@@ -1558,7 +1565,8 @@ Return the delegated voter of the validator at pool_address. Return the operator of the validator at pool_address. -
public fun get_operator(pool_address: address): address
+
#[view]
+public fun get_operator(pool_address: address): address
 
@@ -1609,7 +1617,8 @@ Return the pool address in owner_cap. Return the validator index for pool_address. -
public fun get_validator_index(pool_address: address): u64
+
#[view]
+public fun get_validator_index(pool_address: address): u64
 
@@ -1635,7 +1644,8 @@ Return the validator index for pool_address. Return the number of successful and failed proposals for the proposal at the given validator index. -
public fun get_current_epoch_proposal_counts(validator_index: u64): (u64, u64)
+
#[view]
+public fun get_current_epoch_proposal_counts(validator_index: u64): (u64, u64)
 
@@ -1662,7 +1672,8 @@ Return the number of successful and failed proposals for the proposal at the giv Return the validator's config. -
public fun get_validator_config(pool_address: address): (vector<u8>, vector<u8>, vector<u8>)
+
#[view]
+public fun get_validator_config(pool_address: address): (vector<u8>, vector<u8>, vector<u8>)
 
@@ -1688,7 +1699,8 @@ Return the validator's config. -
public fun stake_pool_exists(addr: address): bool
+
#[view]
+public fun stake_pool_exists(addr: address): bool
 
@@ -2960,23 +2972,17 @@ power.
     let validator_perf = borrow_global_mut<ValidatorPerformance>(@aptos_framework);
 
     // Process pending stake and distribute transaction fees and rewards for each currently active validator.
-    let i = 0;
-    let len = vector::length(&validator_set.active_validators);
-    while (i < len) {
-        let validator = vector::borrow(&validator_set.active_validators, i);
+    vector::for_each_ref(&validator_set.active_validators, |validator| {
+        let validator: &ValidatorInfo = validator;
         update_stake_pool(validator_perf, validator.addr, &config);
-        i = i + 1;
-    };
+    });
 
     // Process pending stake and distribute transaction fees and rewards for each currently pending_inactive validator
     // (requested to leave but not removed yet).
-    let i = 0;
-    let len = vector::length(&validator_set.pending_inactive);
-    while (i < len) {
-        let validator = vector::borrow(&validator_set.pending_inactive, i);
+    vector::for_each_ref(&validator_set.pending_inactive, |validator| {
+        let validator: &ValidatorInfo = validator;
         update_stake_pool(validator_perf, validator.addr, &config);
-        i = i + 1;
-    };
+    });
 
     // Activate currently pending_active validators.
     append(&mut validator_set.active_validators, &mut validator_set.pending_active);
@@ -3545,6 +3551,140 @@ Returns validator's next epoch voting power, including pending_active, active, a
fun spec_validators_are_initialized(validators: vector<ValidatorInfo>): bool {
+   forall i in 0..len(validators):
+       spec_has_stake_pool(validators[i].addr) &&
+           spec_has_validator_config(validators[i].addr)
+}
+
fun spec_validator_indices_are_valid(validators: vector<ValidatorInfo>): bool {
+   forall i in 0..len(validators):
+       global<ValidatorConfig>(validators[i].addr).validator_index < spec_validator_index_upper_bound()
+}
+
fun spec_validator_index_upper_bound(): u64 {
+   len(global<ValidatorPerformance>(@aptos_framework).validators)
+}
+
fun spec_has_stake_pool(a: address): bool {
+   exists<StakePool>(a)
+}
+
fun spec_has_validator_config(a: address): bool {
+   exists<ValidatorConfig>(a)
+}
+
fun spec_rewards_amount(
+   stake_amount: u64,
+   num_successful_proposals: u64,
+   num_total_proposals: u64,
+   rewards_rate: u64,
+   rewards_rate_denominator: u64,
+): u64;
+
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
+   exists i in 0..len(validators): validators[i].addr == addr
+}
+
fun spec_is_current_epoch_validator(pool_address: address): bool {
+   let validator_set = global<ValidatorSet>(@aptos_framework);
+   !spec_contains(validator_set.pending_active, pool_address)
+       && (spec_contains(validator_set.active_validators, pool_address)
+       || spec_contains(validator_set.pending_inactive, pool_address))
+}
+
schema ResourceRequirement {
+    requires exists<AptosCoinCapabilities>(@aptos_framework);
+    requires exists<ValidatorPerformance>(@aptos_framework);
+    requires exists<ValidatorSet>(@aptos_framework);
+    requires exists<StakingConfig>(@aptos_framework);
+    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
+    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    requires exists<ValidatorFees>(@aptos_framework);
+}
+
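A schema like this is meant to be pulled into individual function specs with `include` rather than repeating each `requires` clause. A minimal sketch of how it would typically be consumed (the actual spec blocks live elsewhere in the module's specification):

```move
spec on_new_epoch {
    // Sketch: assume the framework resources exist before verifying the body.
    include ResourceRequirement;
}
```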
+
+
+
+### Function `initialize_validator_fees`
+
+
public(friend) fun initialize_validator_fees(aptos_framework: &signer)
+
let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if exists<ValidatorFees>(aptos_addr);
+ensures exists<ValidatorFees>(aptos_addr);
+
+
+
+
 ### Function `add_transaction_fee`
@@ -3566,7 +3706,8 @@ Returns validator's next epoch voting power, including pending_active, active, a
 ### Function `get_validator_state`
 
-
public fun get_validator_state(pool_address: address): u64
+
#[view]
+public fun get_validator_state(pool_address: address): u64
 
@@ -4110,119 +4251,4 @@ Returns validator's next epoch voting power, including pending_active, active, a
fun spec_validators_are_initialized(validators: vector<ValidatorInfo>): bool {
-   forall i in 0..len(validators):
-       spec_has_stake_pool(validators[i].addr) &&
-           spec_has_validator_config(validators[i].addr)
-}
-
fun spec_validator_indices_are_valid(validators: vector<ValidatorInfo>): bool {
-   forall i in 0..len(validators):
-       global<ValidatorConfig>(validators[i].addr).validator_index < spec_validator_index_upper_bound()
-}
-
fun spec_validator_index_upper_bound(): u64 {
-   len(global<ValidatorPerformance>(@aptos_framework).validators)
-}
-
fun spec_has_stake_pool(a: address): bool {
-   exists<StakePool>(a)
-}
-
fun spec_has_validator_config(a: address): bool {
-   exists<ValidatorConfig>(a)
-}
-
fun spec_rewards_amount(
-   stake_amount: u64,
-   num_successful_proposals: u64,
-   num_total_proposals: u64,
-   rewards_rate: u64,
-   rewards_rate_denominator: u64,
-): u64;
-
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
-   exists i in 0..len(validators): validators[i].addr == addr
-}
-
fun spec_is_current_epoch_validator(pool_address: address): bool {
-   let validator_set = global<ValidatorSet>(@aptos_framework);
-   !spec_contains(validator_set.pending_active, pool_address)
-       && (spec_contains(validator_set.active_validators, pool_address)
-       || spec_contains(validator_set.pending_inactive, pool_address))
-}
-
schema ResourceRequirement {
-    requires exists<AptosCoinCapabilities>(@aptos_framework);
-    requires exists<ValidatorPerformance>(@aptos_framework);
-    requires exists<ValidatorSet>(@aptos_framework);
-    requires exists<StakingConfig>(@aptos_framework);
-    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
-    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
-    requires exists<ValidatorFees>(@aptos_framework);
-}
-
- - [move-book]: https://aptos.dev/guides/move-guides/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/staking_contract.md b/aptos-move/framework/aptos-framework/doc/staking_contract.md index 81728ee607b9e..6b096895e48b8 100644 --- a/aptos-move/framework/aptos-framework/doc/staking_contract.md +++ b/aptos-move/framework/aptos-framework/doc/staking_contract.md @@ -123,7 +123,8 @@ pool. -
struct StakingGroupContainer
+
#[resource_group(#[scope = module_])]
+struct StakingGroupContainer
 
@@ -333,7 +334,8 @@ pool. -
struct StakingGroupUpdateCommissionEvent has key
+
#[resource_group_member(#[group = 0x1::staking_contract::StakingGroupContainer])]
+struct StakingGroupUpdateCommissionEvent has key
 
@@ -849,7 +851,8 @@ operator. This errors out the staking contract with the provided staker and operator doesn't exist. -
public fun stake_pool_address(staker: address, operator: address): address
+
#[view]
+public fun stake_pool_address(staker: address, operator: address): address
 
@@ -879,7 +882,8 @@ for staking contract between the provided staker and operator. This errors out the staking contract with the provided staker and operator doesn't exist. -
public fun last_recorded_principal(staker: address, operator: address): u64
+
#[view]
+public fun last_recorded_principal(staker: address, operator: address): u64
 
@@ -909,7 +913,8 @@ between the provided staker and operator. This errors out the staking contract with the provided staker and operator doesn't exist. -
public fun commission_percentage(staker: address, operator: address): u64
+
#[view]
+public fun commission_percentage(staker: address, operator: address): u64
 
@@ -941,7 +946,8 @@ Return a tuple of three numbers: This errors out the staking contract with the provided staker and operator doesn't exist. -
public fun staking_contract_amounts(staker: address, operator: address): (u64, u64, u64)
+
#[view]
+public fun staking_contract_amounts(staker: address, operator: address): (u64, u64, u64)
 
@@ -971,7 +977,8 @@ Return the number of pending distributions (e.g. commission, withdrawals from st This errors out the staking contract with the provided staker and operator doesn't exist. -
public fun pending_distribution_counts(staker: address, operator: address): u64
+
#[view]
+public fun pending_distribution_counts(staker: address, operator: address): u64
 
@@ -998,7 +1005,8 @@ This errors out the staking contract with the provided staker and operator doesn Return true if the staking contract between the provided staker and operator exists. -
public fun staking_contract_exists(staker: address, operator: address): bool
+
#[view]
+public fun staking_contract_exists(staker: address, operator: address): bool
 
@@ -1866,10 +1874,8 @@ Calculate accumulated rewards and commissions since last update.
     // Charge all stakeholders (except for the operator themselves) commission on any rewards earnt relatively to the
     // previous value of the distribution pool.
     let shareholders = &pool_u64::shareholders(distribution_pool);
-    let len = vector::length(shareholders);
-    let i = 0;
-    while (i < len) {
-        let shareholder = *vector::borrow(shareholders, i);
+    vector::for_each_ref(shareholders, |shareholder| {
+        let shareholder: address = *shareholder;
         if (shareholder != operator) {
             let shares = pool_u64::shares(distribution_pool, shareholder);
             let previous_worth = pool_u64::balance(distribution_pool, shareholder);
@@ -1882,9 +1888,7 @@ Calculate accumulated rewards and commissions since last update.
                 distribution_pool, unpaid_commission, updated_total_coins);
             pool_u64::transfer_shares(distribution_pool, shareholder, operator, shares_to_transfer);
         };
-
-        i = i + 1;
-    };
+    });
 
     pool_u64::update_total_coins(distribution_pool, updated_total_coins);
 }
@@ -1948,7 +1952,8 @@ Create a new staking_contracts resource.
 ### Function `stake_pool_address`
 
-
public fun stake_pool_address(staker: address, operator: address): address
+
#[view]
+public fun stake_pool_address(staker: address, operator: address): address
 
@@ -1964,7 +1969,8 @@ Create a new staking_contracts resource. ### Function `last_recorded_principal` -
public fun last_recorded_principal(staker: address, operator: address): u64
+
#[view]
+public fun last_recorded_principal(staker: address, operator: address): u64
 
@@ -1981,7 +1987,8 @@ Staking_contract exists for the staker/operator pair. ### Function `commission_percentage` -
public fun commission_percentage(staker: address, operator: address): u64
+
#[view]
+public fun commission_percentage(staker: address, operator: address): u64
 
@@ -1998,7 +2005,8 @@ Staking_contract exists for the staker/operator pair. ### Function `staking_contract_amounts` -
public fun staking_contract_amounts(staker: address, operator: address): (u64, u64, u64)
+
#[view]
+public fun staking_contract_amounts(staker: address, operator: address): (u64, u64, u64)
 
@@ -2019,7 +2027,8 @@ Staking_contract exists for the staker/operator pair. ### Function `pending_distribution_counts` -
public fun pending_distribution_counts(staker: address, operator: address): u64
+
#[view]
+public fun pending_distribution_counts(staker: address, operator: address): u64
 
@@ -2036,7 +2045,8 @@ Staking_contract exists for the staker/operator pair. ### Function `staking_contract_exists` -
public fun staking_contract_exists(staker: address, operator: address): bool
+
#[view]
+public fun staking_contract_exists(staker: address, operator: address): bool
 
diff --git a/aptos-move/framework/aptos-framework/doc/staking_proxy.md b/aptos-move/framework/aptos-framework/doc/staking_proxy.md index 60cdf4579c99e..e4e9c7d5e4e38 100644 --- a/aptos-move/framework/aptos-framework/doc/staking_proxy.md +++ b/aptos-move/framework/aptos-framework/doc/staking_proxy.md @@ -102,16 +102,13 @@
public entry fun set_vesting_contract_operator(owner: &signer, old_operator: address, new_operator: address) {
     let owner_address = signer::address_of(owner);
     let vesting_contracts = &vesting::vesting_contracts(owner_address);
-    let i = 0;
-    let len = vector::length(vesting_contracts);
-    while (i < len) {
-        let vesting_contract = *vector::borrow(vesting_contracts, i);
+    vector::for_each_ref(vesting_contracts, |vesting_contract| {
+        let vesting_contract = *vesting_contract;
         if (vesting::operator(vesting_contract) == old_operator) {
             let current_commission_percentage = vesting::operator_commission_percentage(vesting_contract);
             vesting::update_operator(owner, vesting_contract, new_operator, current_commission_percentage);
         };
-        i = i + 1;
-    }
+    });
 }
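This hunk, like the analogous ones in staking_contract.md above and vesting.md below, replaces a hand-rolled `while` loop over a vector with the standard-library inline iterator `vector::for_each_ref`, which removes the index bookkeeping. A minimal before/after sketch, on a hypothetical summation:

```move
module 0x42::example_iter {
    use std::vector;

    /// Old style: explicit index and length bookkeeping.
    fun sum_with_while(values: &vector<u64>): u64 {
        let total = 0;
        let i = 0;
        let len = vector::length(values);
        while (i < len) {
            total = total + *vector::borrow(values, i);
            i = i + 1;
        };
        total
    }

    /// New style: for_each_ref hands each element to the lambda by reference;
    /// the lambda may capture and update locals such as `total`.
    fun sum_with_for_each_ref(values: &vector<u64>): u64 {
        let total = 0;
        vector::for_each_ref(values, |v| {
            total = total + *v;
        });
        total
    }
}
```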
 
@@ -192,15 +189,12 @@
public entry fun set_vesting_contract_voter(owner: &signer, operator: address, new_voter: address) {
     let owner_address = signer::address_of(owner);
     let vesting_contracts = &vesting::vesting_contracts(owner_address);
-    let i = 0;
-    let len = vector::length(vesting_contracts);
-    while (i < len) {
-        let vesting_contract = *vector::borrow(vesting_contracts, i);
+    vector::for_each_ref(vesting_contracts, |vesting_contract| {
+        let vesting_contract = *vesting_contract;
         if (vesting::operator(vesting_contract) == operator) {
             vesting::update_voter(owner, vesting_contract, new_voter);
         };
-        i = i + 1;
-    }
+    });
 }
 
@@ -363,6 +357,22 @@ One of them does not exist. + + + + +
schema SetStakePoolOperator {
+    owner: &signer;
+    new_operator: address;
+    let owner_address = signer::address_of(owner);
+    let ownership_cap = borrow_global<stake::OwnerCapability>(owner_address);
+    let pool_address = ownership_cap.pool_address;
+    aborts_if stake::stake_pool_exists(owner_address) && !(exists<stake::OwnerCapability>(owner_address) && stake::stake_pool_exists(pool_address));
+}
+
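The `SetStakePoolOperator` schema added above is the usual Move Prover device for sharing abort conditions between related function specs: the conditions are written once in a `spec schema` and pulled into each function spec with `include`. A minimal sketch under hypothetical names (it assumes a target module that defines `Balance` and `withdraw`):

```move
spec 0x42::example_module {
    spec schema WithdrawAbortsIf {
        addr: address;
        amount: u64;
        aborts_if !exists<Balance>(addr);
        aborts_if global<Balance>(addr).value < amount;
    }

    spec withdraw(addr: address, amount: u64): u64 {
        // Reuse the shared conditions instead of restating them here.
        include WithdrawAbortsIf;
    }
}
```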
+ + + ### Function `set_vesting_contract_voter` diff --git a/aptos-move/framework/aptos-framework/doc/timestamp.md b/aptos-move/framework/aptos-framework/doc/timestamp.md index c00827692ed31..fe4b42000ded5 100644 --- a/aptos-move/framework/aptos-framework/doc/timestamp.md +++ b/aptos-move/framework/aptos-framework/doc/timestamp.md @@ -163,7 +163,8 @@ Updates the wall clock time by consensus. Requires VM privilege and will be invo Gets the current time in microseconds. -
public fun now_microseconds(): u64
+
#[view]
+public fun now_microseconds(): u64
 
@@ -188,7 +189,8 @@ Gets the current time in microseconds. Gets the current time in seconds. -
public fun now_seconds(): u64
+
#[view]
+public fun now_seconds(): u64
 
diff --git a/aptos-move/framework/aptos-framework/doc/transaction_fee.md b/aptos-move/framework/aptos-framework/doc/transaction_fee.md index eff99bc7767f8..1111e88db5756 100644 --- a/aptos-move/framework/aptos-framework/doc/transaction_fee.md +++ b/aptos-move/framework/aptos-framework/doc/transaction_fee.md @@ -525,7 +525,14 @@ Only called during genesis. -
pragma verify=false;
+
aborts_if exists<CollectedFeesPerBlock>(@aptos_framework);
+aborts_if burn_percentage > 100;
+let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if exists<ValidatorFees>(aptos_addr);
+include system_addresses::AbortsIfNotAptosFramework {account: aptos_framework};
+include aggregator_factory::CreateAggregatorInternalAbortsIf;
+ensures exists<ValidatorFees>(aptos_addr);
 
@@ -541,7 +548,15 @@ Only called during genesis. -
pragma verify=false;
+
aborts_if new_burn_percentage > 100;
+let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+requires exists<AptosCoinCapabilities>(@aptos_framework);
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+include RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+ensures exists<CollectedFeesPerBlock>(@aptos_framework) ==>
+    global<CollectedFeesPerBlock>(@aptos_framework).burn_percentage == new_burn_percentage;
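Both hunks above drop a blanket `pragma verify=false` in favor of explicit abort and post conditions, so the prover now checks these functions instead of skipping them. The general shape of such a replacement, sketched on a hypothetical setter (assuming the target module defines `Config` and imports `std::signer`):

```move
spec 0x42::example_config {
    spec set_percentage(framework: &signer, new_percentage: u64) {
        // Previously this spec might have been only: pragma verify = false;
        let addr = signer::address_of(framework);
        aborts_if new_percentage > 100;
        aborts_if !exists<Config>(addr);
        ensures global<Config>(addr).percentage == new_percentage;
    }
}
```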
 
diff --git a/aptos-move/framework/aptos-framework/doc/vesting.md b/aptos-move/framework/aptos-framework/doc/vesting.md index 7485b72557c31..04eaa5b973c8a 100644 --- a/aptos-move/framework/aptos-framework/doc/vesting.md +++ b/aptos-move/framework/aptos-framework/doc/vesting.md @@ -1148,7 +1148,8 @@ Return the address of the underlying stake pool (separate resource account) of t This errors out if the vesting contract with the provided address doesn't exist. -
public fun stake_pool_address(vesting_contract_address: address): address
+
#[view]
+public fun stake_pool_address(vesting_contract_address: address): address
 
@@ -1177,7 +1178,8 @@ Vesting will start at this time, and once a full period has passed, the first ve This errors out if the vesting contract with the provided address doesn't exist. -
public fun vesting_start_secs(vesting_contract_address: address): u64
+
#[view]
+public fun vesting_start_secs(vesting_contract_address: address): u64
 
@@ -1206,7 +1208,8 @@ Each vest is released after one full period has started, starting from the speci This errors out if the vesting contract with the provided address doesn't exist. -
public fun period_duration_secs(vesting_contract_address: address): u64
+
#[view]
+public fun period_duration_secs(vesting_contract_address: address): u64
 
@@ -1237,7 +1240,8 @@ according to the vesting schedule. This errors out if the vesting contract with the provided address doesn't exist. -
public fun remaining_grant(vesting_contract_address: address): u64
+
#[view]
+public fun remaining_grant(vesting_contract_address: address): u64
 
@@ -1266,7 +1270,8 @@ This is the same as the shareholder address by default and only different if it' This errors out if the vesting contract with the provided address doesn't exist. -
public fun beneficiary(vesting_contract_address: address, shareholder: address): address
+
#[view]
+public fun beneficiary(vesting_contract_address: address, shareholder: address): address
 
@@ -1294,7 +1299,8 @@ Return the percentage of accumulated rewards that is paid to the operator as com This errors out if the vesting contract with the provided address doesn't exist. -
public fun operator_commission_percentage(vesting_contract_address: address): u64
+
#[view]
+public fun operator_commission_percentage(vesting_contract_address: address): u64
 
@@ -1320,7 +1326,8 @@ This errors out if the vesting contract with the provided address doesn't exist. Return all the vesting contracts a given address is an admin of. -
public fun vesting_contracts(admin: address): vector<address>
+
#[view]
+public fun vesting_contracts(admin: address): vector<address>
 
@@ -1351,7 +1358,8 @@ Return the operator who runs the validator for the vesting contract. This errors out if the vesting contract with the provided address doesn't exist. -
public fun operator(vesting_contract_address: address): address
+
#[view]
+public fun operator(vesting_contract_address: address): address
 
@@ -1380,7 +1388,8 @@ pool. This errors out if the vesting contract with the provided address doesn't exist. -
public fun voter(vesting_contract_address: address): address
+
#[view]
+public fun voter(vesting_contract_address: address): address
 
@@ -1414,7 +1423,8 @@ So 268435456 = 0.0625. This errors out if the vesting contract with the provided address doesn't exist. -
public fun vesting_schedule(vesting_contract_address: address): vesting::VestingSchedule
+
#[view]
+public fun vesting_schedule(vesting_contract_address: address): vesting::VestingSchedule
 
@@ -1443,7 +1453,8 @@ This excludes any unpaid commission that the operator has not collected. This errors out if the vesting contract with the provided address doesn't exist. -
public fun total_accumulated_rewards(vesting_contract_address: address): u64
+
#[view]
+public fun total_accumulated_rewards(vesting_contract_address: address): u64
 
@@ -1476,7 +1487,8 @@ the beneficiary address instead of shareholder address. This errors out if the vesting contract with the provided address doesn't exist. -
public fun accumulated_rewards(vesting_contract_address: address, shareholder_or_beneficiary: address): u64
+
#[view]
+public fun accumulated_rewards(vesting_contract_address: address, shareholder_or_beneficiary: address): u64
 
@@ -1508,7 +1520,8 @@ This errors out if the vesting contract with the provided address doesn't exist. Return the list of all shareholders in the vesting contract. -
public fun shareholders(vesting_contract_address: address): vector<address>
+
#[view]
+public fun shareholders(vesting_contract_address: address): vector<address>
 
@@ -1540,7 +1553,8 @@ address is actually a shareholder address, just return the address back. This returns 0x0 if no shareholder is found for the given beneficiary / the address is not a shareholder itself. -
public fun shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address
+
#[view]
+public fun shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address
 
@@ -1557,18 +1571,17 @@ This returns 0x0 if no shareholder is found for the given beneficiary / the addr return shareholder_or_beneficiary }; let vesting_contract = borrow_global<VestingContract>(vesting_contract_address); - let i = 0; - let len = vector::length(shareholders); - while (i < len) { - let shareholder = *vector::borrow(shareholders, i); - // This will still return the shareholder if shareholder == beneficiary. - if (shareholder_or_beneficiary == get_beneficiary(vesting_contract, shareholder)) { - return shareholder - }; - i = i + 1; - }; + let result = @0x0; + vector::any(shareholders, |shareholder| { + if (shareholder_or_beneficiary == get_beneficiary(vesting_contract, *shareholder)) { + result = *shareholder; + true + } else { + false + } + }); - @0x0 + result }
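The `shareholder` lookup above changes from an early-returning `while` loop to `vector::any` with a captured result, because an inline-function lambda cannot `return` from the enclosing function; `vector::any` still stops at the first element for which the lambda yields `true`. A minimal sketch of the same idiom on a hypothetical search:

```move
module 0x42::example_find {
    use std::vector;

    /// Return the first element equal to `target`, or 0 when there is none.
    /// vector::any short-circuits on the first `true`, so the captured
    /// `result` holds the match when the search succeeds.
    fun find_or_zero(values: &vector<u64>, target: u64): u64 {
        let result = 0;
        vector::any(values, |v| {
            if (*v == target) {
                result = *v;
                true
            } else {
                false
            }
        });
        result
    }
}
```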
@@ -1660,22 +1673,18 @@ Create a vesting contract with a given configurations. let grant = coin::zero<AptosCoin>(); let grant_amount = 0; let grant_pool = pool_u64::create(MAXIMUM_SHAREHOLDERS); - let len = vector::length(shareholders); - let i = 0; - while (i < len) { - let shareholder = *vector::borrow(shareholders, i); + vector::for_each_ref(shareholders, |shareholder| { + let shareholder: address = *shareholder; let (_, buy_in) = simple_map::remove(&mut buy_ins, &shareholder); let buy_in_amount = coin::value(&buy_in); coin::merge(&mut grant, buy_in); pool_u64::buy_in( &mut grant_pool, - *vector::borrow(shareholders, i), + shareholder, buy_in_amount, ); grant_amount = grant_amount + buy_in_amount; - - i = i + 1; - }; + }); assert!(grant_amount > 0, error::invalid_argument(EZERO_GRANT)); // If this is the first time this admin account has created a vesting contract, initialize the admin store. @@ -1789,12 +1798,10 @@ Call unlock_rewards for many vesting contracts. assert!(len != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION)); - let i = 0; - while (i < len) { - let contract_address = *vector::borrow(&contract_addresses, i); + vector::for_each_ref(&contract_addresses, |contract_address| { + let contract_address: address = *contract_address; unlock_rewards(contract_address); - i = i + 1; - }; + }); }
@@ -1896,12 +1903,10 @@ Call vest for many vesting contracts. assert!(len != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION)); - let i = 0; - while (i < len) { - let contract_address = *vector::borrow(&contract_addresses, i); + vector::for_each_ref(&contract_addresses, |contract_address| { + let contract_address = *contract_address; vest(contract_address); - i = i + 1; - }; + }); }
@@ -1939,18 +1944,14 @@ Distribute any withdrawable stake from the stake pool. // Distribute coins to all shareholders in the vesting contract. let grant_pool = &vesting_contract.grant_pool; let shareholders = &pool_u64::shareholders(grant_pool); - let len = vector::length(shareholders); - let i = 0; - while (i < len) { - let shareholder = *vector::borrow(shareholders, i); + vector::for_each_ref(shareholders, |shareholder| { + let shareholder = *shareholder; let shares = pool_u64::shares(grant_pool, shareholder); let amount = pool_u64::shares_to_amount_with_total_coins(grant_pool, shares, total_distribution_amount); let share_of_coins = coin::extract(&mut coins, amount); let recipient_address = get_beneficiary(vesting_contract, shareholder); aptos_account::deposit_coins(recipient_address, share_of_coins); - - i = i + 1; - }; + }); // Send any remaining "dust" (leftover due to rounding error) to the withdrawal address. if (coin::value(&coins) > 0) { @@ -1995,12 +1996,10 @@ Call distribute for many vesting contracts. assert!(len != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION)); - let i = 0; - while (i < len) { - let contract_address = *vector::borrow(&contract_addresses, i); + vector::for_each_ref(&contract_addresses, |contract_address| { + let contract_address = *contract_address; distribute(contract_address); - i = i + 1; - }; + }); }
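The unlock/vest/distribute loops above follow the same `vector::for_each_ref` rewrite. Where the element index is still needed (as in the `aptos_account.move` batch-transfer change further below), the companion iterator `vector::enumerate_ref` passes the index alongside the element. A minimal sketch with a hypothetical pairwise sum:

```move
module 0x42::example_enumerate {
    use std::vector;

    /// Element-wise sum of two equal-length vectors; the index from
    /// enumerate_ref is used to read the matching element of `rhs`.
    fun pairwise_sum(lhs: &vector<u64>, rhs: &vector<u64>): vector<u64> {
        let out = vector::empty<u64>();
        vector::enumerate_ref(lhs, |i, l| {
            let r = *vector::borrow(rhs, i);
            vector::push_back(&mut out, *l + r);
        });
        out
    }
}
```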
@@ -2723,7 +2722,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `stake_pool_address` -
public fun stake_pool_address(vesting_contract_address: address): address
+
#[view]
+public fun stake_pool_address(vesting_contract_address: address): address
 
@@ -2739,7 +2739,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `vesting_start_secs` -
public fun vesting_start_secs(vesting_contract_address: address): u64
+
#[view]
+public fun vesting_start_secs(vesting_contract_address: address): u64
 
@@ -2755,7 +2756,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `period_duration_secs` -
public fun period_duration_secs(vesting_contract_address: address): u64
+
#[view]
+public fun period_duration_secs(vesting_contract_address: address): u64
 
@@ -2771,7 +2773,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `remaining_grant` -
public fun remaining_grant(vesting_contract_address: address): u64
+
#[view]
+public fun remaining_grant(vesting_contract_address: address): u64
 
@@ -2787,7 +2790,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `beneficiary` -
public fun beneficiary(vesting_contract_address: address, shareholder: address): address
+
#[view]
+public fun beneficiary(vesting_contract_address: address, shareholder: address): address
 
@@ -2803,7 +2807,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `operator_commission_percentage` -
public fun operator_commission_percentage(vesting_contract_address: address): u64
+
#[view]
+public fun operator_commission_percentage(vesting_contract_address: address): u64
 
@@ -2819,7 +2824,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `vesting_contracts` -
public fun vesting_contracts(admin: address): vector<address>
+
#[view]
+public fun vesting_contracts(admin: address): vector<address>
 
@@ -2835,7 +2841,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `operator` -
public fun operator(vesting_contract_address: address): address
+
#[view]
+public fun operator(vesting_contract_address: address): address
 
@@ -2851,7 +2858,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `voter` -
public fun voter(vesting_contract_address: address): address
+
#[view]
+public fun voter(vesting_contract_address: address): address
 
@@ -2867,7 +2875,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `vesting_schedule` -
public fun vesting_schedule(vesting_contract_address: address): vesting::VestingSchedule
+
#[view]
+public fun vesting_schedule(vesting_contract_address: address): vesting::VestingSchedule
 
@@ -2883,7 +2892,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `total_accumulated_rewards` -
public fun total_accumulated_rewards(vesting_contract_address: address): u64
+
#[view]
+public fun total_accumulated_rewards(vesting_contract_address: address): u64
 
@@ -2919,7 +2929,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `accumulated_rewards` -
public fun accumulated_rewards(vesting_contract_address: address, shareholder_or_beneficiary: address): u64
+
#[view]
+public fun accumulated_rewards(vesting_contract_address: address, shareholder_or_beneficiary: address): u64
 
@@ -2935,7 +2946,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `shareholders` -
public fun shareholders(vesting_contract_address: address): vector<address>
+
#[view]
+public fun shareholders(vesting_contract_address: address): vector<address>
 
@@ -2951,7 +2963,8 @@ This address should be deterministic for the same admin and vesting contract cre ### Function `shareholder` -
public fun shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address
+
#[view]
+public fun shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address
 
diff --git a/aptos-move/framework/aptos-framework/doc/voting.md b/aptos-move/framework/aptos-framework/doc/voting.md index 32828c52a7e56..ca96fe6ae555a 100644 --- a/aptos-move/framework/aptos-framework/doc/voting.md +++ b/aptos-move/framework/aptos-framework/doc/voting.md @@ -1106,7 +1106,8 @@ there are more yes votes than no. If either of these conditions is not met, this Return the next unassigned proposal id -
public fun next_proposal_id<ProposalType: store>(voting_forum_address: address): u64
+
#[view]
+public fun next_proposal_id<ProposalType: store>(voting_forum_address: address): u64
 
@@ -1131,7 +1132,8 @@ Return the next unassigned proposal id -
public fun is_voting_closed<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
#[view]
+public fun is_voting_closed<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
 
@@ -1193,7 +1195,8 @@ Return the state of the proposal with given id. @return Proposal state as an enum value. -
public fun get_proposal_state<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
#[view]
+public fun get_proposal_state<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
 
@@ -1234,7 +1237,8 @@ Return the state of the proposal with given id. Return the proposal's creation time. -
public fun get_proposal_creation_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
#[view]
+public fun get_proposal_creation_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
 
@@ -1264,7 +1268,8 @@ Return the proposal's creation time. Return the proposal's expiration time. -
public fun get_proposal_expiration_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
#[view]
+public fun get_proposal_expiration_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
 
@@ -1294,7 +1299,8 @@ Return the proposal's expiration time. Return the proposal's execution hash. -
public fun get_execution_hash<ProposalType: store>(voting_forum_address: address, proposal_id: u64): vector<u8>
+
#[view]
+public fun get_execution_hash<ProposalType: store>(voting_forum_address: address, proposal_id: u64): vector<u8>
 
@@ -1324,7 +1330,8 @@ Return the proposal's execution hash. Return the proposal's minimum vote threshold -
public fun get_min_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u128
+
#[view]
+public fun get_min_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u128
 
@@ -1354,7 +1361,8 @@ Return the proposal's minimum vote threshold Return the proposal's early resolution minimum vote threshold (optionally set) -
public fun get_early_resolution_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): option::Option<u128>
+
#[view]
+public fun get_early_resolution_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): option::Option<u128>
 
@@ -1384,7 +1392,8 @@ Return the proposal's early resolution minimum vote threshold (optionally set) Return the proposal's current vote count (yes_votes, no_votes) -
public fun get_votes<ProposalType: store>(voting_forum_address: address, proposal_id: u64): (u128, u128)
+
#[view]
+public fun get_votes<ProposalType: store>(voting_forum_address: address, proposal_id: u64): (u128, u128)
 
@@ -1414,7 +1423,8 @@ Return the proposal's current vote count (yes_votes, no_votes) Return true if the governance proposal has already been resolved. -
public fun is_resolved<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
#[view]
+public fun is_resolved<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
 
@@ -1444,7 +1454,8 @@ Return true if the governance proposal has already been resolved. Return true if the multi-step governance proposal is in execution. -
public fun is_multi_step_proposal_in_execution<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
#[view]
+public fun is_multi_step_proposal_in_execution<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
 
@@ -1555,11 +1566,6 @@ Return true if the voting period of the given proposal has already ended.
-The min_vote_threshold lower thanearly_resolution_vote_threshold. -Make sure the execution script's hash is not empty. -VotingForum existed under the voting_forum_address. -The next_proposal_id in VotingForum is up to MAX_U64. -CurrentTimeMicroseconds existed under the @aptos_framework.
requires chain_status::is_operating();
@@ -1637,7 +1643,23 @@ CurrentTimeMicroseconds existed under the @aptos_framework.
 
 
 
requires chain_status::is_operating();
-pragma aborts_if_is_strict = false;
+include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+let voting_period_over = timestamp::now_seconds() > proposal.expiration_secs;
+let be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                            (proposal.yes_votes >= early_resolution_threshold ||
+                             proposal.no_votes >= early_resolution_threshold);
+let voting_closed = voting_period_over || be_resolved_early;
+aborts_if voting_closed && (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold);
+aborts_if !voting_closed;
+aborts_if proposal.is_resolved;
+aborts_if !std::string::spec_internal_check_utf8(RESOLVABLE_TIME_METADATA_KEY);
+aborts_if !simple_map::spec_contains_key(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY));
+aborts_if !from_bcs::deserializable<u64>(simple_map::spec_get(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY)));
+aborts_if timestamp::spec_now_seconds() <= from_bcs::deserialize<u64>(simple_map::spec_get(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY)));
+aborts_if transaction_context::spec_get_script_hash() != proposal.execution_hash;
 
@@ -1686,7 +1708,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `next_proposal_id` -
public fun next_proposal_id<ProposalType: store>(voting_forum_address: address): u64
+
#[view]
+public fun next_proposal_id<ProposalType: store>(voting_forum_address: address): u64
 
@@ -1702,7 +1725,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `is_voting_closed` -
public fun is_voting_closed<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
#[view]
+public fun is_voting_closed<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
 
@@ -1730,20 +1754,47 @@ CurrentTimeMicroseconds existed under the @aptos_framework. + + + + +
fun spec_get_proposal_state<ProposalType>(
+   voting_forum_address: address,
+   proposal_id: u64,
+): u64;
+
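`spec_get_proposal_state` above is declared as an uninterpreted spec function: a signature with no body, which the prover treats as an abstract symbol that other specs can constrain or use in `ensures` clauses. A minimal sketch of that pattern under hypothetical names (assuming the target module defines `lookup`):

```move
spec 0x42::example_abstract {
    // Abstract (uninterpreted) helper: no body, only a signature.
    spec fun spec_lookup(addr: address, id: u64): u64;

    // A function spec can then relate the concrete result to the helper.
    spec lookup(addr: address, id: u64): u64 {
        ensures result == spec_lookup(addr, id);
    }
}
```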
+ + + ### Function `get_proposal_state` -
public fun get_proposal_state<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
#[view]
+public fun get_proposal_state<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
 
-
requires chain_status::is_operating();
-pragma addition_overflow_unchecked;
+
pragma addition_overflow_unchecked;
+requires chain_status::is_operating();
 include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+let voting_period_over = timestamp::now_seconds() > proposal.expiration_secs;
+let be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                            (proposal.yes_votes >= early_resolution_threshold ||
+                             proposal.no_votes >= early_resolution_threshold);
+let voting_closed = voting_period_over || be_resolved_early;
+ensures voting_closed ==> if (proposal.yes_votes > proposal.no_votes && proposal.yes_votes + proposal.no_votes >= proposal.min_vote_threshold) {
+    result == PROPOSAL_STATE_SUCCEEDED
+} else {
+    result == PROPOSAL_STATE_FAILED
+};
+ensures !voting_closed ==> result == PROPOSAL_STATE_PENDING;
 
@@ -1753,7 +1804,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `get_proposal_creation_secs` -
public fun get_proposal_creation_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
#[view]
+public fun get_proposal_creation_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
 
@@ -1769,7 +1821,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `get_proposal_expiration_secs` -
public fun get_proposal_expiration_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
#[view]
+public fun get_proposal_expiration_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
 
@@ -1785,7 +1838,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `get_execution_hash` -
public fun get_execution_hash<ProposalType: store>(voting_forum_address: address, proposal_id: u64): vector<u8>
+
#[view]
+public fun get_execution_hash<ProposalType: store>(voting_forum_address: address, proposal_id: u64): vector<u8>
 
@@ -1801,7 +1855,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `get_min_vote_threshold` -
public fun get_min_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u128
+
#[view]
+public fun get_min_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u128
 
@@ -1817,7 +1872,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `get_early_resolution_vote_threshold` -
public fun get_early_resolution_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): option::Option<u128>
+
#[view]
+public fun get_early_resolution_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): option::Option<u128>
 
@@ -1833,7 +1889,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `get_votes` -
public fun get_votes<ProposalType: store>(voting_forum_address: address, proposal_id: u64): (u128, u128)
+
#[view]
+public fun get_votes<ProposalType: store>(voting_forum_address: address, proposal_id: u64): (u128, u128)
 
@@ -1849,7 +1906,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `is_resolved` -
public fun is_resolved<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
#[view]
+public fun is_resolved<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
 
@@ -1880,7 +1938,8 @@ CurrentTimeMicroseconds existed under the @aptos_framework. ### Function `is_multi_step_proposal_in_execution` -
public fun is_multi_step_proposal_in_execution<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
#[view]
+public fun is_multi_step_proposal_in_execution<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
 
diff --git a/aptos-move/framework/aptos-framework/sources/account.spec.move b/aptos-move/framework/aptos-framework/sources/account.spec.move index 4359b39659224..14e637fa3a172 100644 --- a/aptos-move/framework/aptos-framework/sources/account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/account.spec.move @@ -155,18 +155,22 @@ spec aptos_framework::account { challenge: challenge, }; - // let new_auth_key = spec_assert_valid_rotation_proof_signature_and_get_auth_key(to_scheme, to_public_key_bytes, cap_update_table, challenge); + // Verify all properties in update_auth_key_and_originating_address_table + let originating_addr = addr; + let new_auth_key_vector = spec_assert_valid_rotation_proof_signature_and_get_auth_key(to_scheme, to_public_key_bytes, cap_update_table, challenge); - // TODO: boogie error: Error: invalid type for argument 0 in application of $1_from_bcs_deserializable'address': int (expected: Vec int). - // include UpdateAuthKeyAndOriginatingAddressTableAbortsIf{ - // originating_addr: addr, - // account_resource: account_resource, - // new_auth_key_vector: new_auth_key - // }; - pragma aborts_if_is_partial; + let address_map = global(@aptos_framework).address_map; + let new_auth_key = from_bcs::deserialize
(new_auth_key_vector); + + aborts_if !exists(@aptos_framework); + aborts_if !from_bcs::deserializable
(account_resource.authentication_key); + aborts_if table::spec_contains(address_map, curr_auth_key) && + table::spec_get(address_map, curr_auth_key) != originating_addr; + + aborts_if !from_bcs::deserializable
(new_auth_key_vector); + + aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key); - modifies global(addr); - modifies global(@aptos_framework); } spec rotate_authentication_key_with_rotation_capability( @@ -195,10 +199,20 @@ spec aptos_framework::account { signature: cap_update_table, challenge: challenge, }; - // let new_auth_key = spec_assert_valid_rotation_proof_signature_and_get_auth_key(new_scheme, new_public_key_bytes, cap_update_table, challenge); - // TODO: Need to investigate the issue of including UpdateAuthKeyAndOriginatingAddressTableAbortsIf here. - // TODO: boogie error: Error: invalid type for argument 0 in application of $1_from_bcs_deserializable'address': int (expected: Vec int). - pragma aborts_if_is_partial; + + let new_auth_key_vector = spec_assert_valid_rotation_proof_signature_and_get_auth_key(new_scheme, new_public_key_bytes, cap_update_table, challenge); + let address_map = global(@aptos_framework).address_map; + + // Verify all properties in update_auth_key_and_originating_address_table + aborts_if !exists(@aptos_framework); + aborts_if !from_bcs::deserializable
(offerer_account_resource.authentication_key); + aborts_if table::spec_contains(address_map, curr_auth_key) && + table::spec_get(address_map, curr_auth_key) != rotation_cap_offerer_address; + + aborts_if !from_bcs::deserializable
(new_auth_key_vector); + let new_auth_key = from_bcs::deserialize
(new_auth_key_vector); + + aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key); } spec offer_rotation_capability( diff --git a/aptos-move/framework/aptos-framework/sources/aptos_account.move b/aptos-move/framework/aptos-framework/sources/aptos_account.move index e6b12f90aac24..c58395714d113 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_account.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_account.move @@ -52,13 +52,10 @@ module aptos_framework::aptos_account { error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH), ); - let i = 0; - while (i < recipients_len) { - let to = *vector::borrow(&recipients, i); + vector::enumerate_ref(&recipients, |i, to| { let amount = *vector::borrow(&amounts, i); - transfer(source, to, amount); - i = i + 1; - }; + transfer(source, *to, amount); + }); } /// Convenient function to transfer APT to a recipient account that might not exist. @@ -84,13 +81,10 @@ module aptos_framework::aptos_account { error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH), ); - let i = 0; - while (i < recipients_len) { - let to = *vector::borrow(&recipients, i); + vector::enumerate_ref(&recipients, |i, to| { let amount = *vector::borrow(&amounts, i); - transfer_coins(from, to, amount); - i = i + 1; - }; + transfer_coins(from, *to, amount); + }); } /// Convenient function to transfer a custom CoinType to a recipient account that might not exist. @@ -104,6 +98,11 @@ module aptos_framework::aptos_account { public fun deposit_coins(to: address, coins: Coin) acquires DirectTransferConfig { if (!account::exists_at(to)) { create_account(to); + spec { + assert coin::is_account_registered(to); + assume aptos_std::type_info::type_of() == aptos_std::type_info::type_of() ==> + coin::is_account_registered(to); + }; }; if (!coin::is_account_registered(to)) { assert!( diff --git a/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move b/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move index 5008f857210f9..661bd0414a3c2 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move @@ -1,6 +1,5 @@ spec aptos_framework::aptos_account { spec module { - pragma verify = true; pragma aborts_if_is_strict; } @@ -27,7 +26,17 @@ spec aptos_framework::aptos_account { } spec transfer(source: &signer, to: address, amount: u64) { - pragma verify = false; + let account_addr_source = signer::address_of(source); + let coin_store_to = global>(to); + + // The 'from' addr is implictly not equal to 'to' addr + requires account_addr_source != to; + + include CreateAccountTransferAbortsIf; + include GuidAbortsIf; + include WithdrawAbortsIf{from: source}; + + aborts_if exists>(to) && global>(to).frozen; } spec assert_account_exists(addr: address) { @@ -47,8 +56,44 @@ spec aptos_framework::aptos_account { } spec batch_transfer(source: &signer, recipients: vector
, amounts: vector) { - // TODO: missing aborts_if spec - pragma verify=false; + //TODO: Can't verify the loop invariant in enumerate + pragma verify = false; + let account_addr_source = signer::address_of(source); + let coin_store_source = global>(account_addr_source); + let balance_source = coin_store_source.coin.value; + + requires forall i in 0..len(recipients): + recipients[i] != account_addr_source; + requires exists i in 0..len(recipients): + amounts[i] > 0; + + // create account properties + aborts_if len(recipients) != len(amounts); + aborts_if exists i in 0..len(recipients): + !account::exists_at(recipients[i]) && length_judgment(recipients[i]); + aborts_if exists i in 0..len(recipients): + !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); + ensures forall i in 0..len(recipients): + (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && + (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); + + // coin::withdraw properties + aborts_if exists i in 0..len(recipients): + !exists>(account_addr_source); + aborts_if exists i in 0..len(recipients): + coin_store_source.frozen; + aborts_if exists i in 0..len(recipients): + global>(account_addr_source).coin.value < amounts[i]; + + // deposit properties + aborts_if exists i in 0..len(recipients): + exists>(recipients[i]) && global>(recipients[i]).frozen; + + // guid properties + aborts_if exists i in 0..len(recipients): + account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; + aborts_if exists i in 0..len(recipients): + account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 > MAX_U64; } spec can_receive_direct_coin_transfers(account: address): bool { @@ -60,17 +105,110 @@ spec aptos_framework::aptos_account { } spec batch_transfer_coins(from: &signer, recipients: vector
, amounts: vector) { - // TODO: missing aborts_if spec - pragma verify=false; + //TODO: Can't verify the loop invariant in enumerate + use aptos_std::type_info; + pragma verify = false; + let account_addr_source = signer::address_of(from); + let coin_store_source = global>(account_addr_source); + let balance_source = coin_store_source.coin.value; + + requires forall i in 0..len(recipients): + recipients[i] != account_addr_source; + + requires exists i in 0..len(recipients): + amounts[i] > 0; + + aborts_if len(recipients) != len(amounts); + + //create account properties + aborts_if exists i in 0..len(recipients): + !account::exists_at(recipients[i]) && length_judgment(recipients[i]); + aborts_if exists i in 0..len(recipients): + !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); + ensures forall i in 0..len(recipients): + (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && + (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); + + // coin::withdraw properties + aborts_if exists i in 0..len(recipients): + !exists>(account_addr_source); + aborts_if exists i in 0..len(recipients): + coin_store_source.frozen; + aborts_if exists i in 0..len(recipients): + global>(account_addr_source).coin.value < amounts[i]; + + // deposit properties + aborts_if exists i in 0..len(recipients): + exists>(recipients[i]) && global>(recipients[i]).frozen; + + // guid properties + aborts_if exists i in 0..len(recipients): + account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; + aborts_if exists i in 0..len(recipients): + account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 > MAX_U64; + + // register_coin properties + aborts_if exists i in 0..len(recipients): + !coin::is_account_registered(recipients[i]) && !type_info::spec_is_struct(); + aborts_if exists i in 0..len(recipients): + !coin::is_account_registered(recipients[i]) && !can_receive_direct_coin_transfers(recipients[i]); + } spec deposit_coins(to: address, coins: Coin) { - // TODO: missing aborts_if spec - pragma verify=false; + include CreateAccountTransferAbortsIf; + include GuidAbortsIf; + include RegistCoinAbortsIf; + + aborts_if exists>(to) && global>(to).frozen; } spec transfer_coins(from: &signer, to: address, amount: u64) { - // TODO: missing aborts_if spec - pragma verify=false; + let account_addr_source = signer::address_of(from); + let coin_store_to = global>(to); + + //The 'from' addr is implictly not equal to 'to' addr + requires account_addr_source != to; + + include CreateAccountTransferAbortsIf; + include WithdrawAbortsIf; + include GuidAbortsIf; + include RegistCoinAbortsIf; + + aborts_if exists>(to) && global>(to).frozen; + } + + spec schema CreateAccountTransferAbortsIf { + to: address; + aborts_if !account::exists_at(to) && length_judgment(to); + aborts_if !account::exists_at(to) && (to == @vm_reserved || to == @aptos_framework || to == @aptos_token); + } + + spec schema WithdrawAbortsIf { + from: &signer; + amount: u64; + let account_addr_source = signer::address_of(from); + let coin_store_source = global>(account_addr_source); + let balance_source = coin_store_source.coin.value; + aborts_if !exists>(account_addr_source); + aborts_if coin_store_source.frozen; + aborts_if balance_source < amount; + 
} + + spec schema GuidAbortsIf { + to: address; + let acc = global(to); + aborts_if account::exists_at(to) && !exists>(to) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; + aborts_if account::exists_at(to) && !exists>(to) && acc.guid_creation_num + 2 > MAX_U64; + } + + spec schema RegistCoinAbortsIf { + use aptos_std::type_info; + to: address; + aborts_if !coin::is_account_registered(to) && !type_info::spec_is_struct(); + aborts_if exists(to) + && !coin::is_account_registered(to) && !can_receive_direct_coin_transfers(to); + aborts_if type_info::type_of() != type_info::type_of() + && !coin::is_account_registered(to) && !can_receive_direct_coin_transfers(to); } } diff --git a/aptos-move/framework/aptos-framework/sources/aptos_coin.move b/aptos-move/framework/aptos-framework/sources/aptos_coin.move index 2eb0f870aebea..c3e656e9c8ca4 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_coin.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_coin.move @@ -112,12 +112,10 @@ module aptos_framework::aptos_coin { public entry fun delegate_mint_capability(account: signer, to: address) acquires Delegations { system_addresses::assert_core_resource(&account); let delegations = &mut borrow_global_mut(@core_resources).inner; - let i = 0; - while (i < vector::length(delegations)) { - let element = vector::borrow(delegations, i); + vector::for_each_ref(delegations, |element| { + let element: &DelegatedMintCapability = element; assert!(element.to != to, error::invalid_argument(EALREADY_DELEGATED)); - i = i + 1; - }; + }); vector::push_back(delegations, DelegatedMintCapability { to }); } diff --git a/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move b/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move index a29c978406cf1..682a6df16ca12 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move @@ -112,8 +112,7 @@ spec aptos_framework::aptos_governance { metadata_hash: vector, ) { use aptos_framework::chain_status; - // TODO: Calls `create_proposal_v2`. - pragma aborts_if_is_partial; + requires chain_status::is_operating(); include CreateProposalAbortsIf; } @@ -127,11 +126,7 @@ spec aptos_framework::aptos_governance { is_multi_step_proposal: bool, ) { use aptos_framework::chain_status; - // TODO: The variable `stake_balance` is the return value of the function `get_voting_power`. - // `get_voting_power` has already stated that it cannot be fully verified, - // so the value of `stake_balance` cannot be obtained in the spec, - // and the `aborts_if` of `stake_balancede` cannot be written. 
- pragma aborts_if_is_partial; + requires chain_status::is_operating(); include CreateProposalAbortsIf; } @@ -148,19 +143,65 @@ spec aptos_framework::aptos_governance { metadata_location: vector; metadata_hash: vector; - let proposer_address = signer::address_of(proposer); - let governance_config = global(@aptos_framework); - let stake_pool_res = global(stake_pool); - aborts_if !exists(@aptos_framework); - aborts_if !exists(stake_pool); - aborts_if global(stake_pool).delegated_voter != proposer_address; + include VotingGetDelegatedVoterAbortsIf { sign: proposer }; include AbortsIfNotGovernanceConfig; - let current_time = timestamp::now_seconds(); + + // verify get_voting_power(stake_pool) + include GetVotingPowerAbortsIf { pool_address: stake_pool }; + let staking_config = global(@aptos_framework); + let allow_validator_set_change = staking_config.allow_validator_set_change; + let stake_pool_res = global(stake_pool); + // Three results of get_voting_power(stake_pool) + let stake_balance_0 = stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value; + let stake_balance_1 = stake_pool_res.active.value + stake_pool_res.pending_inactive.value; + let stake_balance_2 = 0; + let governance_config = global(@aptos_framework); + let required_proposer_stake = governance_config.required_proposer_stake; + // Comparison of the three results of get_voting_power(stake_pool) and required_proposer_stake + aborts_if allow_validator_set_change && stake_balance_0 < required_proposer_stake; + aborts_if !allow_validator_set_change && stake::spec_is_current_epoch_validator(stake_pool) && stake_balance_1 < required_proposer_stake; + aborts_if !allow_validator_set_change && !stake::spec_is_current_epoch_validator(stake_pool) && stake_balance_2 < required_proposer_stake; + + aborts_if !exists(@aptos_framework); + let current_time = timestamp::spec_now_seconds(); let proposal_expiration = current_time + governance_config.voting_duration_secs; aborts_if stake_pool_res.locked_until_secs < proposal_expiration; + + // verify create_proposal_metadata + include CreateProposalMetadataAbortsIf; + + let addr = aptos_std::type_info::type_of().account_address; + aborts_if !exists>(addr); + let maybe_supply = global>(addr).supply; + let supply = option::spec_borrow(maybe_supply); + let total_supply = aptos_framework::optional_aggregator::optional_aggregator_value(supply); + let early_resolution_vote_threshold_value = total_supply / 2 + 1; + + // verify voting::create_proposal_v2 + aborts_if option::spec_is_some(maybe_supply) && governance_config.min_voting_threshold > early_resolution_vote_threshold_value; + aborts_if len(execution_hash) <= 0; + aborts_if !exists>(@aptos_framework); + let voting_forum = global>(@aptos_framework); + let proposal_id = voting_forum.next_proposal_id; + aborts_if proposal_id + 1 > MAX_U64; + let post post_voting_forum = global>(@aptos_framework); + let post post_next_proposal_id = post_voting_forum.next_proposal_id; + ensures post_next_proposal_id == proposal_id + 1; + aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY); + aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY); + aborts_if table::spec_contains(voting_forum.proposals,proposal_id); + ensures table::spec_contains(post_voting_forum.proposals, proposal_id); aborts_if !exists(@aptos_framework); - let allow_validator_set_change = global(@aptos_framework).allow_validator_set_change; - aborts_if !allow_validator_set_change && 
!exists(@aptos_framework); + } + + spec schema VotingGetDelegatedVoterAbortsIf { + stake_pool: address; + sign: signer; + + let addr = signer::address_of(sign); + let stake_pool_res = global(stake_pool); + aborts_if !exists(stake_pool); + aborts_if stake_pool_res.delegated_voter != addr; } /// stake_pool must exist StakePool. @@ -175,61 +216,202 @@ spec aptos_framework::aptos_governance { use aptos_framework::stake; use aptos_framework::chain_status; - // TODO: The variable `voting_power` is the return value of the function `get_voting_power`. - // `get_voting_power` has already stated that it cannot be completely verified, - // so the value of `voting_power` cannot be obtained in the spec, - // and the `aborts_if` of `voting_power` cannot be written. - pragma aborts_if_is_partial; - requires chain_status::is_operating(); - let voter_address = signer::address_of(voter); - let stake_pool_res = global(stake_pool); - aborts_if !exists(stake_pool); - aborts_if stake_pool_res.delegated_voter != voter_address; + include VotingGetDelegatedVoterAbortsIf { sign: voter }; + aborts_if !exists(@aptos_framework); - aborts_if !exists>(@aptos_framework); + let voting_records = global(@aptos_framework); + let record_key = RecordKey { + stake_pool, + proposal_id, + }; + let post post_voting_records = global(@aptos_framework); + aborts_if table::spec_contains(voting_records.votes, record_key); + ensures table::spec_get(post_voting_records.votes, record_key) == true; + + // verify get_voting_power(stake_pool) + include GetVotingPowerAbortsIf { pool_address: stake_pool }; let allow_validator_set_change = global(@aptos_framework).allow_validator_set_change; - aborts_if !allow_validator_set_change && !exists(@aptos_framework); + let stake_pool_res = global(stake_pool); + // Two results of get_voting_power(stake_pool) and the third one is zero. + let voting_power_0 = stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value; + let voting_power_1 = stake_pool_res.active.value + stake_pool_res.pending_inactive.value; + // Each result is compared with zero, and the following three aborts_if statements represent each of the three results. 
+ aborts_if allow_validator_set_change && voting_power_0 <= 0; + aborts_if !allow_validator_set_change && stake::spec_is_current_epoch_validator(stake_pool) && voting_power_1 <= 0; + aborts_if !allow_validator_set_change && !stake::spec_is_current_epoch_validator(stake_pool); + + aborts_if !exists>(@aptos_framework); let voting_forum = global>(@aptos_framework); let proposal = table::spec_get(voting_forum.proposals, proposal_id); + aborts_if !table::spec_contains(voting_forum.proposals, proposal_id); let proposal_expiration = proposal.expiration_secs; let locked_until_secs = global(stake_pool).locked_until_secs; aborts_if proposal_expiration > locked_until_secs; + + // verify voting::vote + aborts_if timestamp::now_seconds() > proposal_expiration; + aborts_if proposal.is_resolved; + aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY); + let execution_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY); + aborts_if simple_map::spec_contains_key(proposal.metadata, execution_key) && + simple_map::spec_get(proposal.metadata, execution_key) != std::bcs::to_bytes(false); + // Since there are two possibilities for voting_power, the result of the vote is not only related to should_pass, + // but also to allow_validator_set_change which determines the voting_power + aborts_if allow_validator_set_change && + if (should_pass) { proposal.yes_votes + voting_power_0 > MAX_U128 } else { proposal.no_votes + voting_power_0 > MAX_U128 }; + aborts_if !allow_validator_set_change && + if (should_pass) { proposal.yes_votes + voting_power_1 > MAX_U128 } else { proposal.no_votes + voting_power_1 > MAX_U128 }; + let post post_voting_forum = global>(@aptos_framework); + let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id); + ensures allow_validator_set_change ==> + if (should_pass) { post_proposal.yes_votes == proposal.yes_votes + voting_power_0 } else { post_proposal.no_votes == proposal.no_votes + voting_power_0 }; + ensures !allow_validator_set_change ==> + if (should_pass) { post_proposal.yes_votes == proposal.yes_votes + voting_power_1 } else { post_proposal.no_votes == proposal.no_votes + voting_power_1 }; + aborts_if !string::spec_internal_check_utf8(voting::RESOLVABLE_TIME_METADATA_KEY); + let key = utf8(voting::RESOLVABLE_TIME_METADATA_KEY); + ensures simple_map::spec_contains_key(post_proposal.metadata, key); + ensures simple_map::spec_get(post_proposal.metadata, key) == std::bcs::to_bytes(timestamp::now_seconds()); + + aborts_if !exists(@aptos_framework); + + // verify voting::get_proposal_state + let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold); + let is_voting_period_over = timestamp::now_seconds() > proposal_expiration; + // The success state depends on the number of votes, but since the number of votes is related to allow_validator_set_change and should_pass, + // we describe the success state in different cases. 
+ // allow_validator_set_change && should_pass + let new_proposal_yes_votes_0 = proposal.yes_votes + voting_power_0; + let can_be_resolved_early_0 = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (new_proposal_yes_votes_0 >= early_resolution_threshold || + proposal.no_votes >= early_resolution_threshold); + let is_voting_closed_0 = is_voting_period_over || can_be_resolved_early_0; + let proposal_state_successed_0 = is_voting_closed_0 && new_proposal_yes_votes_0 > proposal.no_votes && + new_proposal_yes_votes_0 + proposal.no_votes >= proposal.min_vote_threshold; + // allow_validator_set_change && !should_pass + let new_proposal_no_votes_0 = proposal.no_votes + voting_power_0; + let can_be_resolved_early_1 = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (proposal.yes_votes >= early_resolution_threshold || + new_proposal_no_votes_0 >= early_resolution_threshold); + let is_voting_closed_1 = is_voting_period_over || can_be_resolved_early_1; + let proposal_state_successed_1 = is_voting_closed_1 && proposal.yes_votes > new_proposal_no_votes_0 && + proposal.yes_votes + new_proposal_no_votes_0 >= proposal.min_vote_threshold; + // !allow_validator_set_change && should_pass + let new_proposal_yes_votes_1 = proposal.yes_votes + voting_power_1; + let can_be_resolved_early_2 = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (new_proposal_yes_votes_1 >= early_resolution_threshold || + proposal.no_votes >= early_resolution_threshold); + let is_voting_closed_2 = is_voting_period_over || can_be_resolved_early_2; + let proposal_state_successed_2 = is_voting_closed_2 && new_proposal_yes_votes_1 > proposal.no_votes && + new_proposal_yes_votes_1 + proposal.no_votes >= proposal.min_vote_threshold; + // !allow_validator_set_change && !should_pass + let new_proposal_no_votes_1 = proposal.no_votes + voting_power_1; + let can_be_resolved_early_3 = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (proposal.yes_votes >= early_resolution_threshold || + new_proposal_no_votes_1 >= early_resolution_threshold); + let is_voting_closed_3 = is_voting_period_over || can_be_resolved_early_3; + let proposal_state_successed_3 = is_voting_closed_3 && proposal.yes_votes > new_proposal_no_votes_1 && + proposal.yes_votes + new_proposal_no_votes_1 >= proposal.min_vote_threshold; + // post state + let post can_be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (post_proposal.yes_votes >= early_resolution_threshold || + post_proposal.no_votes >= early_resolution_threshold); + let post is_voting_closed = is_voting_period_over || can_be_resolved_early; + let post proposal_state_successed = is_voting_closed && post_proposal.yes_votes > post_proposal.no_votes && + post_proposal.yes_votes + post_proposal.no_votes >= proposal.min_vote_threshold; + // verify add_approved_script_hash(proposal_id) + let execution_hash = proposal.execution_hash; + let post post_approved_hashes = global(@aptos_framework); + + // Due to the complexity of the success state, the validation of 'borrow_global_mut(@aptos_framework);' is discussed in four cases. 
+ aborts_if allow_validator_set_change && + if (should_pass) { + proposal_state_successed_0 && !exists(@aptos_framework) + } else { + proposal_state_successed_1 && !exists(@aptos_framework) + }; + aborts_if !allow_validator_set_change && + if (should_pass) { + proposal_state_successed_2 && !exists(@aptos_framework) + } else { + proposal_state_successed_3 && !exists(@aptos_framework) + }; + ensures proposal_state_successed ==> simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) && + simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == execution_hash; } spec add_approved_script_hash(proposal_id: u64) { use aptos_framework::chain_status; - // TODO: The variable `proposal_state` is the return value of the function `voting::get_proposal_state`. - // The calling level of `voting::get_proposal_state` is very deep, - // so the value of `proposal_state` cannot be obtained in the spec, - // and the `aborts_if` of `proposal_state` cannot be written. - // Can't cover all aborts_if conditions - pragma aborts_if_is_partial; requires chain_status::is_operating(); - aborts_if !exists(@aptos_framework); + include AddApprovedScriptHash; } spec add_approved_script_hash_script(proposal_id: u64) { - // TODO: Calls `add_approved_script_hash`. - // Can't cover all aborts_if conditions - pragma verify = false; + use aptos_framework::chain_status; + + requires chain_status::is_operating(); + include AddApprovedScriptHash; + } + + spec schema AddApprovedScriptHash { + proposal_id: u64; + aborts_if !exists(@aptos_framework); + + aborts_if !exists>(@aptos_framework); + let voting_forum = global>(@aptos_framework); + let proposal = table::spec_get(voting_forum.proposals, proposal_id); + aborts_if !table::spec_contains(voting_forum.proposals, proposal_id); + let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold); + aborts_if timestamp::now_seconds() <= proposal.expiration_secs && + (option::spec_is_none(proposal.early_resolution_vote_threshold) || + proposal.yes_votes < early_resolution_threshold && proposal.no_votes < early_resolution_threshold); + aborts_if (timestamp::now_seconds() > proposal.expiration_secs || + option::spec_is_some(proposal.early_resolution_vote_threshold) && (proposal.yes_votes >= early_resolution_threshold || + proposal.no_votes >= early_resolution_threshold)) && + (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold); + + let post post_approved_hashes = global(@aptos_framework); + ensures simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) && + simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == proposal.execution_hash; } /// Address @aptos_framework must exist ApprovedExecutionHashes and GovernanceProposal and GovernanceResponsbility. spec resolve(proposal_id: u64, signer_address: address): signer { use aptos_framework::chain_status; - // TODO: Executing the prove command gives an error that the target file is in `from_bcs::from_bytes`, - // and the call level of the function `resolve` is too deep to obtain the parameter `bytes` of spec `from_bytes`, - // so verification cannot be performed. 
- // Can't cover all aborts_if conditions - pragma aborts_if_is_partial; requires chain_status::is_operating(); - aborts_if !exists>(@aptos_framework); + + // verify voting::resolve + include VotingIsProposalResolvableAbortsif; + + let voting_forum = global>(@aptos_framework); + let proposal = table::spec_get(voting_forum.proposals, proposal_id); + + let multi_step_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY); + let has_multi_step_key = simple_map::spec_contains_key(proposal.metadata, multi_step_key); + let is_multi_step_proposal = aptos_std::from_bcs::deserialize(simple_map::spec_get(proposal.metadata, multi_step_key)); + aborts_if has_multi_step_key && !aptos_std::from_bcs::deserializable(simple_map::spec_get(proposal.metadata, multi_step_key)); + aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY); + aborts_if has_multi_step_key && is_multi_step_proposal; + + let post post_voting_forum = global>(@aptos_framework); + let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id); + ensures post_proposal.is_resolved == true && post_proposal.resolution_time_secs == timestamp::now_seconds(); + aborts_if option::spec_is_none(proposal.execution_content); + + // verify remove_approved_hash aborts_if !exists(@aptos_framework); + let post post_approved_hashes = global(@aptos_framework).hashes; + ensures !simple_map::spec_contains_key(post_approved_hashes, proposal_id); + + // verify get_signer include GetSignerAbortsIf; + let governance_responsibility = global(@aptos_framework); + let signer_cap = simple_map::spec_get(governance_responsibility.signer_caps, signer_address); + let addr = signer_cap.account; + ensures signer::address_of(result) == addr; } /// Address @aptos_framework must exist ApprovedExecutionHashes and GovernanceProposal. @@ -248,18 +430,15 @@ spec aptos_framework::aptos_governance { use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; use aptos_framework::transaction_fee; - use aptos_framework::staking_config; - - pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved) aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; - include staking_config::StakingRewardsConfigRequirement; requires chain_status::is_operating(); - requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time(); requires exists(@aptos_framework); requires exists>(@aptos_framework); + requires exists(@aptos_framework); + include staking_config::StakingRewardsConfigRequirement; } /// Signer address must be @core_resources. @@ -275,21 +454,31 @@ spec aptos_framework::aptos_governance { /// limit addition overflow. /// pool_address must exist in StakePool. spec get_voting_power(pool_address: address): u64 { - // TODO: `stake::get_current_epoch_voting_power` is called in the function, - // the call level is very deep, and `stake::get_stake` has multiple return values, - // and multiple return values cannot be obtained in the spec, - // so the overflow aborts_if of active + pending_active + pending_inactive cannot be written. 
- pragma aborts_if_is_partial; + include GetVotingPowerAbortsIf; + + let staking_config = global(@aptos_framework); + let allow_validator_set_change = staking_config.allow_validator_set_change; + let stake_pool_res = global(pool_address); + + ensures allow_validator_set_change ==> result == stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value; + ensures !allow_validator_set_change ==> if (stake::spec_is_current_epoch_validator(pool_address)) { + result == stake_pool_res.active.value + stake_pool_res.pending_inactive.value + } else { + result == 0 + }; + } + + spec schema GetVotingPowerAbortsIf { + pool_address: address; let staking_config = global(@aptos_framework); aborts_if !exists(@aptos_framework); let allow_validator_set_change = staking_config.allow_validator_set_change; - let stake_pool = global(pool_address); - aborts_if allow_validator_set_change && (stake_pool.active.value + stake_pool.pending_active.value + stake_pool.pending_inactive.value) > MAX_U64; + let stake_pool_res = global(pool_address); + aborts_if allow_validator_set_change && (stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value) > MAX_U64; aborts_if !exists(pool_address); aborts_if !allow_validator_set_change && !exists(@aptos_framework); - - ensures allow_validator_set_change ==> result == stake_pool.active.value + stake_pool.pending_active.value + stake_pool.pending_inactive.value; + aborts_if !allow_validator_set_change && stake::spec_is_current_epoch_validator(pool_address) && stake_pool_res.active.value + stake_pool_res.pending_inactive.value > MAX_U64; } spec get_signer(signer_address: address): signer { @@ -305,6 +494,13 @@ spec aptos_framework::aptos_governance { } spec create_proposal_metadata(metadata_location: vector, metadata_hash: vector): SimpleMap> { + include CreateProposalMetadataAbortsIf; + } + + spec schema CreateProposalMetadataAbortsIf { + metadata_location: vector; + metadata_hash: vector; + aborts_if string::length(utf8(metadata_location)) > 256; aborts_if string::length(utf8(metadata_hash)) > 256; aborts_if !string::spec_internal_check_utf8(metadata_location); @@ -325,20 +521,81 @@ spec aptos_framework::aptos_governance { spec resolve_multi_step_proposal(proposal_id: u64, signer_address: address, next_execution_hash: vector): signer { use aptos_framework::chain_status; + requires chain_status::is_operating(); - // TODO: Executing the prove command gives an error that the target file is in `voting::is_proposal_resolvable`, - // the level is too deep, it is difficult to obtain the value of `proposal_state`, - // so it cannot be verified. 
- // Can't cover all aborts_if conditions - pragma aborts_if_is_partial; - let voting_forum = borrow_global>(@aptos_framework); + // verify voting::resolve_proposal_v2 + include VotingIsProposalResolvableAbortsif; + + let voting_forum = global>(@aptos_framework); let proposal = table::spec_get(voting_forum.proposals, proposal_id); - requires chain_status::is_operating(); + let post post_voting_forum = global>(@aptos_framework); + let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id); + + aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY); + let multi_step_in_execution_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY); + let post is_multi_step_proposal_in_execution_value = simple_map::spec_get(post_proposal.metadata, multi_step_in_execution_key); + ensures simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) ==> + is_multi_step_proposal_in_execution_value == std::bcs::serialize(true); + + aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY); + let multi_step_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY); + aborts_if simple_map::spec_contains_key(proposal.metadata, multi_step_key) && + aptos_std::from_bcs::deserializable(simple_map::spec_get(proposal.metadata, multi_step_key)); + let is_multi_step = simple_map::spec_contains_key(proposal.metadata, multi_step_key) && + aptos_std::from_bcs::deserialize(simple_map::spec_get(proposal.metadata, multi_step_key)); + let next_execution_hash_is_empty = len(next_execution_hash) == 0; + aborts_if !is_multi_step && !next_execution_hash_is_empty; + aborts_if next_execution_hash_is_empty && is_multi_step && !simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key); // ? 
+ ensures next_execution_hash_is_empty ==> post_proposal.is_resolved == true && post_proposal.resolution_time_secs == timestamp::spec_now_seconds() && + if (is_multi_step) { + is_multi_step_proposal_in_execution_value == std::bcs::serialize(false) + } else { + simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) ==> + is_multi_step_proposal_in_execution_value == std::bcs::serialize(true) + }; + ensures !next_execution_hash_is_empty ==> post_proposal.execution_hash == next_execution_hash && + simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) ==> + is_multi_step_proposal_in_execution_value == std::bcs::serialize(true); + + // verify remove_approved_hash + aborts_if next_execution_hash_is_empty && !exists(@aptos_framework); + let post post_approved_hashes = global(@aptos_framework).hashes; + ensures next_execution_hash_is_empty ==> !simple_map::spec_contains_key(post_approved_hashes, proposal_id); + ensures !next_execution_hash_is_empty ==> + simple_map::spec_get(post_approved_hashes, proposal_id) == next_execution_hash; + + // verify get_signer + include GetSignerAbortsIf; + let governance_responsibility = global(@aptos_framework); + let signer_cap = simple_map::spec_get(governance_responsibility.signer_caps, signer_address); + let addr = signer_cap.account; + ensures signer::address_of(result) == addr; + } + + spec schema VotingIsProposalResolvableAbortsif { + proposal_id: u64; + aborts_if !exists>(@aptos_framework); - aborts_if !exists(@aptos_framework); - aborts_if !table::spec_contains(voting_forum.proposals,proposal_id); - aborts_if !string::spec_internal_check_utf8(b"IS_MULTI_STEP_PROPOSAL_IN_EXECUTION"); + let voting_forum = global>(@aptos_framework); + let proposal = table::spec_get(voting_forum.proposals, proposal_id); + aborts_if !table::spec_contains(voting_forum.proposals, proposal_id); + let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold); + let voting_period_over = timestamp::now_seconds() > proposal.expiration_secs; + let be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (proposal.yes_votes >= early_resolution_threshold || + proposal.no_votes >= early_resolution_threshold); + let voting_closed = voting_period_over || be_resolved_early; + // If Voting Failed + aborts_if voting_closed && (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold); + // If Voting Pending + aborts_if !voting_closed; + + aborts_if proposal.is_resolved; + aborts_if !string::spec_internal_check_utf8(voting::RESOLVABLE_TIME_METADATA_KEY); + aborts_if !simple_map::spec_contains_key(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY)); + let resolvable_time = aptos_std::from_bcs::deserialize(simple_map::spec_get(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY))); + aborts_if !aptos_std::from_bcs::deserializable(simple_map::spec_get(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY))); + aborts_if timestamp::now_seconds() <= resolvable_time; aborts_if aptos_framework::transaction_context::spec_get_script_hash() != proposal.execution_hash; - include GetSignerAbortsIf; } } diff --git a/aptos-move/framework/aptos-framework/sources/code.move b/aptos-move/framework/aptos-framework/sources/code.move index 1e6c8a295aaf9..a2ae9c40af2ac 100644 --- a/aptos-move/framework/aptos-framework/sources/code.move +++ b/aptos-move/framework/aptos-framework/sources/code.move @@ -149,10 +149,9 @@ module 
aptos_framework::code { let packages = &mut borrow_global_mut(addr).packages; let len = vector::length(packages); let index = len; - let i = 0; let upgrade_number = 0; - while (i < len) { - let old = vector::borrow(packages, i); + vector::enumerate_ref(packages, |i, old| { + let old: &PackageMetadata = old; if (old.name == pack.name) { upgrade_number = old.upgrade_number + 1; check_upgradability(old, &pack, &module_names); @@ -160,8 +159,7 @@ module aptos_framework::code { } else { check_coexistence(old, &module_names) }; - i = i + 1; - }; + }); // Assign the upgrade counter. pack.upgrade_number = upgrade_number; @@ -201,30 +199,27 @@ module aptos_framework::code { assert!(can_change_upgrade_policy_to(old_pack.upgrade_policy, new_pack.upgrade_policy), error::invalid_argument(EUPGRADE_WEAKER_POLICY)); let old_modules = get_module_names(old_pack); - let i = 0; - while (i < vector::length(&old_modules)) { + + vector::for_each_ref(&old_modules, |old_module| { assert!( - vector::contains(new_modules, vector::borrow(&old_modules, i)), + vector::contains(new_modules, old_module), EMODULE_MISSING ); - i = i + 1; - } + }); } /// Checks whether a new package with given names can co-exist with old package. fun check_coexistence(old_pack: &PackageMetadata, new_modules: &vector) { // The modules introduced by each package must not overlap with `names`. - let i = 0; - while (i < vector::length(&old_pack.modules)) { - let old_mod = vector::borrow(&old_pack.modules, i); + vector::for_each_ref(&old_pack.modules, |old_mod| { + let old_mod: &ModuleMetadata = old_mod; let j = 0; while (j < vector::length(new_modules)) { let name = vector::borrow(new_modules, j); assert!(&old_mod.name != name, error::already_exists(EMODULE_NAME_CLASH)); j = j + 1; }; - i = i + 1; - } + }); } /// Check that the upgrade policies of all packages are equal or higher quality than this package. 
Also @@ -234,54 +229,47 @@ module aptos_framework::code { acquires PackageRegistry { let allowed_module_deps = vector::empty(); let deps = &pack.deps; - let i = 0; - let n = vector::length(deps); - while (i < n) { - let dep = vector::borrow(deps, i); + vector::for_each_ref(deps, |dep| { + let dep: &PackageDep = dep; assert!(exists(dep.account), error::not_found(EPACKAGE_DEP_MISSING)); if (is_policy_exempted_address(dep.account)) { // Allow all modules from this address, by using "" as a wildcard in the AllowedDep - let account = dep.account; + let account: address = dep.account; let module_name = string::utf8(b""); vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name }); - i = i + 1; - continue - }; - let registry = borrow_global(dep.account); - let j = 0; - let m = vector::length(®istry.packages); - let found = false; - while (j < m) { - let dep_pack = vector::borrow(®istry.packages, j); - if (dep_pack.name == dep.package_name) { - found = true; - // Check policy - assert!( - dep_pack.upgrade_policy.policy >= pack.upgrade_policy.policy, - error::invalid_argument(EDEP_WEAKER_POLICY) - ); - if (dep_pack.upgrade_policy == upgrade_policy_arbitrary()) { + } else { + let registry = borrow_global(dep.account); + let found = vector::any(®istry.packages, |dep_pack| { + let dep_pack: &PackageMetadata = dep_pack; + if (dep_pack.name == dep.package_name) { + // Check policy assert!( - dep.account == publish_address, - error::invalid_argument(EDEP_ARBITRARY_NOT_SAME_ADDRESS) - ) - }; - // Add allowed deps - let k = 0; - let r = vector::length(&dep_pack.modules); - while (k < r) { + dep_pack.upgrade_policy.policy >= pack.upgrade_policy.policy, + error::invalid_argument(EDEP_WEAKER_POLICY) + ); + if (dep_pack.upgrade_policy == upgrade_policy_arbitrary()) { + assert!( + dep.account == publish_address, + error::invalid_argument(EDEP_ARBITRARY_NOT_SAME_ADDRESS) + ) + }; + // Add allowed deps let account = dep.account; - let module_name = vector::borrow(&dep_pack.modules, k).name; - vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name }); - k = k + 1; - }; - break - }; - j = j + 1; + let k = 0; + let r = vector::length(&dep_pack.modules); + while (k < r) { + let module_name = vector::borrow(&dep_pack.modules, k).name; + vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name }); + k = k + 1; + }; + true + } else { + false + } + }); + assert!(found, error::not_found(EPACKAGE_DEP_MISSING)); }; - assert!(found, error::not_found(EPACKAGE_DEP_MISSING)); - i = i + 1; - }; + }); allowed_module_deps } @@ -296,11 +284,10 @@ module aptos_framework::code { /// Get the names of the modules in a package. fun get_module_names(pack: &PackageMetadata): vector { let module_names = vector::empty(); - let i = 0; - while (i < vector::length(&pack.modules)) { - vector::push_back(&mut module_names, vector::borrow(&pack.modules, i).name); - i = i + 1 - }; + vector::for_each_ref(&pack.modules, |pack_module| { + let pack_module: &ModuleMetadata = pack_module; + vector::push_back(&mut module_names, pack_module.name); + }); module_names } diff --git a/aptos-move/framework/aptos-framework/sources/code.spec.move b/aptos-move/framework/aptos-framework/sources/code.spec.move index 5e79a1ae6af4d..bcc76a388cc33 100644 --- a/aptos-move/framework/aptos-framework/sources/code.spec.move +++ b/aptos-move/framework/aptos-framework/sources/code.spec.move @@ -46,4 +46,11 @@ spec aptos_framework::code { // TODO: loop too deep. 
pragma verify = false; } + + spec get_module_names(pack: &PackageMetadata): vector { + pragma opaque; + aborts_if [abstract] false; + ensures [abstract] len(result) == len(pack.modules); + ensures [abstract] forall i in 0..len(result): result[i] == pack.modules[i].name; + } } diff --git a/aptos-move/framework/aptos-framework/sources/coin.move b/aptos-move/framework/aptos-framework/sources/coin.move index 7f7bcc2a734ef..d45751deeddb1 100644 --- a/aptos-move/framework/aptos-framework/sources/coin.move +++ b/aptos-move/framework/aptos-framework/sources/coin.move @@ -183,8 +183,13 @@ module aptos_framework::coin { }; let amount = aggregator::read(&coin.value); assert!(amount <= MAX_U64, error::out_of_range(EAGGREGATABLE_COIN_VALUE_TOO_LARGE)); - + spec { + update aggregate_supply = aggregate_supply - amount; + }; aggregator::sub(&mut coin.value, amount); + spec { + update supply = supply + amount; + }; Coin { value: (amount as u64), } @@ -192,8 +197,14 @@ module aptos_framework::coin { /// Merges `coin` into aggregatable coin (`dst_coin`). public(friend) fun merge_aggregatable_coin(dst_coin: &mut AggregatableCoin, coin: Coin) { + spec { + update supply = supply - coin.value; + }; let Coin { value } = coin; let amount = (value as u128); + spec { + update aggregate_supply = aggregate_supply + amount; + }; aggregator::add(&mut dst_coin.value, amount); } @@ -286,6 +297,9 @@ module aptos_framework::coin { coin: Coin, _cap: &BurnCapability, ) acquires CoinInfo { + spec { + update supply = supply - coin.value; + }; let Coin { value: amount } = coin; assert!(amount > 0, error::invalid_argument(EZERO_COIN_AMOUNT)); @@ -341,6 +355,9 @@ module aptos_framework::coin { /// so it is impossible to "burn" any non-zero amount of `Coin` without having /// a `BurnCapability` for the specific `CoinType`. public fun destroy_zero(zero_coin: Coin) { + spec { + update supply = supply - zero_coin.value; + }; let Coin { value } = zero_coin; assert!(value == 0, error::invalid_argument(EDESTRUCTION_OF_NONZERO_TOKEN)) } @@ -348,14 +365,26 @@ module aptos_framework::coin { /// Extracts `amount` from the passed-in `coin`, where the original token is modified in place. public fun extract(coin: &mut Coin, amount: u64): Coin { assert!(coin.value >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE)); + spec { + update supply = supply - amount; + }; coin.value = coin.value - amount; + spec { + update supply = supply + amount; + }; Coin { value: amount } } /// Extracts the entire amount from the passed-in `coin`, where the original token is modified in place. 
public fun extract_all(coin: &mut Coin): Coin { let total_value = coin.value; + spec { + update supply = supply - coin.value; + }; coin.value = 0; + spec { + update supply = supply + total_value; + }; Coin { value: total_value } } @@ -472,7 +501,13 @@ module aptos_framework::coin { spec { assume dst_coin.value + source_coin.value <= MAX_U64; }; + spec { + update supply = supply - source_coin.value; + }; let Coin { value } = source_coin; + spec { + update supply = supply + value; + }; dst_coin.value = dst_coin.value + value; } @@ -484,7 +519,9 @@ module aptos_framework::coin { _cap: &MintCapability, ): Coin acquires CoinInfo { if (amount == 0) { - return zero() + return Coin { + value: 0 + } }; let maybe_supply = &mut borrow_global_mut>(coin_address()).supply; @@ -492,7 +529,9 @@ module aptos_framework::coin { let supply = option::borrow_mut(maybe_supply); optional_aggregator::add(supply, (amount as u128)); }; - + spec { + update supply = supply + amount; + }; Coin { value: amount } } @@ -555,6 +594,9 @@ module aptos_framework::coin { /// Create a new `Coin` with a value of `0`. public fun zero(): Coin { + spec { + update supply = supply + 0; + }; Coin { value: 0 } diff --git a/aptos-move/framework/aptos-framework/sources/coin.spec.move b/aptos-move/framework/aptos-framework/sources/coin.spec.move index 410c44f7efee6..da2e8a308f38d 100644 --- a/aptos-move/framework/aptos-framework/sources/coin.spec.move +++ b/aptos-move/framework/aptos-framework/sources/coin.spec.move @@ -1,6 +1,37 @@ spec aptos_framework::coin { spec module { pragma verify = true; + global supply: num; + global aggregate_supply: num; + apply TotalSupplyTracked to * except + initialize, initialize_internal, initialize_with_parallelizable_supply; + apply TotalSupplyNoChange to * except mint, + burn, burn_from, initialize, initialize_internal, initialize_with_parallelizable_supply; + } + + spec fun spec_fun_supply_tracked(val: u64, supply: Option): bool { + option::spec_is_some(supply) ==> val == optional_aggregator::optional_aggregator_value + (option::spec_borrow(supply)) + } + + spec schema TotalSupplyTracked { + ensures old(spec_fun_supply_tracked(supply + aggregate_supply, + global>(type_info::type_of().account_address).supply)) ==> + spec_fun_supply_tracked(supply + aggregate_supply, + global>(type_info::type_of().account_address).supply); + } + + spec fun spec_fun_supply_no_change(old_supply: Option, + supply: Option): bool { + option::spec_is_some(old_supply) ==> optional_aggregator::optional_aggregator_value + (option::spec_borrow(old_supply)) == optional_aggregator::optional_aggregator_value + (option::spec_borrow(supply)) + } + + spec schema TotalSupplyNoChange { + let old_supply = global>(type_info::type_of().account_address).supply; + let post supply = global>(type_info::type_of().account_address).supply; + ensures spec_fun_supply_no_change(old_supply, supply); } spec AggregatableCoin { diff --git a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move index 0ba4a949e4174..ded0038308818 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move @@ -42,7 +42,7 @@ spec aptos_framework::gas_schedule { use aptos_framework::transaction_fee; use aptos_framework::staking_config; - pragma timeout = 100; + pragma verify_duration_estimate = 200; requires exists(@aptos_framework); requires exists>(@aptos_framework); 
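Note on the ghost-variable bookkeeping added in coin.move / coin.spec.move above: the inline `spec { update ... };` statements keep prover-only counters (`supply`, `aggregate_supply`) in lockstep with the runtime coin values, which is what lets the module-level `TotalSupplyTracked` and `TotalSupplyNoChange` schemas be applied to (almost) every function. A minimal, self-contained sketch of the same technique follows; it is illustrative only — the module name, struct, and field are hypothetical and not part of this change.

    module 0xcafe::ghost_counter_example {
        spec module {
            // Prover-only specification variable; it has no runtime representation.
            global total: num;
        }

        struct Counter has key {
            value: u64,
        }

        public fun bump(counter: &mut Counter, amount: u64) {
            spec {
                // Mirror the state change so properties phrased over `total`
                // continue to hold after this function runs.
                update total = total + amount;
            };
            counter.value = counter.value + amount;
        }

        spec bump {
            aborts_if counter.value + amount > MAX_U64;
            ensures counter.value == old(counter.value) + amount;
        }
    }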
diff --git a/aptos-move/framework/aptos-framework/sources/genesis.move b/aptos-move/framework/aptos-framework/sources/genesis.move index 20cbe67e0fae9..2d089cb9346df 100644 --- a/aptos-move/framework/aptos-framework/sources/genesis.move +++ b/aptos-move/framework/aptos-framework/sources/genesis.move @@ -158,12 +158,9 @@ module aptos_framework::genesis { } fun create_accounts(aptos_framework: &signer, accounts: vector) { - let i = 0; - let num_accounts = vector::length(&accounts); let unique_accounts = vector::empty(); - - while (i < num_accounts) { - let account_map = vector::borrow(&accounts, i); + vector::for_each_ref(&accounts, |account_map| { + let account_map: &AccountMap = account_map; assert!( !vector::contains(&unique_accounts, &account_map.account_address), error::already_exists(EDUPLICATE_ACCOUNT), @@ -175,9 +172,7 @@ module aptos_framework::genesis { account_map.account_address, account_map.balance, ); - - i = i + 1; - }; + }); } /// This creates an funds an account if it doesn't exist. @@ -198,13 +193,11 @@ module aptos_framework::genesis { employee_vesting_period_duration: u64, employees: vector, ) { - let i = 0; - let num_employee_groups = vector::length(&employees); let unique_accounts = vector::empty(); - while (i < num_employee_groups) { + vector::for_each_ref(&employees, |employee_group| { let j = 0; - let employee_group = vector::borrow(&employees, i); + let employee_group: &EmployeeAccountMap = employee_group; let num_employees_in_group = vector::length(&employee_group.accounts); let buy_ins = simple_map::create(); @@ -278,9 +271,7 @@ module aptos_framework::genesis { if (employee_group.validator.join_during_genesis) { initialize_validator(pool_address, validator); }; - - i = i + 1; - } + }); } fun create_initialize_validators_with_commission( @@ -288,14 +279,10 @@ module aptos_framework::genesis { use_staking_contract: bool, validators: vector, ) { - let i = 0; - let num_validators = vector::length(&validators); - while (i < num_validators) { - let validator = vector::borrow(&validators, i); + vector::for_each_ref(&validators, |validator| { + let validator: &ValidatorConfigurationWithCommission = validator; create_initialize_validator(aptos_framework, validator, use_staking_contract); - - i = i + 1; - }; + }); // Destroy the aptos framework account's ability to mint coins now that we're done with setting up the initial // validators. @@ -315,21 +302,15 @@ module aptos_framework::genesis { /// Network address fields are a vector per account, where each entry is a vector of addresses /// encoded in a single BCS byte array. 
fun create_initialize_validators(aptos_framework: &signer, validators: vector) { - let i = 0; - let num_validators = vector::length(&validators); - let validators_with_commission = vector::empty(); - - while (i < num_validators) { + vector::for_each_reverse(validators, |validator| { let validator_with_commission = ValidatorConfigurationWithCommission { - validator_config: vector::pop_back(&mut validators), + validator_config: validator, commission_percentage: 0, join_during_genesis: true, }; vector::push_back(&mut validators_with_commission, validator_with_commission); - - i = i + 1; - }; + }); create_initialize_validators_with_commission(aptos_framework, false, validators_with_commission); } diff --git a/aptos-move/framework/aptos-framework/sources/multisig_account.move b/aptos-move/framework/aptos-framework/sources/multisig_account.move index 92d170116e7b8..4ed285a232b95 100644 --- a/aptos-move/framework/aptos-framework/sources/multisig_account.move +++ b/aptos-move/framework/aptos-framework/sources/multisig_account.move @@ -91,6 +91,8 @@ module aptos_framework::multisig_account { const EDUPLICATE_METADATA_KEY: u64 = 16; /// The sequence number provided is invalid. It must be between [1, next pending transaction - 1]. const EINVALID_SEQUENCE_NUMBER: u64 = 17; + /// Provided owners to remove and new owners overlap. + const EOWNERS_TO_REMOVE_NEW_OWNERS_OVERLAP: u64 = 18; /// Represents a multisig account's configurations and transactions. /// This will be stored in the multisig account (created as a resource account separate from any owner accounts). @@ -448,6 +450,33 @@ module aptos_framework::multisig_account { ); } + /// Like `create_with_owners`, but removes the calling account after creation. + /// + /// This is for creating a vanity multisig account from a bootstrapping account that should not + /// be an owner after the vanity multisig address has been secured. + public entry fun create_with_owners_then_remove_bootstrapper( + bootstrapper: &signer, + owners: vector
, + num_signatures_required: u64, + metadata_keys: vector, + metadata_values: vector>, + ) acquires MultisigAccount { + let bootstrapper_address = address_of(bootstrapper); + create_with_owners( + bootstrapper, + owners, + num_signatures_required, + metadata_keys, + metadata_values + ); + update_owner_schema( + get_next_multisig_account_address(bootstrapper_address), + vector[], + vector[bootstrapper_address], + option::none() + ); + } + fun create_with_owners_internal( multisig_account: &signer, owners: vector
, @@ -502,22 +531,12 @@ module aptos_framework::multisig_account { /// maliciously alter the owners list. entry fun add_owners( multisig_account: &signer, new_owners: vector
) acquires MultisigAccount { - // Short circuit if new owners list is empty. - // This avoids emitting an event if no changes happen, which is confusing to off-chain components. - if (vector::length(&new_owners) == 0) { - return - }; - - let multisig_address = address_of(multisig_account); - assert_multisig_account_exists(multisig_address); - let multisig_account_resource = borrow_global_mut(multisig_address); - - vector::append(&mut multisig_account_resource.owners, new_owners); - // This will fail if an existing owner is added again. - validate_owners(&multisig_account_resource.owners, multisig_address); - emit_event(&mut multisig_account_resource.add_owners_events, AddOwnersEvent { - owners_added: new_owners, - }); + update_owner_schema( + address_of(multisig_account), + new_owners, + vector[], + option::none() + ); } /// Add owners then update number of signatures required, in a single operation. @@ -526,8 +545,12 @@ module aptos_framework::multisig_account { new_owners: vector
, new_num_signatures_required: u64 ) acquires MultisigAccount { - add_owners(multisig_account, new_owners); - update_signatures_required(multisig_account, new_num_signatures_required); + update_owner_schema( + address_of(multisig_account), + new_owners, + vector[], + option::some(new_num_signatures_required) + ); } /// Similar to remove_owners, but only allow removing one owner. @@ -545,46 +568,55 @@ module aptos_framework::multisig_account { /// maliciously alter the owners list. entry fun remove_owners( multisig_account: &signer, owners_to_remove: vector
) acquires MultisigAccount { - // Short circuit if the list of owners to remove is empty. - // This avoids emitting an event if no changes happen, which is confusing to off-chain components. - if (vector::length(&owners_to_remove) == 0) { - return - }; - - let multisig_address = address_of(multisig_account); - assert_multisig_account_exists(multisig_address); - let multisig_account_resource = borrow_global_mut(multisig_address); - - let owners = &mut multisig_account_resource.owners; - let owners_removed = vector::empty
(); - vector::for_each_ref(&owners_to_remove, |owner_to_remove| { - let owner_to_remove = *owner_to_remove; - let (found, index) = vector::index_of(owners, &owner_to_remove); - // Only remove an owner if they're present in the owners list. - if (found) { - vector::push_back(&mut owners_removed, owner_to_remove); - vector::swap_remove(owners, index); - }; - }); + update_owner_schema( + address_of(multisig_account), + vector[], + owners_to_remove, + option::none() + ); + } - // Make sure there's still at least as many owners as the number of signatures required. - // This also ensures that there's at least one owner left as signature threshold must be > 0. - assert!( - vector::length(owners) >= multisig_account_resource.num_signatures_required, - error::invalid_state(ENOT_ENOUGH_OWNERS), + /// Swap an owner in for an old one, without changing required signatures. + entry fun swap_owner( + multisig_account: &signer, + to_swap_in: address, + to_swap_out: address + ) acquires MultisigAccount { + update_owner_schema( + address_of(multisig_account), + vector[to_swap_in], + vector[to_swap_out], + option::none() ); + } - emit_event(&mut multisig_account_resource.remove_owners_events, RemoveOwnersEvent { owners_removed }); + /// Swap owners in and out, without changing required signatures. + entry fun swap_owners( + multisig_account: &signer, + to_swap_in: vector
, + to_swap_out: vector
+ ) acquires MultisigAccount { + update_owner_schema( + address_of(multisig_account), + to_swap_in, + to_swap_out, + option::none() + ); } - /// Update the number of signatures required then remove owners, in a single operation. - entry fun remove_owners_and_update_signatures_required( + /// Swap owners in and out, updating number of required signatures. + entry fun swap_owners_and_update_signatures_required( multisig_account: &signer, + new_owners: vector
, owners_to_remove: vector
, new_num_signatures_required: u64 ) acquires MultisigAccount { - update_signatures_required(multisig_account, new_num_signatures_required); - remove_owners(multisig_account, owners_to_remove); + update_owner_schema( + address_of(multisig_account), + new_owners, + owners_to_remove, + option::some(new_num_signatures_required) + ); } /// Update the number of signatures required to execute transaction in the specified multisig account. @@ -595,28 +627,11 @@ module aptos_framework::multisig_account { /// maliciously alter the number of signatures required. entry fun update_signatures_required( multisig_account: &signer, new_num_signatures_required: u64) acquires MultisigAccount { - let multisig_address = address_of(multisig_account); - assert_multisig_account_exists(multisig_address); - let multisig_account_resource = borrow_global_mut(multisig_address); - // Short-circuit if the new number of signatures required is the same as before. - // This avoids emitting an event. - if (multisig_account_resource.num_signatures_required == new_num_signatures_required) { - return - }; - let num_owners = vector::length(&multisig_account_resource.owners); - assert!( - new_num_signatures_required > 0 && new_num_signatures_required <= num_owners, - error::invalid_argument(EINVALID_SIGNATURES_REQUIRED), - ); - - let old_num_signatures_required = multisig_account_resource.num_signatures_required; - multisig_account_resource.num_signatures_required = new_num_signatures_required; - emit_event( - &mut multisig_account_resource.update_signature_required_events, - UpdateSignaturesRequiredEvent { - old_num_signatures_required, - new_num_signatures_required, - } + update_owner_schema( + address_of(multisig_account), + vector[], + vector[], + option::some(new_num_signatures_required) ); } @@ -958,6 +973,88 @@ module aptos_framework::multisig_account { assert!(exists(multisig_account), error::invalid_state(EACCOUNT_NOT_MULTISIG)); } + /// Add new owners, remove owners to remove, update signatures required. + fun update_owner_schema( + multisig_address: address, + new_owners: vector
, + owners_to_remove: vector
, + optional_new_num_signatures_required: Option, + ) acquires MultisigAccount { + assert_multisig_account_exists(multisig_address); + let multisig_account_ref_mut = + borrow_global_mut(multisig_address); + // Verify no overlap between new owners and owners to remove. + vector::for_each_ref(&new_owners, |new_owner_ref| { + assert!( + !vector::contains(&owners_to_remove, new_owner_ref), + error::invalid_argument(EOWNERS_TO_REMOVE_NEW_OWNERS_OVERLAP) + ) + }); + // If new owners provided, try to add them and emit an event. + if (vector::length(&new_owners) > 0) { + vector::append(&mut multisig_account_ref_mut.owners, new_owners); + validate_owners( + &multisig_account_ref_mut.owners, + multisig_address + ); + emit_event( + &mut multisig_account_ref_mut.add_owners_events, + AddOwnersEvent { owners_added: new_owners } + ); + }; + // If owners to remove provided, try to remove them. + if (vector::length(&owners_to_remove) > 0) { + let owners_ref_mut = &mut multisig_account_ref_mut.owners; + let owners_removed = vector[]; + vector::for_each_ref(&owners_to_remove, |owner_to_remove_ref| { + let (found, index) = + vector::index_of(owners_ref_mut, owner_to_remove_ref); + if (found) { + vector::push_back( + &mut owners_removed, + vector::swap_remove(owners_ref_mut, index) + ); + } + }); + // Only emit event if owner(s) actually removed. + if (vector::length(&owners_removed) > 0) { + emit_event( + &mut multisig_account_ref_mut.remove_owners_events, + RemoveOwnersEvent { owners_removed } + ); + } + }; + // If new signature count provided, try to update count. + if (option::is_some(&optional_new_num_signatures_required)) { + let new_num_signatures_required = + option::extract(&mut optional_new_num_signatures_required); + assert!( + new_num_signatures_required > 0, + error::invalid_argument(EINVALID_SIGNATURES_REQUIRED) + ); + let old_num_signatures_required = + multisig_account_ref_mut.num_signatures_required; + // Only apply update and emit event if a change indicated. + if (new_num_signatures_required != old_num_signatures_required) { + multisig_account_ref_mut.num_signatures_required = + new_num_signatures_required; + emit_event( + &mut multisig_account_ref_mut.update_signature_required_events, + UpdateSignaturesRequiredEvent { + old_num_signatures_required, + new_num_signatures_required, + } + ); + } + }; + // Verify number of owners. 
+ let num_owners = vector::length(&multisig_account_ref_mut.owners); + assert!( + num_owners >= multisig_account_ref_mut.num_signatures_required, + error::invalid_state(ENOT_ENOUGH_OWNERS) + ); + } + ////////////////////////// Tests /////////////////////////////// #[test_only] @@ -1213,7 +1310,7 @@ module aptos_framework::multisig_account { } #[test(owner = @0x123)] - #[expected_failure(abort_code = 0x1000B, location = Self)] + #[expected_failure(abort_code = 0x30005, location = Self)] public entry fun test_update_with_too_many_signatures_required_should_fail( owner: &signer) acquires MultisigAccount { setup(); @@ -1629,4 +1726,36 @@ module aptos_framework::multisig_account { reject_transaction(owner_2, multisig_account, 1); execute_rejected_transaction(owner_3, multisig_account); } + + #[test( + owner_1 = @0x123, + owner_2 = @0x124, + owner_3 = @0x125 + )] + #[expected_failure(abort_code = 0x10012, location = Self)] + fun test_update_owner_schema_overlap_should_fail( + owner_1: &signer, + owner_2: &signer, + owner_3: &signer + ) acquires MultisigAccount { + setup(); + let owner_1_addr = address_of(owner_1); + let owner_2_addr = address_of(owner_2); + let owner_3_addr = address_of(owner_3); + create_account(owner_1_addr); + let multisig_address = get_next_multisig_account_address(owner_1_addr); + create_with_owners( + owner_1, + vector[owner_2_addr, owner_3_addr], + 2, + vector[], + vector[] + ); + update_owner_schema( + multisig_address, + vector[owner_1_addr], + vector[owner_1_addr], + option::none() + ); + } } diff --git a/aptos-move/framework/aptos-framework/sources/stake.move b/aptos-move/framework/aptos-framework/sources/stake.move index 2511fc960e23d..9ae0a75f25900 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.move +++ b/aptos-move/framework/aptos-framework/sources/stake.move @@ -1040,23 +1040,17 @@ module aptos_framework::stake { let validator_perf = borrow_global_mut(@aptos_framework); // Process pending stake and distribute transaction fees and rewards for each currently active validator. - let i = 0; - let len = vector::length(&validator_set.active_validators); - while (i < len) { - let validator = vector::borrow(&validator_set.active_validators, i); + vector::for_each_ref(&validator_set.active_validators, |validator| { + let validator: &ValidatorInfo = validator; update_stake_pool(validator_perf, validator.addr, &config); - i = i + 1; - }; + }); // Process pending stake and distribute transaction fees and rewards for each currently pending_inactive validator // (requested to leave but not removed yet). - let i = 0; - let len = vector::length(&validator_set.pending_inactive); - while (i < len) { - let validator = vector::borrow(&validator_set.pending_inactive, i); + vector::for_each_ref(&validator_set.pending_inactive, |validator| { + let validator: &ValidatorInfo = validator; update_stake_pool(validator_perf, validator.addr, &config); - i = i + 1; - }; + }); // Activate currently pending_active validators. 
append(&mut validator_set.active_validators, &mut validator_set.pending_active); @@ -2586,15 +2580,12 @@ module aptos_framework::stake { #[test_only] public fun set_validator_perf_at_least_one_block() acquires ValidatorPerformance { let validator_perf = borrow_global_mut(@aptos_framework); - let len = vector::length(&validator_perf.validators); - let i = 0; - while (i < len) { - let validator = vector::borrow_mut(&mut validator_perf.validators, i); + vector::for_each_mut(&mut validator_perf.validators, |validator|{ + let validator: &mut IndividualValidatorPerformance = validator; if (validator.successful_proposals + validator.failed_proposals < 1) { validator.successful_proposals = 1; }; - i = i + 1; - }; + }); } #[test(aptos_framework = @0x1, validator_1 = @0x123, validator_2 = @0x234)] diff --git a/aptos-move/framework/aptos-framework/sources/stake.spec.move b/aptos-move/framework/aptos-framework/sources/stake.spec.move index 1e59cd3968458..aa117251a0bb2 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.spec.move +++ b/aptos-move/framework/aptos-framework/sources/stake.spec.move @@ -27,6 +27,13 @@ spec aptos_framework::stake { // Function specifications // ----------------------- + spec initialize_validator_fees(aptos_framework: &signer) { + let aptos_addr = signer::address_of(aptos_framework); + aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); + aborts_if exists(aptos_addr); + ensures exists(aptos_addr); + } + // `Validator` is initialized once. spec initialize(aptos_framework: &signer) { let aptos_addr = signer::address_of(aptos_framework); diff --git a/aptos-move/framework/aptos-framework/sources/staking_contract.move b/aptos-move/framework/aptos-framework/sources/staking_contract.move index d22841dbac877..9aee053363268 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_contract.move +++ b/aptos-move/framework/aptos-framework/sources/staking_contract.move @@ -703,10 +703,8 @@ module aptos_framework::staking_contract { // Charge all stakeholders (except for the operator themselves) commission on any rewards earnt relatively to the // previous value of the distribution pool. 
let shareholders = &pool_u64::shareholders(distribution_pool); - let len = vector::length(shareholders); - let i = 0; - while (i < len) { - let shareholder = *vector::borrow(shareholders, i); + vector::for_each_ref(shareholders, |shareholder| { + let shareholder: address = *shareholder; if (shareholder != operator) { let shares = pool_u64::shares(distribution_pool, shareholder); let previous_worth = pool_u64::balance(distribution_pool, shareholder); @@ -719,9 +717,7 @@ module aptos_framework::staking_contract { distribution_pool, unpaid_commission, updated_total_coins); pool_u64::transfer_shares(distribution_pool, shareholder, operator, shares_to_transfer); }; - - i = i + 1; - }; + }); pool_u64::update_total_coins(distribution_pool, updated_total_coins); } diff --git a/aptos-move/framework/aptos-framework/sources/staking_proxy.move b/aptos-move/framework/aptos-framework/sources/staking_proxy.move index 3ac513bfa7df8..26d1aa33372ce 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_proxy.move +++ b/aptos-move/framework/aptos-framework/sources/staking_proxy.move @@ -21,16 +21,13 @@ module aptos_framework::staking_proxy { public entry fun set_vesting_contract_operator(owner: &signer, old_operator: address, new_operator: address) { let owner_address = signer::address_of(owner); let vesting_contracts = &vesting::vesting_contracts(owner_address); - let i = 0; - let len = vector::length(vesting_contracts); - while (i < len) { - let vesting_contract = *vector::borrow(vesting_contracts, i); + vector::for_each_ref(vesting_contracts, |vesting_contract| { + let vesting_contract = *vesting_contract; if (vesting::operator(vesting_contract) == old_operator) { let current_commission_percentage = vesting::operator_commission_percentage(vesting_contract); vesting::update_operator(owner, vesting_contract, new_operator, current_commission_percentage); }; - i = i + 1; - } + }); } public entry fun set_staking_contract_operator(owner: &signer, old_operator: address, new_operator: address) { @@ -51,15 +48,12 @@ module aptos_framework::staking_proxy { public entry fun set_vesting_contract_voter(owner: &signer, operator: address, new_voter: address) { let owner_address = signer::address_of(owner); let vesting_contracts = &vesting::vesting_contracts(owner_address); - let i = 0; - let len = vector::length(vesting_contracts); - while (i < len) { - let vesting_contract = *vector::borrow(vesting_contracts, i); + vector::for_each_ref(vesting_contracts, |vesting_contract| { + let vesting_contract = *vesting_contract; if (vesting::operator(vesting_contract) == operator) { vesting::update_voter(owner, vesting_contract, new_voter); }; - i = i + 1; - } + }); } public entry fun set_staking_contract_voter(owner: &signer, operator: address, new_voter: address) { diff --git a/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move b/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move index 34716da641497..334f046e1de14 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move @@ -12,13 +12,41 @@ spec aptos_framework::transaction_fee { } spec initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8) { - // TODO: monomorphization issue. duplicated boogie procedures. 
- pragma verify=false; + use std::signer; + use aptos_framework::stake::ValidatorFees; + use aptos_framework::aggregator_factory; + use aptos_framework::system_addresses; + + aborts_if exists(@aptos_framework); + aborts_if burn_percentage > 100; + + let aptos_addr = signer::address_of(aptos_framework); + aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); + aborts_if exists(aptos_addr); + + include system_addresses::AbortsIfNotAptosFramework {account: aptos_framework}; + include aggregator_factory::CreateAggregatorInternalAbortsIf; + + ensures exists(aptos_addr); } spec upgrade_burn_percentage(aptos_framework: &signer, new_burn_percentage: u8) { - // TODO: missing aborts_if spec - pragma verify=false; + use std::signer; + use aptos_framework::coin::CoinInfo; + use aptos_framework::aptos_coin::AptosCoin; + // Percentage validation + aborts_if new_burn_percentage > 100; + // Signer validation + let aptos_addr = signer::address_of(aptos_framework); + aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); + // Requirements of `process_collected_fees` + requires exists(@aptos_framework); + requires exists(@aptos_framework); + requires exists>(@aptos_framework); + include RequiresCollectedFeesPerValueLeqBlockAptosSupply; + // The effect of upgrading the burn percentage + ensures exists(@aptos_framework) ==> + global(@aptos_framework).burn_percentage == new_burn_percentage; } spec register_proposer_for_fee_collection(proposer_addr: address) { diff --git a/aptos-move/framework/aptos-framework/sources/vesting.move b/aptos-move/framework/aptos-framework/sources/vesting.move index 0e6231274b2c8..18ae2c2675500 100644 --- a/aptos-move/framework/aptos-framework/sources/vesting.move +++ b/aptos-move/framework/aptos-framework/sources/vesting.move @@ -401,18 +401,17 @@ module aptos_framework::vesting { return shareholder_or_beneficiary }; let vesting_contract = borrow_global(vesting_contract_address); - let i = 0; - let len = vector::length(shareholders); - while (i < len) { - let shareholder = *vector::borrow(shareholders, i); - // This will still return the shareholder if shareholder == beneficiary. - if (shareholder_or_beneficiary == get_beneficiary(vesting_contract, shareholder)) { - return shareholder - }; - i = i + 1; - }; + let result = @0x0; + vector::any(shareholders, |shareholder| { + if (shareholder_or_beneficiary == get_beneficiary(vesting_contract, *shareholder)) { + result = *shareholder; + true + } else { + false + } + }); - @0x0 + result } /// Create a vesting schedule with the given schedule of distributions, a vesting start time and period duration. @@ -464,22 +463,18 @@ module aptos_framework::vesting { let grant = coin::zero(); let grant_amount = 0; let grant_pool = pool_u64::create(MAXIMUM_SHAREHOLDERS); - let len = vector::length(shareholders); - let i = 0; - while (i < len) { - let shareholder = *vector::borrow(shareholders, i); + vector::for_each_ref(shareholders, |shareholder| { + let shareholder: address = *shareholder; let (_, buy_in) = simple_map::remove(&mut buy_ins, &shareholder); let buy_in_amount = coin::value(&buy_in); coin::merge(&mut grant, buy_in); pool_u64::buy_in( &mut grant_pool, - *vector::borrow(shareholders, i), + shareholder, buy_in_amount, ); grant_amount = grant_amount + buy_in_amount; - - i = i + 1; - }; + }); assert!(grant_amount > 0, error::invalid_argument(EZERO_GRANT)); // If this is the first time this admin account has created a vesting contract, initialize the admin store. 
@@ -553,12 +548,10 @@ module aptos_framework::vesting { assert!(len != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION)); - let i = 0; - while (i < len) { - let contract_address = *vector::borrow(&contract_addresses, i); + vector::for_each_ref(&contract_addresses, |contract_address| { + let contract_address: address = *contract_address; unlock_rewards(contract_address); - i = i + 1; - }; + }); } /// Unlock any vested portion of the grant. @@ -620,12 +613,10 @@ module aptos_framework::vesting { assert!(len != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION)); - let i = 0; - while (i < len) { - let contract_address = *vector::borrow(&contract_addresses, i); + vector::for_each_ref(&contract_addresses, |contract_address| { + let contract_address = *contract_address; vest(contract_address); - i = i + 1; - }; + }); } /// Distribute any withdrawable stake from the stake pool. @@ -643,18 +634,14 @@ module aptos_framework::vesting { // Distribute coins to all shareholders in the vesting contract. let grant_pool = &vesting_contract.grant_pool; let shareholders = &pool_u64::shareholders(grant_pool); - let len = vector::length(shareholders); - let i = 0; - while (i < len) { - let shareholder = *vector::borrow(shareholders, i); + vector::for_each_ref(shareholders, |shareholder| { + let shareholder = *shareholder; let shares = pool_u64::shares(grant_pool, shareholder); let amount = pool_u64::shares_to_amount_with_total_coins(grant_pool, shares, total_distribution_amount); let share_of_coins = coin::extract(&mut coins, amount); let recipient_address = get_beneficiary(vesting_contract, shareholder); aptos_account::deposit_coins(recipient_address, share_of_coins); - - i = i + 1; - }; + }); // Send any remaining "dust" (leftover due to rounding error) to the withdrawal address. if (coin::value(&coins) > 0) { @@ -679,12 +666,10 @@ module aptos_framework::vesting { assert!(len != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION)); - let i = 0; - while (i < len) { - let contract_address = *vector::borrow(&contract_addresses, i); + vector::for_each_ref(&contract_addresses, |contract_address| { + let contract_address = *contract_address; distribute(contract_address); - i = i + 1; - }; + }); } /// Terminate the vesting contract and send all funds back to the withdrawal address. 
@@ -1017,15 +1002,12 @@ module aptos_framework::vesting { stake::initialize_for_test_custom(aptos_framework, MIN_STAKE, GRANT_AMOUNT * 10, 3600, true, 10, 10000, 1000000); - let len = vector::length(accounts); - let i = 0; - while (i < len) { - let addr = *vector::borrow(accounts, i); + vector::for_each_ref(accounts, |addr| { + let addr: address = *addr; if (!account::exists_at(addr)) { create_account(addr); }; - i = i + 1; - }; + }); } #[test_only] @@ -1058,13 +1040,9 @@ module aptos_framework::vesting { vesting_denominator: u64, ): address acquires AdminStore { let schedule = vector::empty(); - let i = 0; - let len = vector::length(vesting_numerators); - while (i < len) { - let num = *vector::borrow(vesting_numerators, i); - vector::push_back(&mut schedule, fixed_point32::create_from_rational(num, vesting_denominator)); - i = i + 1; - }; + vector::for_each_ref(vesting_numerators, |num| { + vector::push_back(&mut schedule, fixed_point32::create_from_rational(*num, vesting_denominator)); + }); let vesting_schedule = create_vesting_schedule( schedule, timestamp::now_seconds() + VESTING_SCHEDULE_CLIFF, @@ -1073,13 +1051,10 @@ module aptos_framework::vesting { let admin_address = signer::address_of(admin); let buy_ins = simple_map::create>(); - let i = 0; - let len = vector::length(shares); - while (i < len) { + vector::enumerate_ref(shares, |i, share| { let shareholder = *vector::borrow(shareholders, i); - simple_map::add(&mut buy_ins, shareholder, stake::mint_coins(*vector::borrow(shares, i))); - i = i + 1; - }; + simple_map::add(&mut buy_ins, shareholder, stake::mint_coins(*share)); + }); create_vesting_contract( admin, diff --git a/aptos-move/framework/aptos-framework/sources/voting.spec.move b/aptos-move/framework/aptos-framework/sources/voting.spec.move index f887b3c7250a8..0f523f8b083ad 100644 --- a/aptos-move/framework/aptos-framework/sources/voting.spec.move +++ b/aptos-move/framework/aptos-framework/sources/voting.spec.move @@ -36,11 +36,11 @@ spec aptos_framework::voting { include CreateProposalAbortsIf{is_multi_step_proposal: false}; } - /// The min_vote_threshold lower thanearly_resolution_vote_threshold. - /// Make sure the execution script's hash is not empty. - /// VotingForum existed under the voting_forum_address. - /// The next_proposal_id in VotingForum is up to MAX_U64. - /// CurrentTimeMicroseconds existed under the @aptos_framework. + // The min_vote_threshold lower thanearly_resolution_vote_threshold. + // Make sure the execution script's hash is not empty. + // VotingForum existed under the voting_forum_address. + // The next_proposal_id in VotingForum is up to MAX_U64. + // CurrentTimeMicroseconds existed under the @aptos_framework. 
spec create_proposal_v2( proposer: address, voting_forum_address: address, @@ -90,7 +90,8 @@ spec aptos_framework::voting { should_pass: bool, ) { use aptos_framework::chain_status; - requires chain_status::is_operating(); // Ensures existence of Timestamp + // Ensures existence of Timestamp + requires chain_status::is_operating(); aborts_if !exists>(voting_forum_address); let voting_forum = global>(voting_forum_address); @@ -114,14 +115,31 @@ spec aptos_framework::voting { voting_forum_address: address, proposal_id: u64, ) { - use aptos_framework::chain_status; - requires chain_status::is_operating(); // Ensures existence of Timestamp + use aptos_framework::chain_status; + // Ensures existence of Timestamp + requires chain_status::is_operating(); + include AbortsIfNotContainProposalID; - // If the proposal is not resolvable, this function aborts. + let voting_forum = global>(voting_forum_address); + let proposal = table::spec_get(voting_forum.proposals, proposal_id); + let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold); + let voting_period_over = timestamp::now_seconds() > proposal.expiration_secs; + let be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (proposal.yes_votes >= early_resolution_threshold || + proposal.no_votes >= early_resolution_threshold); + let voting_closed = voting_period_over || be_resolved_early; + // Avoid Overflow + aborts_if voting_closed && (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold); + // Resolvable_time Properties + aborts_if !voting_closed; - // TODO: Find a way to specify when it will abort. The opaque with spec fun doesn't work. - pragma aborts_if_is_strict = false; + aborts_if proposal.is_resolved; + aborts_if !std::string::spec_internal_check_utf8(RESOLVABLE_TIME_METADATA_KEY); + aborts_if !simple_map::spec_contains_key(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY)); + aborts_if !from_bcs::deserializable(simple_map::spec_get(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY))); + aborts_if timestamp::spec_now_seconds() <= from_bcs::deserialize(simple_map::spec_get(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY))); + aborts_if transaction_context::spec_get_script_hash() != proposal.execution_hash; } spec resolve( @@ -129,7 +147,8 @@ spec aptos_framework::voting { proposal_id: u64, ): ProposalType { use aptos_framework::chain_status; - requires chain_status::is_operating(); // Ensures existence of Timestamp + // Ensures existence of Timestamp + requires chain_status::is_operating(); pragma aborts_if_is_partial; include AbortsIfNotContainProposalID; @@ -142,7 +161,8 @@ spec aptos_framework::voting { next_execution_hash: vector, ) { use aptos_framework::chain_status; - requires chain_status::is_operating(); // Ensures existence of Timestamp + // Ensures existence of Timestamp + requires chain_status::is_operating(); pragma aborts_if_is_partial; include AbortsIfNotContainProposalID; @@ -156,7 +176,8 @@ spec aptos_framework::voting { spec is_voting_closed(voting_forum_address: address, proposal_id: u64): bool { use aptos_framework::chain_status; - requires chain_status::is_operating(); // Ensures existence of Timestamp + // Ensures existence of Timestamp + requires chain_status::is_operating(); include AbortsIfNotContainProposalID; } @@ -164,16 +185,42 @@ spec aptos_framework::voting { aborts_if false; } + spec fun spec_get_proposal_state( + 
voting_forum_address: address, + proposal_id: u64, + ): u64; + spec get_proposal_state( voting_forum_address: address, proposal_id: u64, ): u64 { + use aptos_framework::chain_status; - requires chain_status::is_operating(); // Ensures existence of Timestamp - // Addition of yes_votes and no_votes might overflow. + pragma addition_overflow_unchecked; + // Ensures existence of Timestamp + requires chain_status::is_operating(); + include AbortsIfNotContainProposalID; - // Any way to specify the result? + + let voting_forum = global>(voting_forum_address); + let proposal = table::spec_get(voting_forum.proposals, proposal_id); + let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold); + let voting_period_over = timestamp::now_seconds() > proposal.expiration_secs; + let be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) && + (proposal.yes_votes >= early_resolution_threshold || + proposal.no_votes >= early_resolution_threshold); + let voting_closed = voting_period_over || be_resolved_early; + // Voting Succeeded or Failed + ensures voting_closed ==> if (proposal.yes_votes > proposal.no_votes && proposal.yes_votes + proposal.no_votes >= proposal.min_vote_threshold) { + result == PROPOSAL_STATE_SUCCEEDED + } else { + result == PROPOSAL_STATE_FAILED + }; + + // Voting is Pending + ensures !voting_closed ==> result == PROPOSAL_STATE_PENDING; + } spec get_proposal_creation_secs( @@ -255,4 +302,5 @@ spec aptos_framework::voting { requires chain_status::is_operating(); aborts_if false; } + } diff --git a/aptos-move/framework/aptos-stdlib/doc/big_vector.md b/aptos-move/framework/aptos-stdlib/doc/big_vector.md index b378efa9e92af..a92da6cf7e232 100644 --- a/aptos-move/framework/aptos-stdlib/doc/big_vector.md +++ b/aptos-move/framework/aptos-stdlib/doc/big_vector.md @@ -557,17 +557,13 @@ Disclaimer: This function is costly. Use it at your own discretion. while (num_buckets_left > 0) { let pop_bucket = table_with_length::remove(&mut v.buckets, num_buckets_left - 1); - let pop_bucket_length = vector::length(&pop_bucket); - let i = 0; - while(i < pop_bucket_length){ - vector::push_back(&mut push_bucket, vector::pop_back(&mut pop_bucket)); + vector::for_each_reverse(pop_bucket, |val| { + vector::push_back(&mut push_bucket, val); if (vector::length(&push_bucket) == v.bucket_size) { vector::push_back(&mut new_buckets, push_bucket); push_bucket = vector[]; }; - i = i + 1; - }; - vector::destroy_empty(pop_bucket); + }); num_buckets_left = num_buckets_left - 1; }; diff --git a/aptos-move/framework/aptos-stdlib/doc/from_bcs.md b/aptos-move/framework/aptos-stdlib/doc/from_bcs.md index adb0222db0dd3..5dc4812f0bfef 100644 --- a/aptos-move/framework/aptos-stdlib/doc/from_bcs.md +++ b/aptos-move/framework/aptos-stdlib/doc/from_bcs.md @@ -337,6 +337,10 @@ owned. fun deserializable<T>(bytes: vector<u8>): bool; axiom<T> forall b1: vector<u8>, b2: vector<u8>: (deserialize<T>(b1) == deserialize<T>(b2) ==> b1 == b2); +axiom<T> forall b1: vector<u8>, b2: vector<u8>: + ( b1 == b2 ==> deserializable<T>(b1) == deserializable<T>(b2) ); +axiom<T> forall b1: vector<u8>, b2: vector<u8>: + ( b1 == b2 ==> deserialize<T>(b1) == deserialize<T>(b2) );
diff --git a/aptos-move/framework/aptos-stdlib/doc/math128.md b/aptos-move/framework/aptos-stdlib/doc/math128.md index 14734913c9480..d6a83377c3331 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math128.md +++ b/aptos-move/framework/aptos-stdlib/doc/math128.md @@ -421,7 +421,8 @@ For functions that approximate a value it's useful to test a value is close to the most correct value up to last digit -
fun assert_approx_the_same(x: u128, y: u128, precission: u128)
+
#[test_only]
+fun assert_approx_the_same(x: u128, y: u128, precission: u128)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/math64.md b/aptos-move/framework/aptos-stdlib/doc/math64.md index 2cc3dafb66e28..3e8d628be3318 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math64.md +++ b/aptos-move/framework/aptos-stdlib/doc/math64.md @@ -376,7 +376,8 @@ For functions that approximate a value it's useful to test a value is close to the most correct value up to last digit -
fun assert_approx_the_same(x: u128, y: u128, precission: u64)
+
#[test_only]
+fun assert_approx_the_same(x: u128, y: u128, precission: u64)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/math_fixed.md b/aptos-move/framework/aptos-stdlib/doc/math_fixed.md index 3b877662f9f71..2fc1bb6fe70dc 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math_fixed.md +++ b/aptos-move/framework/aptos-stdlib/doc/math_fixed.md @@ -304,7 +304,8 @@ For functions that approximate a value it's useful to test a value is close to the most correct value up to last digit -
fun assert_approx_the_same(x: u128, y: u128, precission: u128)
+
#[test_only]
+fun assert_approx_the_same(x: u128, y: u128, precission: u128)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md b/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md index 7a250af288eb7..cd972b7019ca1 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md +++ b/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md @@ -299,7 +299,8 @@ For functions that approximate a value it's useful to test a value is close to the most correct value up to last digit -
fun assert_approx_the_same(x: u256, y: u256, precission: u128)
+
#[test_only]
+fun assert_approx_the_same(x: u256, y: u256, precission: u128)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/simple_map.md b/aptos-move/framework/aptos-stdlib/doc/simple_map.md index fdbd00ab5e0d6..876f4238ae9ad 100644 --- a/aptos-move/framework/aptos-stdlib/doc/simple_map.md +++ b/aptos-move/framework/aptos-stdlib/doc/simple_map.md @@ -15,13 +15,18 @@ This module provides a solution for sorted maps, that is it has the properties t - [Struct `Element`](#0x1_simple_map_Element) - [Constants](#@Constants_0) - [Function `length`](#0x1_simple_map_length) +- [Function `new`](#0x1_simple_map_new) +- [Function `new_from`](#0x1_simple_map_new_from) - [Function `create`](#0x1_simple_map_create) - [Function `borrow`](#0x1_simple_map_borrow) - [Function `borrow_mut`](#0x1_simple_map_borrow_mut) - [Function `contains_key`](#0x1_simple_map_contains_key) - [Function `destroy_empty`](#0x1_simple_map_destroy_empty) - [Function `add`](#0x1_simple_map_add) +- [Function `add_all`](#0x1_simple_map_add_all) - [Function `upsert`](#0x1_simple_map_upsert) +- [Function `keys`](#0x1_simple_map_keys) +- [Function `values`](#0x1_simple_map_values) - [Function `to_vec_pair`](#0x1_simple_map_to_vec_pair) - [Function `destroy`](#0x1_simple_map_destroy) - [Function `remove`](#0x1_simple_map_remove) @@ -29,13 +34,18 @@ This module provides a solution for sorted maps, that is it has the properties t - [Specification](#@Specification_1) - [Struct `SimpleMap`](#@Specification_1_SimpleMap) - [Function `length`](#@Specification_1_length) + - [Function `new`](#@Specification_1_new) + - [Function `new_from`](#@Specification_1_new_from) - [Function `create`](#@Specification_1_create) - [Function `borrow`](#@Specification_1_borrow) - [Function `borrow_mut`](#@Specification_1_borrow_mut) - [Function `contains_key`](#@Specification_1_contains_key) - [Function `destroy_empty`](#@Specification_1_destroy_empty) - [Function `add`](#@Specification_1_add) + - [Function `add_all`](#@Specification_1_add_all) - [Function `upsert`](#@Specification_1_upsert) + - [Function `keys`](#@Specification_1_keys) + - [Function `values`](#@Specification_1_values) - [Function `to_vec_pair`](#@Specification_1_to_vec_pair) - [Function `remove`](#@Specification_1_remove) - [Function `find`](#@Specification_1_find) @@ -133,6 +143,16 @@ Map key is not found + + +Lengths of keys and values do not match + + +
const EMISMATCHED_LENGTHS: u64 = 3;
+
+ + + ## Function `length` @@ -157,13 +177,14 @@ Map key is not found - + -## Function `create` +## Function `new` +Create an empty SimpleMap. -
public fun create<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
public fun new<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
 
@@ -172,7 +193,7 @@ Map key is not found Implementation -
public fun create<Key: store, Value: store>(): SimpleMap<Key, Value> {
+
public fun new<Key: store, Value: store>(): SimpleMap<Key, Value> {
     SimpleMap {
         data: vector::empty(),
     }
@@ -181,6 +202,63 @@ Map key is not found
 
 
 
+
+
+
+
+## Function `new_from`
+
+Create a SimpleMap from a vector of keys and values. The keys must be unique.
+
+
+
public fun new_from<Key: store, Value: store>(keys: vector<Key>, values: vector<Value>): simple_map::SimpleMap<Key, Value>
+
+ + + +
+Implementation + + +
public fun new_from<Key: store, Value: store>(
+    keys: vector<Key>,
+    values: vector<Value>,
+): SimpleMap<Key, Value> {
+    let map = new();
+    add_all(&mut map, keys, values);
+    map
+}
+
+ + + +
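As a usage note, here is a minimal test-style sketch of the new `new_from` constructor; the `0xcafe::simple_map_new_from_example` module name is hypothetical and not part of this change:

```move
#[test_only]
module 0xcafe::simple_map_new_from_example {
    use aptos_std::simple_map;

    #[test]
    fun new_from_usage() {
        // Build a map directly from parallel key/value vectors.
        let map = simple_map::new_from(vector[1u64, 2, 3], vector[10u64, 20, 30]);
        assert!(simple_map::length(&map) == 3, 0);
        assert!(*simple_map::borrow(&map, &2) == 20, 1);
    }
}
```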
+ + + +## Function `create` + +Create an empty SimpleMap. +This function is deprecated, use new instead. + + +
#[deprecated]
+public fun create<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
+ + + +
+Implementation + + +
public fun create<Key: store, Value: store>(): SimpleMap<Key, Value> {
+    new()
+}
+
+ + +
@@ -300,6 +378,7 @@ Map key is not found ## Function `add` +Add a key/value pair to the map. The key must not already exist.
public fun add<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
@@ -325,6 +404,38 @@ Map key is not found
 
 
 
+
+
+
+
+## Function `add_all`
+
+Add multiple key/value pairs to the map. The keys must not already exist.
+
+
+
public fun add_all<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
+ + + +
+Implementation + + +
public fun add_all<Key: store, Value: store>(
+    map: &mut SimpleMap<Key, Value>,
+    keys: vector<Key>,
+    values: vector<Value>,
+) {
+    assert!(vector::length(&keys) == vector::length(&values), error::invalid_argument(EMISMATCHED_LENGTHS));
+    vector::zip(keys, values, |key, value| {
+        add(map, key, value);
+    });
+}
+
+ + +
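A small sketch of `add_all`, including the new `EMISMATCHED_LENGTHS` abort when the key and value vectors differ in length; the module name is hypothetical:

```move
#[test_only]
module 0xcafe::simple_map_add_all_example {
    use aptos_std::simple_map;

    #[test]
    fun add_all_usage() {
        let map = simple_map::new<u64, u64>();
        simple_map::add_all(&mut map, vector[1, 2], vector[10, 20]);
        assert!(simple_map::length(&map) == 2, 0);
    }

    #[test]
    #[expected_failure]
    fun add_all_mismatched_lengths() {
        // Aborts with EMISMATCHED_LENGTHS: two keys but only one value.
        let map = simple_map::new<u64, u64>();
        simple_map::add_all(&mut map, vector[1, 2], vector[10]);
    }
}
```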
@@ -368,6 +479,62 @@ Insert key/value pair or update an existing key to a new value + + + + +## Function `keys` + +Return all keys in the map. This requires keys to be copyable. + + +
public fun keys<Key: copy, Value>(map: &simple_map::SimpleMap<Key, Value>): vector<Key>
+
+ + + +
+Implementation + + +
public fun keys<Key: copy, Value>(map: &SimpleMap<Key, Value>): vector<Key> {
+    vector::map_ref(&map.data, |e| {
+        let e: &Element<Key, Value> = e;
+        e.key
+    })
+}
+
+ + + +
+ + + +## Function `values` + +Return all values in the map. This requires values to be copyable. + + +
public fun values<Key, Value: copy>(map: &simple_map::SimpleMap<Key, Value>): vector<Value>
+
+ + + +
+Implementation + + +
public fun values<Key, Value: copy>(map: &SimpleMap<Key, Value>): vector<Value> {
+    vector::map_ref(&map.data, |e| {
+        let e: &Element<Key, Value> = e;
+        e.value
+    })
+}
+
+ + +
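A combined sketch of the new `keys` and `values` accessors (module name hypothetical):

```move
#[test_only]
module 0xcafe::simple_map_keys_values_example {
    use aptos_std::simple_map;

    #[test]
    fun keys_values_usage() {
        let map = simple_map::new_from(vector[1u64, 2, 3], vector[10u64, 20, 30]);
        // Both accessors return copies of the entries in insertion order.
        assert!(simple_map::keys(&map) == vector[1, 2, 3], 0);
        assert!(simple_map::values(&map) == vector[10, 20, 30], 1);
    }
}
```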
@@ -437,6 +604,7 @@ using lambdas to destroy the individual keys and values. ## Function `remove` +Remove a key/value pair from the map. The key must exist.
public fun remove<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
@@ -505,6 +673,51 @@ using lambdas to destroy the individual keys and values.
 ## Specification
 
 
+
+
+
+
+
native fun spec_len<K, V>(t: SimpleMap<K, V>): num;
+
+ + + + + + + +
native fun spec_contains_key<K, V>(t: SimpleMap<K, V>, k: K): bool;
+
+ + + + + + + +
native fun spec_set<K, V>(t: SimpleMap<K, V>, k: K, v: V): SimpleMap<K, V>;
+
+ + + + + + + +
native fun spec_remove<K, V>(t: SimpleMap<K, V>, k: K): SimpleMap<K, V>;
+
+ + + + + + + +
native fun spec_get<K, V>(t: SimpleMap<K, V>, k: K): V;
+
+ + + ### Struct `SimpleMap` @@ -555,6 +768,38 @@ using lambdas to destroy the individual keys and values. +
pragma intrinsic;
+
+ + + + + +### Function `new` + + +
public fun new<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `new_from` + + +
public fun new_from<Key: store, Value: store>(keys: vector<Key>, values: vector<Value>): simple_map::SimpleMap<Key, Value>
+
+ + + +
pragma intrinsic;
 
@@ -565,7 +810,8 @@ using lambdas to destroy the individual keys and values. ### Function `create` -
public fun create<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
#[deprecated]
+public fun create<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
 
@@ -651,6 +897,22 @@ using lambdas to destroy the individual keys and values. +
pragma intrinsic;
+
+ + + + + +### Function `add_all` + + +
public fun add_all<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
+ + + +
pragma intrinsic;
 
@@ -679,47 +941,34 @@ using lambdas to destroy the individual keys and values. + - +### Function `keys` -
native fun spec_len<K, V>(t: SimpleMap<K, V>): num;
+
public fun keys<Key: copy, Value>(map: &simple_map::SimpleMap<Key, Value>): vector<Key>
 
- - - -
native fun spec_contains_key<K, V>(t: SimpleMap<K, V>, k: K): bool;
-
- - - - - - - -
native fun spec_set<K, V>(t: SimpleMap<K, V>, k: K, v: V): SimpleMap<K, V>;
+
pragma verify=false;
 
+ - +### Function `values` -
native fun spec_remove<K, V>(t: SimpleMap<K, V>, k: K): SimpleMap<K, V>;
+
public fun values<Key, Value: copy>(map: &simple_map::SimpleMap<Key, Value>): vector<Value>
 
- - - -
native fun spec_get<K, V>(t: SimpleMap<K, V>, k: K): V;
+
pragma verify=false;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/smart_vector.md b/aptos-move/framework/aptos-stdlib/doc/smart_vector.md index c4ae93ff04641..9ed0d1bcf4cd9 100644 --- a/aptos-move/framework/aptos-stdlib/doc/smart_vector.md +++ b/aptos-move/framework/aptos-stdlib/doc/smart_vector.md @@ -7,6 +7,7 @@ - [Struct `SmartVector`](#0x1_smart_vector_SmartVector) - [Constants](#@Constants_0) +- [Function `new`](#0x1_smart_vector_new) - [Function `empty`](#0x1_smart_vector_empty) - [Function `empty_with_config`](#0x1_smart_vector_empty_with_config) - [Function `singleton`](#0x1_smart_vector_singleton) @@ -31,6 +32,7 @@ - [Function `destroy_empty`](#@Specification_1_destroy_empty) - [Function `borrow`](#@Specification_1_borrow) - [Function `append`](#@Specification_1_append) + - [Function `push_back`](#@Specification_1_push_back) - [Function `pop_back`](#@Specification_1_pop_back) - [Function `remove`](#@Specification_1_remove) - [Function `swap_remove`](#@Specification_1_swap_remove) @@ -140,16 +142,44 @@ bucket_size cannot be 0 + + +## Function `new` + +Regular Vector API +Create an empty vector using default logic to estimate inline_capacity and bucket_size, which may be +inaccurate. +This is exactly the same as empty() but is more standardized as all other data structures have new(). + + +
public fun new<T: store>(): smart_vector::SmartVector<T>
+
+ + + +
+Implementation + + +
public fun new<T: store>(): SmartVector<T> {
+    empty()
+}
+
+ + + +
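A quick sketch of the new `new` entry point, assuming the existing `push_back`/`pop_back`/`destroy_empty` API (module name hypothetical):

```move
#[test_only]
module 0xcafe::smart_vector_new_example {
    use aptos_std::smart_vector;

    #[test]
    fun new_usage() {
        // new() is a drop-in replacement for the now-deprecated empty().
        let v = smart_vector::new<u64>();
        smart_vector::push_back(&mut v, 10);
        smart_vector::push_back(&mut v, 20);
        assert!(smart_vector::length(&v) == 2, 0);
        assert!(smart_vector::pop_back(&mut v) == 20, 1);
        assert!(smart_vector::pop_back(&mut v) == 10, 2);
        // SmartVector has no drop ability, so it must be destroyed explicitly.
        smart_vector::destroy_empty(v);
    }
}
```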
+ ## Function `empty` -Regular Vector API Create an empty vector using default logic to estimate inline_capacity and bucket_size, which may be inaccurate. -
public fun empty<T: store>(): smart_vector::SmartVector<T>
+
#[deprecated]
+public fun empty<T: store>(): smart_vector::SmartVector<T>
 
@@ -816,7 +846,8 @@ Return true if the vector v has no elements and ### Function `empty` -
public fun empty<T: store>(): smart_vector::SmartVector<T>
+
#[deprecated]
+public fun empty<T: store>(): smart_vector::SmartVector<T>
 
@@ -891,6 +922,22 @@ Return true if the vector v has no elements and +
pragma verify = false;
+
+ + + + + +### Function `push_back` + + +
public fun push_back<T: store>(v: &mut smart_vector::SmartVector<T>, val: T)
+
+ + + +
pragma verify = false;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/string_utils.md b/aptos-move/framework/aptos-stdlib/doc/string_utils.md index 60f2459b6d70e..7c79233721f4e 100644 --- a/aptos-move/framework/aptos-stdlib/doc/string_utils.md +++ b/aptos-move/framework/aptos-stdlib/doc/string_utils.md @@ -102,7 +102,8 @@ A module for formatting move values as strings. -
struct FakeCons<T, N> has copy, drop, store
+
#[test_only]
+struct FakeCons<T, N> has copy, drop, store
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/type_info.md b/aptos-move/framework/aptos-stdlib/doc/type_info.md index 1846162aa4491..29985bd1b4536 100644 --- a/aptos-move/framework/aptos-stdlib/doc/type_info.md +++ b/aptos-move/framework/aptos-stdlib/doc/type_info.md @@ -193,6 +193,7 @@ return whichever ID was passed to aptos_framework::chain_id::initialize_fo ## Function `type_of` +Return the TypeInfo struct containing for the type T.
public fun type_of<T>(): type_info::TypeInfo
@@ -215,6 +216,9 @@ return whichever ID was passed to aptos_framework::chain_id::initialize_fo
 
 ## Function `type_name`
 
+Return the human readable string for the type, including the address, module name, and any type arguments.
+Example: 0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>
+Or: 0x1::table::Table<0x1::string::String, 0x1::string::String>
 
 
 
public fun type_name<T>(): string::String
@@ -226,7 +230,7 @@ return whichever ID was passed to aptos_framework::chain_id::initialize_fo
 Implementation
 
 
-
public native fun type_name<T>(): string::String;
+
public native fun type_name<T>(): String;
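For context, a small sketch of what `type_name` returns, based on the examples in the doc comment above and the module's existing unit tests (module name hypothetical):

```move
#[test_only]
module 0xcafe::type_name_example {
    use std::string;
    use aptos_std::type_info;

    #[test]
    fun type_name_usage() {
        // Primitive types render as their keyword.
        assert!(type_info::type_name<bool>() == string::utf8(b"bool"), 0);
        // Structs render fully qualified: address, module, struct name.
        assert!(type_info::type_name<type_info::TypeInfo>() == string::utf8(b"0x1::type_info::TypeInfo"), 1);
    }
}
```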
 
@@ -293,7 +297,8 @@ analysis of vector size dynamism. -
fun verify_type_of()
+
#[verify_only]
+fun verify_type_of()
 
@@ -325,7 +330,8 @@ analysis of vector size dynamism. -
fun verify_type_of_generic<T>()
+
#[verify_only]
+fun verify_type_of_generic<T>()
 
@@ -427,7 +433,8 @@ analysis of vector size dynamism. ### Function `verify_type_of_generic` -
fun verify_type_of_generic<T>()
+
#[verify_only]
+fun verify_type_of_generic<T>()
 
diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move index 809c8db79232e..80dfd8df1a804 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move @@ -211,17 +211,13 @@ module aptos_std::big_vector { while (num_buckets_left > 0) { let pop_bucket = table_with_length::remove(&mut v.buckets, num_buckets_left - 1); - let pop_bucket_length = vector::length(&pop_bucket); - let i = 0; - while(i < pop_bucket_length){ - vector::push_back(&mut push_bucket, vector::pop_back(&mut pop_bucket)); + vector::for_each_reverse(pop_bucket, |val| { + vector::push_back(&mut push_bucket, val); if (vector::length(&push_bucket) == v.bucket_size) { vector::push_back(&mut new_buckets, push_bucket); push_bucket = vector[]; }; - i = i + 1; - }; - vector::destroy_empty(pop_bucket); + }); num_buckets_left = num_buckets_left - 1; }; diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move index 6330e2e4b96e2..1368956100d75 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move @@ -27,6 +27,14 @@ module aptos_std::smart_vector { /// Regular Vector API + /// Create an empty vector using default logic to estimate `inline_capacity` and `bucket_size`, which may be + /// inaccurate. + /// This is exactly the same as empty() but is more standardized as all other data structures have new(). + public fun new(): SmartVector { + empty() + } + + #[deprecated] /// Create an empty vector using default logic to estimate `inline_capacity` and `bucket_size`, which may be /// inaccurate. public fun empty(): SmartVector { diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move index ba8f135b68ddf..c4bfca925cd51 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move @@ -33,6 +33,10 @@ spec aptos_std::smart_vector { ); } + spec push_back(v: &mut SmartVector, val: T) { + pragma verify = false; // TODO: set to false because of timeout + } + spec pop_back { use aptos_std::table_with_length; diff --git a/aptos-move/framework/aptos-stdlib/sources/from_bcs.spec.move b/aptos-move/framework/aptos-stdlib/sources/from_bcs.spec.move index 71f43efd783d7..9eeb4f025cdc2 100644 --- a/aptos-move/framework/aptos-stdlib/sources/from_bcs.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/from_bcs.spec.move @@ -14,6 +14,13 @@ spec aptos_std::from_bcs { axiom forall b1: vector, b2: vector: (deserialize(b1) == deserialize(b2) ==> b1 == b2); + // If the input are equal, the result of deserialize should be equal too + axiom forall b1: vector, b2: vector: + ( b1 == b2 ==> deserializable(b1) == deserializable(b2) ); + + axiom forall b1: vector, b2: vector: + ( b1 == b2 ==> deserialize(b1) == deserialize(b2) ); + // `deserialize` is an inverse function of `bcs::serialize`. // TODO: disabled because this generic axiom causes a timeout. 
// axiom forall v: T: deserialize(bcs::serialize(v)) == v; diff --git a/aptos-move/framework/aptos-stdlib/sources/simple_map.move b/aptos-move/framework/aptos-stdlib/sources/simple_map.move index 70a6cd07931e4..828ac26a8e71d 100644 --- a/aptos-move/framework/aptos-stdlib/sources/simple_map.move +++ b/aptos-move/framework/aptos-stdlib/sources/simple_map.move @@ -13,6 +13,8 @@ module aptos_std::simple_map { const EKEY_ALREADY_EXISTS: u64 = 1; /// Map key is not found const EKEY_NOT_FOUND: u64 = 2; + /// Lengths of keys and values do not match + const EMISMATCHED_LENGTHS: u64 = 3; struct SimpleMap has copy, drop, store { data: vector>, @@ -27,12 +29,30 @@ module aptos_std::simple_map { vector::length(&map.data) } - public fun create(): SimpleMap { + /// Create an empty SimpleMap. + public fun new(): SimpleMap { SimpleMap { data: vector::empty(), } } + /// Create a SimpleMap from a vector of keys and values. The keys must be unique. + public fun new_from( + keys: vector, + values: vector, + ): SimpleMap { + let map = new(); + add_all(&mut map, keys, values); + map + } + + #[deprecated] + /// Create an empty SimpleMap. + /// This function is deprecated, use `new` instead. + public fun create(): SimpleMap { + new() + } + public fun borrow( map: &SimpleMap, key: &Key, @@ -66,6 +86,7 @@ module aptos_std::simple_map { vector::destroy_empty(data); } + /// Add a key/value pair to the map. The key must not already exist. public fun add( map: &mut SimpleMap, key: Key, @@ -77,6 +98,18 @@ module aptos_std::simple_map { vector::push_back(&mut map.data, Element { key, value }); } + /// Add multiple key/value pairs to the map. The keys must not already exist. + public fun add_all( + map: &mut SimpleMap, + keys: vector, + values: vector, + ) { + assert!(vector::length(&keys) == vector::length(&values), error::invalid_argument(EMISMATCHED_LENGTHS)); + vector::zip(keys, values, |key, value| { + add(map, key, value); + }); + } + /// Insert key/value pair or update an existing key to a new value public fun upsert( map: &mut SimpleMap, @@ -100,6 +133,22 @@ module aptos_std::simple_map { (std::option::none(), std::option::none()) } + /// Return all keys in the map. This requires keys to be copyable. + public fun keys(map: &SimpleMap): vector { + vector::map_ref(&map.data, |e| { + let e: &Element = e; + e.key + }) + } + + /// Return all values in the map. This requires values to be copyable. + public fun values(map: &SimpleMap): vector { + vector::map_ref(&map.data, |e| { + let e: &Element = e; + e.value + }) + } + /// Transform the map into two vectors with the keys and values respectively /// Primarily used to destroy a map public fun to_vec_pair( @@ -123,6 +172,7 @@ module aptos_std::simple_map { vector::destroy(values, |_v| dv(_v)); } + /// Remove a key/value pair from the map. The key must exist. 
public fun remove( map: &mut SimpleMap, key: &Key, @@ -151,7 +201,7 @@ module aptos_std::simple_map { } #[test] - public fun add_remove_many() { + public fun test_add_remove_many() { let map = create(); assert!(length(&map) == 0, 0); @@ -183,9 +233,46 @@ module aptos_std::simple_map { destroy_empty(map); } + #[test] + public fun test_add_all() { + let map = create(); + + assert!(length(&map) == 0, 0); + add_all(&mut map, vector[1, 2, 3], vector[10, 20, 30]); + assert!(length(&map) ==3, 1); + assert!(borrow(&map, &1) == &10, 2); + assert!(borrow(&map, &2) == &20, 3); + assert!(borrow(&map, &3) == &30, 4); + + remove(&mut map, &1); + remove(&mut map, &2); + remove(&mut map, &3); + destroy_empty(map); + } + + #[test] + public fun test_keys() { + let map = create(); + assert!(keys(&map) == vector[], 0); + add(&mut map, 2, 1); + add(&mut map, 3, 1); + + assert!(keys(&map) == vector[2, 3], 0); + } + + #[test] + public fun test_values() { + let map = create(); + assert!(values(&map) == vector[], 0); + add(&mut map, 2, 1); + add(&mut map, 3, 2); + + assert!(values(&map) == vector[1, 2], 0); + } + #[test] #[expected_failure] - public fun add_twice() { + public fun test_add_twice() { let map = create(); add(&mut map, 3, 1); add(&mut map, 3, 1); @@ -196,7 +283,7 @@ module aptos_std::simple_map { #[test] #[expected_failure] - public fun remove_twice() { + public fun test_remove_twice() { let map = create(); add(&mut map, 3, 1); remove(&mut map, &3); @@ -206,7 +293,7 @@ module aptos_std::simple_map { } #[test] - public fun upsert_test() { + public fun test_upsert_test() { let map = create(); // test adding 3 elements using upsert upsert(&mut map, 1, 1 ); diff --git a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move index b9a40e720c93d..b5a4a4a984b95 100644 --- a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move @@ -24,6 +24,14 @@ spec aptos_std::simple_map { pragma intrinsic; } + spec new { + pragma intrinsic; + } + + spec new_from { + pragma intrinsic; + } + spec create { pragma intrinsic; } @@ -48,6 +56,10 @@ spec aptos_std::simple_map { pragma intrinsic; } + spec add_all { + pragma intrinsic; + } + spec remove { pragma intrinsic; } @@ -56,6 +68,14 @@ spec aptos_std::simple_map { pragma verify=false; } + spec keys { + pragma verify=false; + } + + spec values { + pragma verify=false; + } + spec to_vec_pair(map: SimpleMap): (vector, vector) { pragma intrinsic; pragma opaque; diff --git a/aptos-move/framework/aptos-stdlib/sources/type_info.move b/aptos-move/framework/aptos-stdlib/sources/type_info.move index c758476426b17..22f97b324df9b 100644 --- a/aptos-move/framework/aptos-stdlib/sources/type_info.move +++ b/aptos-move/framework/aptos-stdlib/sources/type_info.move @@ -1,7 +1,7 @@ module aptos_std::type_info { use std::bcs; - use std::string; use std::features; + use std::string::{Self, String}; use std::vector; // @@ -47,9 +47,13 @@ module aptos_std::type_info { chain_id_internal() } + /// Return the `TypeInfo` struct containing for the type `T`. public native fun type_of(): TypeInfo; - public native fun type_name(): string::String; + /// Return the human readable string for the type, including the address, module name, and any type arguments. 
+ /// Example: 0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin> + /// Or: 0x1::table::Table<0x1::string::String, 0x1::string::String> + public native fun type_name(): String; native fun chain_id_internal(): u8; @@ -65,14 +69,25 @@ module aptos_std::type_info { vector::length(&bcs::to_bytes(val_ref)) } + #[test_only] + use aptos_std::table::Table; + #[test] - fun test() { + fun test_type_of() { let type_info = type_of(); assert!(account_address(&type_info) == @aptos_std, 0); assert!(module_name(&type_info) == b"type_info", 1); assert!(struct_name(&type_info) == b"TypeInfo", 2); } + #[test] + fun test_type_of_with_type_arg() { + let type_info = type_of>(); + assert!(account_address(&type_info) == @aptos_std, 0); + assert!(module_name(&type_info) == b"table", 1); + assert!(struct_name(&type_info) == b"Table<0x1::string::String, 0x1::string::String>", 2); + } + #[test(fx = @std)] fun test_chain_id(fx: signer) { // We need to enable the feature in order for the native call to be allowed. @@ -84,7 +99,7 @@ module aptos_std::type_info { #[test] fun test_type_name() { - use aptos_std::table::Table; + assert!(type_name() == string::utf8(b"bool"), 0); assert!(type_name() == string::utf8(b"u8"), 1); diff --git a/aptos-move/framework/aptos-token-objects/doc/aptos_token.md b/aptos-move/framework/aptos-token-objects/doc/aptos_token.md index 8e483e065f638..54c8cc83e58fc 100644 --- a/aptos-move/framework/aptos-token-objects/doc/aptos_token.md +++ b/aptos-move/framework/aptos-token-objects/doc/aptos_token.md @@ -77,7 +77,8 @@ The key features are: Storage state for managing the no-code Collection. -
struct AptosCollection has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct AptosCollection has key
 
@@ -159,7 +160,8 @@ Storage state for managing the no-code Collection. Storage state for managing the no-code Token. -
struct AptosToken has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct AptosToken has key
 
@@ -553,7 +555,8 @@ With an existing collection, directly mint a soul bound token into the recipient -
public fun are_properties_mutable<T: key>(token: object::Object<T>): bool
+
#[view]
+public fun are_properties_mutable<T: key>(token: object::Object<T>): bool
 
@@ -578,7 +581,8 @@ With an existing collection, directly mint a soul bound token into the recipient -
public fun is_burnable<T: key>(token: object::Object<T>): bool
+
#[view]
+public fun is_burnable<T: key>(token: object::Object<T>): bool
 
@@ -602,7 +606,8 @@ With an existing collection, directly mint a soul bound token into the recipient -
public fun is_freezable_by_creator<T: key>(token: object::Object<T>): bool
+
#[view]
+public fun is_freezable_by_creator<T: key>(token: object::Object<T>): bool
 
@@ -626,7 +631,8 @@ With an existing collection, directly mint a soul bound token into the recipient -
public fun is_mutable_description<T: key>(token: object::Object<T>): bool
+
#[view]
+public fun is_mutable_description<T: key>(token: object::Object<T>): bool
 
@@ -650,7 +656,8 @@ With an existing collection, directly mint a soul bound token into the recipient -
public fun is_mutable_name<T: key>(token: object::Object<T>): bool
+
#[view]
+public fun is_mutable_name<T: key>(token: object::Object<T>): bool
 
@@ -674,7 +681,8 @@ With an existing collection, directly mint a soul bound token into the recipient -
public fun is_mutable_uri<T: key>(token: object::Object<T>): bool
+
#[view]
+public fun is_mutable_uri<T: key>(token: object::Object<T>): bool
 
diff --git a/aptos-move/framework/aptos-token-objects/doc/collection.md b/aptos-move/framework/aptos-token-objects/doc/collection.md index 4eefe2c264bef..ae8c944277e44 100644 --- a/aptos-move/framework/aptos-token-objects/doc/collection.md +++ b/aptos-move/framework/aptos-token-objects/doc/collection.md @@ -70,7 +70,8 @@ require adding the field original_name. Represents the common fields for a collection. -
struct Collection has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Collection has key
 
@@ -181,7 +182,8 @@ Fixed supply tracker, this is useful for ensuring that a limited number of token and adding events and supply tracking to a collection. -
struct FixedSupply has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct FixedSupply has key
 
@@ -233,7 +235,8 @@ and adding events and supply tracking to a collection. Unlimited supply tracker, this is useful for adding events and supply tracking to a collection. -
struct UnlimitedSupply has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct UnlimitedSupply has key
 
@@ -887,7 +890,8 @@ Creates a MutatorRef, which gates the ability to mutate any fields that support Provides the count of the current selection if supply tracking is used -
public fun count<T: key>(collection: object::Object<T>): option::Option<u64>
+
#[view]
+public fun count<T: key>(collection: object::Object<T>): option::Option<u64>
 
@@ -922,7 +926,8 @@ Provides the count of the current selection if supply tracking is used -
public fun creator<T: key>(collection: object::Object<T>): address
+
#[view]
+public fun creator<T: key>(collection: object::Object<T>): address
 
@@ -946,7 +951,8 @@ Provides the count of the current selection if supply tracking is used -
public fun description<T: key>(collection: object::Object<T>): string::String
+
#[view]
+public fun description<T: key>(collection: object::Object<T>): string::String
 
@@ -970,7 +976,8 @@ Provides the count of the current selection if supply tracking is used -
public fun name<T: key>(collection: object::Object<T>): string::String
+
#[view]
+public fun name<T: key>(collection: object::Object<T>): string::String
 
@@ -994,7 +1001,8 @@ Provides the count of the current selection if supply tracking is used -
public fun uri<T: key>(collection: object::Object<T>): string::String
+
#[view]
+public fun uri<T: key>(collection: object::Object<T>): string::String
 
diff --git a/aptos-move/framework/aptos-token-objects/doc/property_map.md b/aptos-move/framework/aptos-token-objects/doc/property_map.md index 8775910804e79..15f7a895a3d35 100644 --- a/aptos-move/framework/aptos-token-objects/doc/property_map.md +++ b/aptos-move/framework/aptos-token-objects/doc/property_map.md @@ -64,7 +64,8 @@ A Map for typed key to value mapping, the contract using it should keep track of what keys are what types, and parse them accordingly. -
struct PropertyMap has drop, key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct PropertyMap has drop, key
 
diff --git a/aptos-move/framework/aptos-token-objects/doc/royalty.md b/aptos-move/framework/aptos-token-objects/doc/royalty.md index 9e2351b452464..68635764b9621 100644 --- a/aptos-move/framework/aptos-token-objects/doc/royalty.md +++ b/aptos-move/framework/aptos-token-objects/doc/royalty.md @@ -40,7 +40,8 @@ Royalties are optional for a collection. Royalty percentage is calculated by (numerator / denominator) * 100% -
struct Royalty has copy, drop, key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Royalty has copy, drop, key
 
diff --git a/aptos-move/framework/aptos-token-objects/doc/token.md b/aptos-move/framework/aptos-token-objects/doc/token.md index fe6671a10a364..0b23237681a04 100644 --- a/aptos-move/framework/aptos-token-objects/doc/token.md +++ b/aptos-move/framework/aptos-token-objects/doc/token.md @@ -58,7 +58,8 @@ token are: Represents the common fields to all tokens. -
struct Token has key
+
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Token has key
 
@@ -612,7 +613,8 @@ Extracts the tokens address from a BurnRef. -
public fun creator<T: key>(token: object::Object<T>): address
+
#[view]
+public fun creator<T: key>(token: object::Object<T>): address
 
@@ -636,7 +638,8 @@ Extracts the tokens address from a BurnRef. -
public fun collection_name<T: key>(token: object::Object<T>): string::String
+
#[view]
+public fun collection_name<T: key>(token: object::Object<T>): string::String
 
@@ -660,7 +663,8 @@ Extracts the tokens address from a BurnRef. -
public fun collection_object<T: key>(token: object::Object<T>): object::Object<collection::Collection>
+
#[view]
+public fun collection_object<T: key>(token: object::Object<T>): object::Object<collection::Collection>
 
@@ -684,7 +688,8 @@ Extracts the tokens address from a BurnRef. -
public fun description<T: key>(token: object::Object<T>): string::String
+
#[view]
+public fun description<T: key>(token: object::Object<T>): string::String
 
@@ -708,7 +713,8 @@ Extracts the tokens address from a BurnRef. -
public fun name<T: key>(token: object::Object<T>): string::String
+
#[view]
+public fun name<T: key>(token: object::Object<T>): string::String
 
@@ -732,7 +738,8 @@ Extracts the tokens address from a BurnRef. -
public fun uri<T: key>(token: object::Object<T>): string::String
+
#[view]
+public fun uri<T: key>(token: object::Object<T>): string::String
 
@@ -756,7 +763,8 @@ Extracts the tokens address from a BurnRef. -
public fun royalty<T: key>(token: object::Object<T>): option::Option<royalty::Royalty>
+
#[view]
+public fun royalty<T: key>(token: object::Object<T>): option::Option<royalty::Royalty>
 
diff --git a/aptos-move/framework/aptos-token/doc/property_map.md b/aptos-move/framework/aptos-token/doc/property_map.md index 5cd717ee647bf..cddfd84c49170 100644 --- a/aptos-move/framework/aptos-token/doc/property_map.md +++ b/aptos-move/framework/aptos-token/doc/property_map.md @@ -19,6 +19,9 @@ It also supports deserializing property value to it original type. - [Function `add`](#0x3_property_map_add) - [Function `length`](#0x3_property_map_length) - [Function `borrow`](#0x3_property_map_borrow) +- [Function `keys`](#0x3_property_map_keys) +- [Function `types`](#0x3_property_map_types) +- [Function `values`](#0x3_property_map_values) - [Function `read_string`](#0x3_property_map_read_string) - [Function `read_u8`](#0x3_property_map_read_u8) - [Function `read_u64`](#0x3_property_map_read_u64) @@ -40,6 +43,9 @@ It also supports deserializing property value to it original type. - [Function `add`](#@Specification_1_add) - [Function `length`](#@Specification_1_length) - [Function `borrow`](#@Specification_1_borrow) + - [Function `keys`](#@Specification_1_keys) + - [Function `types`](#@Specification_1_types) + - [Function `values`](#@Specification_1_values) - [Function `read_string`](#@Specification_1_read_string) - [Function `read_u8`](#@Specification_1_read_u8) - [Function `read_u64`](#@Specification_1_read_u64) @@ -374,7 +380,7 @@ Create property map directly from key and property value
public fun add(map: &mut PropertyMap, key: String, value: PropertyValue) {
     assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG));
-    assert!(simple_map::length<String, PropertyValue>(&map.map) < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT));
+    assert!(simple_map::length(&map.map) < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT));
     simple_map::add(&mut map.map, key, value);
 }
 
@@ -431,6 +437,87 @@ Create property map directly from key and property value + + + + +## Function `keys` + +Return all the keys in the property map in the order they are added. + + +
public fun keys(map: &property_map::PropertyMap): vector<string::String>
+
+ + + +
+Implementation + + +
public fun keys(map: &PropertyMap): vector<String> {
+    simple_map::keys(&map.map)
+}
+
+ + + +
+ + + +## Function `types` + +Return the types of all properties in the property map in the order they are added. + + +
public fun types(map: &property_map::PropertyMap): vector<string::String>
+
+ + + +
+Implementation + + +
public fun types(map: &PropertyMap): vector<String> {
+    vector::map_ref(&simple_map::values(&map.map), |v| {
+        let v: &PropertyValue = v;
+        v.type
+    })
+}
+
+ + + +
+ + + +## Function `values` + +Return the values of all properties in the property map in the order they are added. + + +
public fun values(map: &property_map::PropertyMap): vector<vector<u8>>
+
+ + + +
+Implementation + + +
public fun values(map: &PropertyMap): vector<vector<u8>> {
+    vector::map_ref(&simple_map::values(&map.map), |v| {
+        let v: &PropertyValue = v;
+        v.value
+    })
+}
+
+ + +
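A short sketch exercising the new `keys`, `types`, and `values` accessors together, reusing the key/value/type triples from this module's own tests (module name hypothetical):

```move
#[test_only]
module 0xcafe::property_map_example {
    use std::string::utf8;
    use aptos_token::property_map;

    #[test]
    fun keys_types_values_usage() {
        let map = property_map::new(
            vector[utf8(b"attack"), utf8(b"durability")],
            vector[b"10", b"5"],
            vector[utf8(b"integer"), utf8(b"integer")]
        );
        // The three accessors return parallel vectors in insertion order.
        assert!(property_map::keys(&map) == vector[utf8(b"attack"), utf8(b"durability")], 0);
        assert!(property_map::types(&map) == vector[utf8(b"integer"), utf8(b"integer")], 1);
        assert!(property_map::values(&map) == vector[b"10", b"5"], 2);
    }
}
```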
@@ -947,6 +1034,54 @@ create a property value from generic type data + + +### Function `keys` + + +
public fun keys(map: &property_map::PropertyMap): vector<string::String>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `types` + + +
public fun types(map: &property_map::PropertyMap): vector<string::String>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `values` + + +
public fun values(map: &property_map::PropertyMap): vector<vector<u8>>
+
+ + + + +
pragma verify = false;
+
+ + + ### Function `read_string` diff --git a/aptos-move/framework/aptos-token/doc/token.md b/aptos-move/framework/aptos-token/doc/token.md index 249d231331b4e..f4c11b8ad1dbc 100644 --- a/aptos-move/framework/aptos-token/doc/token.md +++ b/aptos-move/framework/aptos-token/doc/token.md @@ -4,7 +4,7 @@ # Module `0x3::token` This module provides the foundation for Tokens. -Checkout our developer doc on our token standard https://aptos.dev/concepts/coin-and-token/aptos-token +Checkout our developer doc on our token standard https://aptos.dev/standards - [Struct `Token`](#0x3_token_Token) @@ -4150,7 +4150,8 @@ return if the tokendata's default properties is mutable with a token mutability return the collection mutation setting -
public fun get_collection_mutability_config(creator: address, collection_name: string::String): token::CollectionMutabilityConfig
+
#[view]
+public fun get_collection_mutability_config(creator: address, collection_name: string::String): token::CollectionMutabilityConfig
 
@@ -4517,17 +4518,14 @@ Deposit the token balance into the recipients account and emit an event.
fun assert_non_standard_reserved_property(keys: &vector<String>) {
-    let len = vector::length(keys);
-    let i = 0;
-    while ( i < len) {
-        let key = vector::borrow(keys, i);
+    vector::for_each_ref(keys, |key| {
+        let key: &String = key;
         let length = string::length(key);
         if (length >= 6) {
             let prefix = string::sub_string(&*key, 0, 6);
             assert!(prefix != string::utf8(b"TOKEN_"), error::permission_denied(EPROPERTY_RESERVED_BY_STANDARD));
         };
-        i = i + 1;
-    };
+    });
 }
 
@@ -5961,7 +5959,8 @@ The length of name should less than MAX_NFT_NAME_LENGTH ### Function `get_collection_mutability_config` -
public fun get_collection_mutability_config(creator: address, collection_name: string::String): token::CollectionMutabilityConfig
+
#[view]
+public fun get_collection_mutability_config(creator: address, collection_name: string::String): token::CollectionMutabilityConfig
 
@@ -6082,6 +6081,21 @@ The collection_name should exist in collection_data of the creator_address's Col + + + + +
schema AssertCollectionExistsAbortsIf {
+    creator_address: address;
+    collection_name: String;
+    let all_collection_data = global<Collections>(creator_address).collection_data;
+    aborts_if !exists<Collections>(creator_address);
+    aborts_if !table::spec_contains(all_collection_data, collection_name);
+}
+
+ + + ### Function `assert_tokendata_exists` diff --git a/aptos-move/framework/aptos-token/sources/property_map.move b/aptos-move/framework/aptos-token/sources/property_map.move index 321266cf59cd7..45546cc402644 100644 --- a/aptos-move/framework/aptos-token/sources/property_map.move +++ b/aptos-move/framework/aptos-token/sources/property_map.move @@ -117,7 +117,7 @@ module aptos_token::property_map { public fun add(map: &mut PropertyMap, key: String, value: PropertyValue) { assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); - assert!(simple_map::length(&map.map) < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT)); + assert!(simple_map::length(&map.map) < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT)); simple_map::add(&mut map.map, key, value); } @@ -131,6 +131,27 @@ module aptos_token::property_map { simple_map::borrow(&map.map, key) } + /// Return all the keys in the property map in the order they are added. + public fun keys(map: &PropertyMap): vector { + simple_map::keys(&map.map) + } + + /// Return the types of all properties in the property map in the order they are added. + public fun types(map: &PropertyMap): vector { + vector::map_ref(&simple_map::values(&map.map), |v| { + let v: &PropertyValue = v; + v.type + }) + } + + /// Return the values of all properties in the property map in the order they are added. + public fun values(map: &PropertyMap): vector> { + vector::map_ref(&simple_map::values(&map.map), |v| { + let v: &PropertyValue = v; + v.value + }) + } + public fun read_string(map: &PropertyMap, key: &String): String { let prop = borrow(map, key); assert!(prop.type == string::utf8(b"0x1::string::String"), error::invalid_state(ETYPE_NOT_MATCH)); @@ -250,18 +271,31 @@ module aptos_token::property_map { } } + #[test_only] + use std::string::utf8; + + #[test_only] + fun test_keys(): vector { + vector[ utf8(b"attack"), utf8(b"durability"), utf8(b"type") ] + } + + #[test_only] + fun test_values(): vector> { + vector[ b"10", b"5", b"weapon" ] + } + + #[test_only] + fun test_types(): vector { + vector[ utf8(b"integer"), utf8(b"integer"), utf8(b"String") ] + } + #[test_only] fun create_property_list(): PropertyMap { - use std::string::utf8; - let keys = vector[ utf8(b"attack"), utf8(b"durability"), utf8(b"type")]; - let values = vector>[ b"10", b"5", b"weapon" ]; - let types = vector[ utf8(b"integer"), utf8(b"integer"), utf8(b"String") ]; - new(keys, values, types) + new(test_keys(), test_values(), test_types()) } #[test] fun test_add_property(): PropertyMap { - use std::string::utf8; let properties = create_property_list(); add( &mut properties, utf8(b"level"), @@ -275,9 +309,23 @@ module aptos_token::property_map { properties } + #[test] + fun test_get_property_keys() { + assert!(keys(&create_property_list()) == test_keys(), 0); + } + + #[test] + fun test_get_property_types() { + assert!(types(&create_property_list()) == test_types(), 0); + } + + #[test] + fun test_get_property_values() { + assert!(values(&create_property_list()) == test_values(), 0); + } + #[test] fun test_update_property(): PropertyMap { - use std::string::utf8; let properties = create_property_list(); update_property_value(&mut properties, &utf8(b"attack"), PropertyValue { value: b"7", type: utf8(b"integer") }); assert!( @@ -289,7 +337,6 @@ module aptos_token::property_map { #[test] fun test_remove_property(): PropertyMap { - use std::string::utf8; let properties = create_property_list(); 
assert!(length(&mut properties) == 3, 1); let (_, _) = remove(&mut properties, &utf8(b"attack")); @@ -307,7 +354,6 @@ module aptos_token::property_map { #[test] fun test_read_value_with_type() { - use std::string::utf8; let keys = vector[ utf8(b"attack"), utf8(b"mutable")]; let values = vector>[ bcs::to_bytes(&10), bcs::to_bytes(&false) ]; let types = vector[ utf8(b"u8"), utf8(b"bool")]; diff --git a/aptos-move/framework/aptos-token/sources/property_map.spec.move b/aptos-move/framework/aptos-token/sources/property_map.spec.move index 30a1bd242705f..b97e90b2c1bae 100644 --- a/aptos-move/framework/aptos-token/sources/property_map.spec.move +++ b/aptos-move/framework/aptos-token/sources/property_map.spec.move @@ -52,6 +52,18 @@ spec aptos_token::property_map { aborts_if false; } + spec keys(map: &PropertyMap): vector { + pragma verify = false; + } + + spec types(map: &PropertyMap): vector { + pragma verify = false; + } + + spec values(map: &PropertyMap): vector> { + pragma verify = false; + } + spec borrow(map: &PropertyMap, key: &String): &PropertyValue { use aptos_framework::simple_map; aborts_if !simple_map::spec_contains_key(map.map, key); diff --git a/aptos-move/framework/aptos-token/sources/token.move b/aptos-move/framework/aptos-token/sources/token.move index ba7cd6805cc2e..168a6fab2299e 100644 --- a/aptos-move/framework/aptos-token/sources/token.move +++ b/aptos-move/framework/aptos-token/sources/token.move @@ -1,5 +1,5 @@ /// This module provides the foundation for Tokens. -/// Checkout our developer doc on our token standard https://aptos.dev/concepts/coin-and-token/aptos-token +/// Checkout our developer doc on our token standard https://aptos.dev/standards module aptos_token::token { use std::error; use std::option::{Self, Option}; @@ -1660,17 +1660,14 @@ module aptos_token::token { } fun assert_non_standard_reserved_property(keys: &vector) { - let len = vector::length(keys); - let i = 0; - while ( i < len) { - let key = vector::borrow(keys, i); + vector::for_each_ref(keys, |key| { + let key: &String = key; let length = string::length(key); if (length >= 6) { let prefix = string::sub_string(&*key, 0, 6); assert!(prefix != string::utf8(b"TOKEN_"), error::permission_denied(EPROPERTY_RESERVED_BY_STANDARD)); }; - i = i + 1; - }; + }); } // ****************** TEST-ONLY FUNCTIONS ************** diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs index 8cade781a1858..815f0efdc626d 100644 --- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs @@ -399,6 +399,17 @@ pub enum EntryFunctionCall { metadata_values: Vec>, }, + /// Like `create_with_owners`, but removes the calling account after creation. + /// + /// This is for creating a vanity multisig account from a bootstrapping account that should not + /// be an owner after the vanity multisig address has been secured. + MultisigAccountCreateWithOwnersThenRemoveBootstrapper { + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, + }, + /// Remove the next transaction if it has sufficient owner rejections. MultisigAccountExecuteRejectedTransaction { multisig_account: AccountAddress, @@ -426,8 +437,21 @@ pub enum EntryFunctionCall { owners_to_remove: Vec, }, - /// Update the number of signatures required then remove owners, in a single operation. 
- MultisigAccountRemoveOwnersAndUpdateSignaturesRequired { + /// Swap an owner in for an old one, without changing required signatures. + MultisigAccountSwapOwner { + to_swap_in: AccountAddress, + to_swap_out: AccountAddress, + }, + + /// Swap owners in and out, without changing required signatures. + MultisigAccountSwapOwners { + to_swap_in: Vec, + to_swap_out: Vec, + }, + + /// Swap owners in and out, updating number of required signatures. + MultisigAccountSwapOwnersAndUpdateSignaturesRequired { + new_owners: Vec, owners_to_remove: Vec, new_num_signatures_required: u64, }, @@ -1005,6 +1029,17 @@ impl EntryFunctionCall { metadata_keys, metadata_values, ), + MultisigAccountCreateWithOwnersThenRemoveBootstrapper { + owners, + num_signatures_required, + metadata_keys, + metadata_values, + } => multisig_account_create_with_owners_then_remove_bootstrapper( + owners, + num_signatures_required, + metadata_keys, + metadata_values, + ), MultisigAccountExecuteRejectedTransaction { multisig_account } => { multisig_account_execute_rejected_transaction(multisig_account) }, @@ -1018,10 +1053,20 @@ impl EntryFunctionCall { MultisigAccountRemoveOwners { owners_to_remove } => { multisig_account_remove_owners(owners_to_remove) }, - MultisigAccountRemoveOwnersAndUpdateSignaturesRequired { + MultisigAccountSwapOwner { + to_swap_in, + to_swap_out, + } => multisig_account_swap_owner(to_swap_in, to_swap_out), + MultisigAccountSwapOwners { + to_swap_in, + to_swap_out, + } => multisig_account_swap_owners(to_swap_in, to_swap_out), + MultisigAccountSwapOwnersAndUpdateSignaturesRequired { + new_owners, owners_to_remove, new_num_signatures_required, - } => multisig_account_remove_owners_and_update_signatures_required( + } => multisig_account_swap_owners_and_update_signatures_required( + new_owners, owners_to_remove, new_num_signatures_required, ), @@ -2254,6 +2299,35 @@ pub fn multisig_account_create_with_owners( )) } +/// Like `create_with_owners`, but removes the calling account after creation. +/// +/// This is for creating a vanity multisig account from a bootstrapping account that should not +/// be an owner after the vanity multisig address has been secured. +pub fn multisig_account_create_with_owners_then_remove_bootstrapper( + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("multisig_account").to_owned(), + ), + ident_str!("create_with_owners_then_remove_bootstrapper").to_owned(), + vec![], + vec![ + bcs::to_bytes(&owners).unwrap(), + bcs::to_bytes(&num_signatures_required).unwrap(), + bcs::to_bytes(&metadata_keys).unwrap(), + bcs::to_bytes(&metadata_values).unwrap(), + ], + )) +} + /// Remove the next transaction if it has sufficient owner rejections. pub fn multisig_account_execute_rejected_transaction( multisig_account: AccountAddress, @@ -2332,8 +2406,53 @@ pub fn multisig_account_remove_owners(owners_to_remove: Vec) -> )) } -/// Update the number of signatures required then remove owners, in a single operation. -pub fn multisig_account_remove_owners_and_update_signatures_required( +/// Swap an owner in for an old one, without changing required signatures. 
+pub fn multisig_account_swap_owner( + to_swap_in: AccountAddress, + to_swap_out: AccountAddress, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("multisig_account").to_owned(), + ), + ident_str!("swap_owner").to_owned(), + vec![], + vec![ + bcs::to_bytes(&to_swap_in).unwrap(), + bcs::to_bytes(&to_swap_out).unwrap(), + ], + )) +} + +/// Swap owners in and out, without changing required signatures. +pub fn multisig_account_swap_owners( + to_swap_in: Vec, + to_swap_out: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("multisig_account").to_owned(), + ), + ident_str!("swap_owners").to_owned(), + vec![], + vec![ + bcs::to_bytes(&to_swap_in).unwrap(), + bcs::to_bytes(&to_swap_out).unwrap(), + ], + )) +} + +/// Swap owners in and out, updating number of required signatures. +pub fn multisig_account_swap_owners_and_update_signatures_required( + new_owners: Vec, owners_to_remove: Vec, new_num_signatures_required: u64, ) -> TransactionPayload { @@ -2345,9 +2464,10 @@ pub fn multisig_account_remove_owners_and_update_signatures_required( ]), ident_str!("multisig_account").to_owned(), ), - ident_str!("remove_owners_and_update_signatures_required").to_owned(), + ident_str!("swap_owners_and_update_signatures_required").to_owned(), vec![], vec![ + bcs::to_bytes(&new_owners).unwrap(), bcs::to_bytes(&owners_to_remove).unwrap(), bcs::to_bytes(&new_num_signatures_required).unwrap(), ], @@ -4049,6 +4169,23 @@ mod decoder { } } + pub fn multisig_account_create_with_owners_then_remove_bootstrapper( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::MultisigAccountCreateWithOwnersThenRemoveBootstrapper { + owners: bcs::from_bytes(script.args().get(0)?).ok()?, + num_signatures_required: bcs::from_bytes(script.args().get(1)?).ok()?, + metadata_keys: bcs::from_bytes(script.args().get(2)?).ok()?, + metadata_values: bcs::from_bytes(script.args().get(3)?).ok()?, + }, + ) + } else { + None + } + } + pub fn multisig_account_execute_rejected_transaction( payload: &TransactionPayload, ) -> Option { @@ -4100,14 +4237,37 @@ mod decoder { } } - pub fn multisig_account_remove_owners_and_update_signatures_required( + pub fn multisig_account_swap_owner(payload: &TransactionPayload) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::MultisigAccountSwapOwner { + to_swap_in: bcs::from_bytes(script.args().get(0)?).ok()?, + to_swap_out: bcs::from_bytes(script.args().get(1)?).ok()?, + }) + } else { + None + } + } + + pub fn multisig_account_swap_owners(payload: &TransactionPayload) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::MultisigAccountSwapOwners { + to_swap_in: bcs::from_bytes(script.args().get(0)?).ok()?, + to_swap_out: bcs::from_bytes(script.args().get(1)?).ok()?, + }) + } else { + None + } + } + + pub fn multisig_account_swap_owners_and_update_signatures_required( payload: &TransactionPayload, ) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some( - 
EntryFunctionCall::MultisigAccountRemoveOwnersAndUpdateSignaturesRequired { - owners_to_remove: bcs::from_bytes(script.args().get(0)?).ok()?, - new_num_signatures_required: bcs::from_bytes(script.args().get(1)?).ok()?, + EntryFunctionCall::MultisigAccountSwapOwnersAndUpdateSignaturesRequired { + new_owners: bcs::from_bytes(script.args().get(0)?).ok()?, + owners_to_remove: bcs::from_bytes(script.args().get(1)?).ok()?, + new_num_signatures_required: bcs::from_bytes(script.args().get(2)?).ok()?, }, ) } else { @@ -4969,6 +5129,10 @@ static SCRIPT_FUNCTION_DECODER_MAP: once_cell::sync::Lazypublic fun shift_left(bitvector: &mut BitVector, amount: u64) { if (amount >= bitvector.length) { - let len = vector::length(&bitvector.bit_field); - let i = 0; - while (i < len) { - let elem = vector::borrow_mut(&mut bitvector.bit_field, i); - *elem = false; - i = i + 1; - }; + vector::for_each_mut(&mut bitvector.bit_field, |elem| { + *elem = false; + }); } else { let i = amount; diff --git a/aptos-move/framework/move-stdlib/doc/features.md b/aptos-move/framework/move-stdlib/doc/features.md index 96c26cbd06b8b..b4e73ba58b3e2 100644 --- a/aptos-move/framework/move-stdlib/doc/features.md +++ b/aptos-move/framework/move-stdlib/doc/features.md @@ -997,18 +997,12 @@ Function to enable and disable features. Can only be called by a signer of @std. move_to<Features>(framework, Features{features: vector[]}) }; let features = &mut borrow_global_mut<Features>(@std).features; - let i = 0; - let n = vector::length(&enable); - while (i < n) { - set(features, *vector::borrow(&enable, i), true); - i = i + 1 - }; - let i = 0; - let n = vector::length(&disable); - while (i < n) { - set(features, *vector::borrow(&disable, i), false); - i = i + 1 - }; + vector::for_each_ref(&enable, |feature| { + set(features, *feature, true); + }); + vector::for_each_ref(&disable, |feature| { + set(features, *feature, false); + }); }
diff --git a/aptos-move/framework/move-stdlib/doc/vector.md b/aptos-move/framework/move-stdlib/doc/vector.md index eb444b2955683..3c6ffb2f20df4 100644 --- a/aptos-move/framework/move-stdlib/doc/vector.md +++ b/aptos-move/framework/move-stdlib/doc/vector.md @@ -36,15 +36,24 @@ the return on investment didn't seem worth it for these simple functions. - [Function `index_of`](#0x1_vector_index_of) - [Function `insert`](#0x1_vector_insert) - [Function `remove`](#0x1_vector_remove) +- [Function `remove_value`](#0x1_vector_remove_value) - [Function `swap_remove`](#0x1_vector_swap_remove) - [Function `for_each`](#0x1_vector_for_each) - [Function `for_each_reverse`](#0x1_vector_for_each_reverse) - [Function `for_each_ref`](#0x1_vector_for_each_ref) +- [Function `zip`](#0x1_vector_zip) +- [Function `zip_reverse`](#0x1_vector_zip_reverse) +- [Function `zip_ref`](#0x1_vector_zip_ref) +- [Function `enumerate_ref`](#0x1_vector_enumerate_ref) - [Function `for_each_mut`](#0x1_vector_for_each_mut) +- [Function `zip_mut`](#0x1_vector_zip_mut) +- [Function `enumerate_mut`](#0x1_vector_enumerate_mut) - [Function `fold`](#0x1_vector_fold) - [Function `foldr`](#0x1_vector_foldr) - [Function `map_ref`](#0x1_vector_map_ref) +- [Function `zip_map_ref`](#0x1_vector_zip_map_ref) - [Function `map`](#0x1_vector_map) +- [Function `zip_map`](#0x1_vector_zip_map) - [Function `filter`](#0x1_vector_filter) - [Function `partition`](#0x1_vector_partition) - [Function `rotate`](#0x1_vector_rotate) @@ -67,6 +76,7 @@ the return on investment didn't seem worth it for these simple functions. - [Function `index_of`](#@Specification_1_index_of) - [Function `insert`](#@Specification_1_insert) - [Function `remove`](#@Specification_1_remove) + - [Function `remove_value`](#@Specification_1_remove_value) - [Function `swap_remove`](#@Specification_1_swap_remove) - [Function `rotate`](#@Specification_1_rotate) - [Function `rotate_slice`](#@Specification_1_rotate_slice) @@ -101,6 +111,16 @@ The index into the vector is out of bounds + + +The length of the vectors are not equal. + + +
const EVECTORS_LENGTH_MISMATCH: u64 = 131074;
+
+ + + ## Function `empty` @@ -108,7 +128,8 @@ The index into the vector is out of bounds Create an empty vector. -
public fun empty<Element>(): vector<Element>
+
#[bytecode_instruction]
+public fun empty<Element>(): vector<Element>
 
@@ -131,7 +152,8 @@ Create an empty vector. Return the length of the vector. -
public fun length<Element>(v: &vector<Element>): u64
+
#[bytecode_instruction]
+public fun length<Element>(v: &vector<Element>): u64
 
@@ -155,7 +177,8 @@ Acquire an immutable reference to the ith element of the vector i
is out of bounds. -
public fun borrow<Element>(v: &vector<Element>, i: u64): &Element
+
#[bytecode_instruction]
+public fun borrow<Element>(v: &vector<Element>, i: u64): &Element
 
@@ -178,7 +201,8 @@ Aborts if i is out of bounds. Add element e to the end of the vector v. -
public fun push_back<Element>(v: &mut vector<Element>, e: Element)
+
#[bytecode_instruction]
+public fun push_back<Element>(v: &mut vector<Element>, e: Element)
 
@@ -202,7 +226,8 @@ Return a mutable reference to the ith element in the vector v Aborts if i is out of bounds. -
public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element
+
#[bytecode_instruction]
+public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element
 
@@ -226,7 +251,8 @@ Pop an element from the end of vector v. Aborts if v is empty. -
public fun pop_back<Element>(v: &mut vector<Element>): Element
+
#[bytecode_instruction]
+public fun pop_back<Element>(v: &mut vector<Element>): Element
 
@@ -250,7 +276,8 @@ Destroy the vector v. Aborts if v is not empty. -
public fun destroy_empty<Element>(v: vector<Element>)
+
#[bytecode_instruction]
+public fun destroy_empty<Element>(v: vector<Element>)
 
@@ -274,7 +301,8 @@ Swaps the elements at the ith and jth indices in the v Aborts if i or j is out of bounds. -
public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64)
+
#[bytecode_instruction]
+public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64)
 
@@ -641,6 +669,43 @@ Aborts if i is out of bounds. + + + + +## Function `remove_value` + +Remove the first occurrence of a given value in the vector v and return it in a vector, shifting all +subsequent elements. +This is O(n) and preserves ordering of elements in the vector. +This returns an empty vector if the value isn't present in the vector. +Note that this cannot return an option as option uses vector and there'd be a circular dependency between option +and vector. + + +
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element>
+
+ + + +
+Implementation + + +
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element> {
+    // This doesn't incur an O(2N) runtime: index_of scans from left to right and stops as soon as the element is
+    // found, and remove then only shifts the elements from that index to the end of the vector.
+    let (found, index) = index_of(v, val);
+    if (found) {
+        vector[remove(v, index)]
+    } else {
+        vector[]
+    }
+}
+
+ + +
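As a quick illustration of the hit and miss cases described above, a minimal test sketch in the style of the unit tests further down in this change (the test name is illustrative and `use std::vector;` is assumed at module scope):

    #[test]
    fun remove_value_sketch() {
        let v = vector[1, 2, 3];
        // Present value: removed from v and returned in a singleton vector.
        assert!(vector::remove_value(&mut v, &2) == vector[2], 0);
        assert!(v == vector[1, 3], 1);
        // Absent value: v is left untouched and an empty vector comes back.
        assert!(vector::remove_value(&mut v, &7) == vector[], 2);
        assert!(v == vector[1, 3], 3);
    }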
@@ -757,6 +822,141 @@ Apply the function to a reference of each element in the vector. + + + + +## Function `zip` + +Apply the function to each pair of elements in the two given vectors, consuming them. + + +
public fun zip<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|())
+
+ + + +
+Implementation + + +
public inline fun zip<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |Element1, Element2|) {
+    // We need to reverse the vectors to consume them efficiently
+    reverse(&mut v1);
+    reverse(&mut v2);
+    zip_reverse(v1, v2, |e1, e2| f(e1, e2));
+}
+
+ + + +
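A minimal sketch of zip in use, along the lines of the test_zip unit test added further down (assuming `use std::vector;` and u64 elements; the test name is illustrative):

    #[test]
    fun zip_sketch() {
        let v1 = vector[1, 2, 3];
        let v2 = vector[10, 20, 30];
        let s = 0;
        // Both vectors are consumed; the lambda sees the pairs (1, 10), (2, 20), (3, 30).
        vector::zip(v1, v2, |e1, e2| s = s + e1 * e2);
        assert!(s == 140, 0);
    }

Passing vectors of different lengths aborts with EVECTORS_LENGTH_MISMATCH (0x20002) inside zip_reverse, which zip delegates to.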
+ + + +## Function `zip_reverse` + +Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. +This errors out if the vectors are not of the same length. + + +
public fun zip_reverse<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|())
+
+ + + +
+Implementation + + +
public inline fun zip_reverse<Element1, Element2>(
+    v1: vector<Element1>,
+    v2: vector<Element2>,
+    f: |Element1, Element2|,
+) {
+    let len = length(&v1);
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == length(&v2), 0x20002);
+    while (len > 0) {
+        f(pop_back(&mut v1), pop_back(&mut v2));
+        len = len - 1;
+    };
+    destroy_empty(v1);
+    destroy_empty(v2);
+}
+
+ + + +
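A sketch of the reverse visiting order (illustrative only, assuming `use std::vector;`): the last pair is processed first, so collecting the results preserves that back-to-front order:

    #[test]
    fun zip_reverse_sketch() {
        let sums = vector[];
        // Pairs are visited back to front: (3, 30), (2, 20), (1, 10).
        vector::zip_reverse(vector[1, 2, 3], vector[10, 20, 30], |e1, e2| {
            vector::push_back(&mut sums, e1 + e2)
        });
        assert!(sums == vector[33, 22, 11], 0);
    }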
+ + + +## Function `zip_ref` + +Apply the function to the references of each pair of elements in the two given vectors. +This errors out if the vectors are not of the same length. + + +
public fun zip_ref<Element1, Element2>(v1: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|())
+
+ + + +
+Implementation + + +
public inline fun zip_ref<Element1, Element2>(
+    v1: &vector<Element1>,
+    v2: &vector<Element2>,
+    f: |&Element1, &Element2|,
+) {
+    let len = length(v1);
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == length(v2), 0x20002);
+    let i = 0;
+    while (i < len) {
+        f(borrow(v1, i), borrow(v2, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `enumerate_ref` + +Apply the function to a reference of each element in the vector with its index. + + +
public fun enumerate_ref<Element>(v: &vector<Element>, f: |(u64, &Element)|())
+
+ + + +
+Implementation + + +
public inline fun enumerate_ref<Element>(v: &vector<Element>, f: |u64, &Element|) {
+    let i = 0;
+    let len = length(v);
+    while (i < len) {
+        f(i, borrow(v, i));
+        i = i + 1;
+    };
+}
+
+ + +
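A small sketch of enumerate_ref, echoing the test_enumerate_ref unit test added in this change (assuming `use std::vector;`; names are illustrative):

    #[test]
    fun enumerate_ref_sketch() {
        let v = vector[100, 200, 300];
        let weighted = 0;
        // The lambda receives the index and an immutable reference to each element.
        vector::enumerate_ref(&v, |i, e| weighted = weighted + i * *e);
        assert!(weighted == 800, 0); // 0*100 + 1*200 + 2*300
    }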
@@ -787,6 +987,74 @@ Apply the function to a mutable reference to each element in the vector. + + + + +## Function `zip_mut` + +Apply the function to mutable references to each pair of elements in the two given vectors. +This errors out if the vectors are not of the same length. + + +
public fun zip_mut<Element1, Element2>(v1: &mut vector<Element1>, v2: &mut vector<Element2>, f: |(&mut Element1, &mut Element2)|())
+
+ + + +
+Implementation + + +
public inline fun zip_mut<Element1, Element2>(
+    v1: &mut vector<Element1>,
+    v2: &mut vector<Element2>,
+    f: |&mut Element1, &mut Element2|,
+) {
+    let i = 0;
+    let len = length(v1);
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == length(v2), 0x20002);
+    while (i < len) {
+        f(borrow_mut(v1, i), borrow_mut(v2, i));
+        i = i + 1
+    }
+}
+
+ + + +
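A sketch of zip_mut updating two vectors in lockstep, along the lines of the test_zip_mut unit test below (assuming `use std::vector;`; the explicit &mut u64 annotations help lambda type inference, as in that test):

    #[test]
    fun zip_mut_sketch() {
        let v1 = vector[1, 2, 3];
        let v2 = vector[10, 20, 30];
        // Each pair is visited with mutable references, so both vectors can be updated together.
        vector::zip_mut(&mut v1, &mut v2, |e1, e2| {
            let e1: &mut u64 = e1;
            let e2: &mut u64 = e2;
            *e1 = *e1 + *e2;
            *e2 = 0;
        });
        assert!(v1 == vector[11, 22, 33], 0);
        assert!(v2 == vector[0, 0, 0], 1);
    }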
+ + + +## Function `enumerate_mut` + +Apply the function to a mutable reference of each element in the vector with its index. + + +
public fun enumerate_mut<Element>(v: &mut vector<Element>, f: |(u64, &mut Element)|())
+
+ + + +
+Implementation + + +
public inline fun enumerate_mut<Element>(v: &mut vector<Element>, f: |u64, &mut Element|) {
+    let i = 0;
+    let len = length(v);
+    while (i < len) {
+        f(i, borrow_mut(v, i));
+        i = i + 1;
+    };
+}
+
+ + +
@@ -858,7 +1126,7 @@ Fold right like fold above but working right to left. For example, public fun map_ref<Element, NewElement>(v: &vector<Element>, f: |&Element|NewElement): vector<NewElement> @@ -882,6 +1150,42 @@ original map. + + + + +## Function `zip_map_ref` + +Map the function over the references of the element pairs of two vectors, producing a new vector from the return +values without modifying the original vectors. + + +
public fun zip_map_ref<Element1, Element2, NewElement>(v1: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|NewElement): vector<NewElement>
+
+ + + +
+Implementation + + +
public inline fun zip_map_ref<Element1, Element2, NewElement>(
+    v1: &vector<Element1>,
+    v2: &vector<Element2>,
+    f: |&Element1, &Element2|NewElement
+): vector<NewElement> {
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(length(v1) == length(v2), 0x20002);
+
+    let result = vector<NewElement>[];
+    zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    result
+}
+
+ + +
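A sketch of zip_map_ref building a new vector from two borrowed inputs, similar to the test_zip_map_ref unit test below (assuming `use std::vector;`; variable names are illustrative):

    #[test]
    fun zip_map_ref_sketch() {
        let prices = vector[3, 5, 7];
        let quantities = vector[2, 4, 6];
        // Neither input is consumed; each result element is f applied to a pair of references.
        let totals = vector::zip_map_ref(&prices, &quantities, |p, q| *p * *q);
        assert!(totals == vector[6, 20, 42], 0);
        // The original vectors remain intact and usable afterwards.
        assert!(vector::length(&prices) == 3 && vector::length(&quantities) == 3, 1);
    }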
@@ -912,6 +1216,41 @@ Map the function over the elements of the vector, producing a new vector. + + + + +## Function `zip_map` + +Map the function over the element pairs of the two vectors, producing a new vector. + + +
public fun zip_map<Element1, Element2, NewElement>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|NewElement): vector<NewElement>
+
+ + + +
+Implementation + + +
public inline fun zip_map<Element1, Element2, NewElement>(
+    v1: vector<Element1>,
+    v2: vector<Element2>,
+    f: |Element1, Element2|NewElement
+): vector<NewElement> {
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(length(&v1) == length(&v2), 0x20002);
+
+    let result = vector<NewElement>[];
+    zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    result
+}
+
+ + +
@@ -1469,6 +1808,22 @@ Check if v contains e. +
pragma intrinsic = true;
+
+ + + + + +### Function `remove_value` + + +
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element>
+
+ + + +
pragma intrinsic = true;
 
diff --git a/aptos-move/framework/move-stdlib/sources/bit_vector.move b/aptos-move/framework/move-stdlib/sources/bit_vector.move index e89795422c373..e0202f13b849c 100644 --- a/aptos-move/framework/move-stdlib/sources/bit_vector.move +++ b/aptos-move/framework/move-stdlib/sources/bit_vector.move @@ -85,13 +85,9 @@ module std::bit_vector { /// bitvector's length the bitvector will be zeroed out. public fun shift_left(bitvector: &mut BitVector, amount: u64) { if (amount >= bitvector.length) { - let len = vector::length(&bitvector.bit_field); - let i = 0; - while (i < len) { - let elem = vector::borrow_mut(&mut bitvector.bit_field, i); - *elem = false; - i = i + 1; - }; + vector::for_each_mut(&mut bitvector.bit_field, |elem| { + *elem = false; + }); } else { let i = amount; diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.move b/aptos-move/framework/move-stdlib/sources/configs/features.move index 1e327ea9fe5b5..8f76a00cd6305 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.move @@ -205,18 +205,12 @@ module std::features { move_to(framework, Features{features: vector[]}) }; let features = &mut borrow_global_mut(@std).features; - let i = 0; - let n = vector::length(&enable); - while (i < n) { - set(features, *vector::borrow(&enable, i), true); - i = i + 1 - }; - let i = 0; - let n = vector::length(&disable); - while (i < n) { - set(features, *vector::borrow(&disable, i), false); - i = i + 1 - }; + vector::for_each_ref(&enable, |feature| { + set(features, *feature, true); + }); + vector::for_each_ref(&disable, |feature| { + set(features, *feature, false); + }); } /// Check whether the feature is enabled. diff --git a/aptos-move/framework/move-stdlib/sources/vector.move b/aptos-move/framework/move-stdlib/sources/vector.move index d575af37376f9..f7f467286d381 100644 --- a/aptos-move/framework/move-stdlib/sources/vector.move +++ b/aptos-move/framework/move-stdlib/sources/vector.move @@ -15,6 +15,9 @@ module std::vector { /// The index into the vector is out of bounds const EINVALID_RANGE: u64 = 0x20001; + /// The length of the vectors are not equal. + const EVECTORS_LENGTH_MISMATCH: u64 = 0x20002; + #[bytecode_instruction] /// Create an empty vector. native public fun empty(): vector; @@ -204,6 +207,26 @@ module std::vector { pragma intrinsic = true; } + /// Remove the first occurrence of a given value in the vector `v` and return it in a vector, shifting all + /// subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector. + /// This returns an empty vector if the value isn't present in the vector. + /// Note that this cannot return an option as option uses vector and there'd be a circular dependency between option + /// and vector. + public fun remove_value(v: &mut vector, val: &Element): vector { + // This doesn't cost a O(2N) run time as index_of scans from left to right and stops when the element is found, + // while remove would continue from the identified index to the end of the vector. + let (found, index) = index_of(v, val); + if (found) { + vector[remove(v, index)] + } else { + vector[] + } + } + spec remove_value { + pragma intrinsic = true; + } + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. /// This is O(1), but does not preserve ordering of elements in the vector. /// Aborts if `i` is out of bounds. 
@@ -243,6 +266,61 @@ module std::vector { } } + /// Apply the function to each pair of elements in the two given vectors, consuming them. + public inline fun zip(v1: vector, v2: vector, f: |Element1, Element2|) { + // We need to reverse the vectors to consume it efficiently + reverse(&mut v1); + reverse(&mut v2); + zip_reverse(v1, v2, |e1, e2| f(e1, e2)); + } + + /// Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. + /// This errors out if the vectors are not of the same length. + public inline fun zip_reverse( + v1: vector, + v2: vector, + f: |Element1, Element2|, + ) { + let len = length(&v1); + // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it + // due to how inline functions work. + assert!(len == length(&v2), 0x20002); + while (len > 0) { + f(pop_back(&mut v1), pop_back(&mut v2)); + len = len - 1; + }; + destroy_empty(v1); + destroy_empty(v2); + } + + /// Apply the function to the references of each pair of elements in the two given vectors. + /// This errors out if the vectors are not of the same length. + public inline fun zip_ref( + v1: &vector, + v2: &vector, + f: |&Element1, &Element2|, + ) { + let len = length(v1); + // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it + // due to how inline functions work. + assert!(len == length(v2), 0x20002); + let i = 0; + while (i < len) { + f(borrow(v1, i), borrow(v2, i)); + i = i + 1 + } + } + + /// Apply the function to a reference of each element in the vector with its index. + public inline fun enumerate_ref(v: &vector, f: |u64, &Element|) { + let i = 0; + let len = length(v); + while (i < len) { + f(i, borrow(v, i)); + i = i + 1; + }; + } + /// Apply the function to a mutable reference to each element in the vector. public inline fun for_each_mut(v: &mut vector, f: |&mut Element|) { let i = 0; @@ -253,6 +331,34 @@ module std::vector { } } + /// Apply the function to mutable references to each pair of elements in the two given vectors. + /// This errors out if the vectors are not of the same length. + public inline fun zip_mut( + v1: &mut vector, + v2: &mut vector, + f: |&mut Element1, &mut Element2|, + ) { + let i = 0; + let len = length(v1); + // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it + // due to how inline functions work. + assert!(len == length(v2), 0x20002); + while (i < len) { + f(borrow_mut(v1, i), borrow_mut(v2, i)); + i = i + 1 + } + } + + /// Apply the function to a mutable reference of each element in the vector with its index. + public inline fun enumerate_mut(v: &mut vector, f: |u64, &mut Element|) { + let i = 0; + let len = length(v); + while (i < len) { + f(i, borrow_mut(v, i)); + i = i + 1; + }; + } + /// Fold the function over the elements. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(f(f(0, 1), 2), 3)` public inline fun fold( @@ -278,7 +384,7 @@ module std::vector { } /// Map the function over the references of the elements of the vector, producing a new vector without modifying the - /// original map. + /// original vector. public inline fun map_ref( v: &vector, f: |&Element|NewElement @@ -288,6 +394,22 @@ module std::vector { result } + /// Map the function over the references of the element pairs of two vectors, producing a new vector from the return + /// values without modifying the original vectors. 
+ public inline fun zip_map_ref( + v1: &vector, + v2: &vector, + f: |&Element1, &Element2|NewElement + ): vector { + // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it + // due to how inline functions work. + assert!(length(v1) == length(v2), 0x20002); + + let result = vector[]; + zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + result + } + /// Map the function over the elements of the vector, producing a new vector. public inline fun map( v: vector, @@ -298,6 +420,21 @@ module std::vector { result } + /// Map the function over the element pairs of the two vectors, producing a new vector. + public inline fun zip_map( + v1: vector, + v2: vector, + f: |Element1, Element2|NewElement + ): vector { + // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it + // due to how inline functions work. + assert!(length(&v1) == length(&v2), 0x20002); + + let result = vector[]; + zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + result + } + /// Filter the vector using the boolean function, removing all elements for which `p(e)` is not true. public inline fun filter( v: vector, diff --git a/aptos-move/framework/move-stdlib/tests/bcs_tests.move b/aptos-move/framework/move-stdlib/tests/bcs_tests.move index 09266136ea4a3..72437ebb00fd6 100644 --- a/aptos-move/framework/move-stdlib/tests/bcs_tests.move +++ b/aptos-move/framework/move-stdlib/tests/bcs_tests.move @@ -79,9 +79,11 @@ module std::bcs_tests { bcs::to_bytes(&box127(true)); } + /* Deactivated because we now limit the depth of values you could create inside the VM #[test] #[expected_failure(abort_code = 453, location = std::bcs)] fun encode_129() { bcs::to_bytes(&Box { x: box127(true) }); } + */ } diff --git a/aptos-move/framework/move-stdlib/tests/vector_tests.move b/aptos-move/framework/move-stdlib/tests/vector_tests.move index 0804c2c5d7b99..606e9cd66fcfc 100644 --- a/aptos-move/framework/move-stdlib/tests/vector_tests.move +++ b/aptos-move/framework/move-stdlib/tests/vector_tests.move @@ -266,6 +266,59 @@ module std::vector_tests { V::remove(&mut v, 1); } + #[test] + fun remove_value_singleton_vector() { + let v = V::empty(); + V::push_back(&mut v, 0); + assert!(V::borrow(&V::remove_value(&mut v, &0), 0) == &0, 0); + assert!(V::length(&v) == 0, 0); + } + + #[test] + fun remove_value_nonsingleton_vector() { + let v = V::empty(); + V::push_back(&mut v, 0); + V::push_back(&mut v, 1); + V::push_back(&mut v, 2); + V::push_back(&mut v, 3); + + assert!(V::borrow(&V::remove_value(&mut v, &2), 0) == &2, 0); + assert!(V::length(&v) == 3, 0); + assert!(*V::borrow(&v, 0) == 0, 0); + assert!(*V::borrow(&v, 1) == 1, 0); + assert!(*V::borrow(&v, 2) == 3, 0); + } + + #[test] + fun remove_value_nonsingleton_vector_last_elem() { + let v = V::empty(); + V::push_back(&mut v, 0); + V::push_back(&mut v, 1); + V::push_back(&mut v, 2); + V::push_back(&mut v, 3); + + assert!(V::borrow(&V::remove_value(&mut v, &3), 0) == &3, 0); + assert!(V::length(&v) == 3, 0); + assert!(*V::borrow(&v, 0) == 0, 0); + assert!(*V::borrow(&v, 1) == 1, 0); + assert!(*V::borrow(&v, 2) == 2, 0); + } + + #[test] + fun remove_value_empty_vector() { + let v = V::empty(); + assert!(V::length(&V::remove_value(&mut v, &1)) == 0, 0); + assert!(V::length(&v) == 0, 1); + } + + #[test] + fun remove_value_nonexistent() { + let v = V::empty(); + V::push_back(&mut v, 0); + assert!(V::length(&V::remove_value(&mut v, &1)) == 0, 0); + assert!(V::length(&v) == 1, 1); + } + #[test] 
fun reverse_vector_empty() { let v = V::empty(); @@ -535,6 +588,38 @@ module std::vector_tests { assert!(s == 6, 0) } + #[test] + fun test_zip() { + let v1 = vector[1, 2, 3]; + let v2 = vector[10, 20, 30]; + let s = 0; + V::zip(v1, v2, |e1, e2| s = s + e1 * e2); + assert!(s == 140, 0); + } + + #[test] + // zip is an inline function so any error code will be reported at the call site. + #[expected_failure(abort_code = V::EVECTORS_LENGTH_MISMATCH, location = Self)] + fun test_zip_mismatching_lengths_should_fail() { + let v1 = vector[1]; + let v2 = vector[10, 20]; + let s = 0; + V::zip(v1, v2, |e1, e2| s = s + e1 * e2); + } + + #[test] + fun test_enumerate_ref() { + let v = vector[1, 2, 3]; + let i_s = 0; + let s = 0; + V::enumerate_ref(&v, |i, e| { + i_s = i_s + i; + s = s + *e; + }); + assert!(i_s == 3, 0); + assert!(s == 6, 0); + } + #[test] fun test_for_each_ref() { let v = vector[1, 2, 3]; @@ -551,6 +636,97 @@ module std::vector_tests { assert!(v == vector[2, 3, 4], 0) } + #[test] + fun test_zip_ref() { + let v1 = vector[1, 2, 3]; + let v2 = vector[10, 20, 30]; + let s = 0; + V::zip_ref(&v1, &v2, |e1, e2| s = s + *e1 * *e2); + assert!(s == 140, 0); + } + + #[test] + // zip_ref is an inline function so any error code will be reported at the call site. + #[expected_failure(abort_code = V::EVECTORS_LENGTH_MISMATCH, location = Self)] + fun test_zip_ref_mismatching_lengths_should_fail() { + let v1 = vector[1]; + let v2 = vector[10, 20]; + let s = 0; + V::zip_ref(&v1, &v2, |e1, e2| s = s + *e1 * *e2); + } + + #[test] + fun test_zip_mut() { + let v1 = vector[1, 2, 3]; + let v2 = vector[10, 20, 30]; + V::zip_mut(&mut v1, &mut v2, |e1, e2| { + let e1: &mut u64 = e1; + let e2: &mut u64 = e2; + *e1 = *e1 + 1; + *e2 = *e2 + 10; + }); + assert!(v1 == vector[2, 3, 4], 0); + assert!(v2 == vector[20, 30, 40], 0); + } + + #[test] + fun test_zip_map() { + let v1 = vector[1, 2, 3]; + let v2 = vector[10, 20, 30]; + let result = V::zip_map(v1, v2, |e1, e2| e1 + e2); + assert!(result == vector[11, 22, 33], 0); + } + + #[test] + fun test_zip_map_ref() { + let v1 = vector[1, 2, 3]; + let v2 = vector[10, 20, 30]; + let result = V::zip_map_ref(&v1, &v2, |e1, e2| *e1 + *e2); + assert!(result == vector[11, 22, 33], 0); + } + + #[test] + // zip_mut is an inline function so any error code will be reported at the call site. + #[expected_failure(abort_code = V::EVECTORS_LENGTH_MISMATCH, location = Self)] + fun test_zip_mut_mismatching_lengths_should_fail() { + let v1 = vector[1]; + let v2 = vector[10, 20]; + let s = 0; + V::zip_mut(&mut v1, &mut v2, |e1, e2| s = s + *e1 * *e2); + } + + #[test] + // zip_map is an inline function so any error code will be reported at the call site. + #[expected_failure(abort_code = V::EVECTORS_LENGTH_MISMATCH, location = Self)] + fun test_zip_map_mismatching_lengths_should_fail() { + let v1 = vector[1]; + let v2 = vector[10, 20]; + V::zip_map(v1, v2, |e1, e2| e1 * e2); + } + + #[test] + // zip_map_ref is an inline function so any error code will be reported at the call site. 
+ #[expected_failure(abort_code = V::EVECTORS_LENGTH_MISMATCH, location = Self)] + fun test_zip_map_ref_mismatching_lengths_should_fail() { + let v1 = vector[1]; + let v2 = vector[10, 20]; + V::zip_map_ref(&v1, &v2, |e1, e2| *e1 * *e2); + } + + #[test] + fun test_enumerate_mut() { + let v = vector[1, 2, 3]; + let i_s = 0; + let s = 2; + V::enumerate_mut(&mut v, |i, e| { + i_s = i_s + i; + *e = s; + s = s + 1 + }); + assert!(i_s == 3, 0); + assert!(v == vector[2, 3, 4], 0); + } + #[test] fun test_fold() { let v = vector[1, 2, 3]; diff --git a/aptos-move/framework/src/natives/object.rs b/aptos-move/framework/src/natives/object.rs index d5c4fd6b869ba..169e25ec52b2b 100644 --- a/aptos-move/framework/src/natives/object.rs +++ b/aptos-move/framework/src/natives/object.rs @@ -53,15 +53,7 @@ fn native_exists_at( })?; if let Some(num_bytes) = num_bytes { - match num_bytes { - Some(num_bytes) => { - context - .charge(gas_params.per_item_loaded + num_bytes * gas_params.per_byte_loaded)?; - }, - None => { - context.charge(gas_params.per_item_loaded)?; - }, - } + context.charge(gas_params.per_item_loaded + num_bytes * gas_params.per_byte_loaded)?; } Ok(smallvec![Value::bool(exists)]) diff --git a/aptos-move/move-examples/cli-e2e-tests/README.md b/aptos-move/move-examples/cli-e2e-tests/README.md new file mode 100644 index 0000000000000..8929f80118e3b --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/README.md @@ -0,0 +1,4 @@ +# CLI E2E tests +These packages, one per production network, are used by the CLI E2E tests to test the correctness of the `aptos move` subcommand group. As such there is no particular rhyme or reason to what goes into these, it is meant to be an expressive selection of different, new features we might want to assert. + +As it is now the 3 packages share the same source code. Down the line we might want to use these tests to confirm that the CLI works with a new feature as it lands in devnet, then testnet, then mainnet. For that we'd need to separate the source. 
diff --git a/aptos-move/move-examples/cli-e2e-tests/common/sources/cli_e2e_tests.move b/aptos-move/move-examples/cli-e2e-tests/common/sources/cli_e2e_tests.move new file mode 100644 index 0000000000000..613056d72f1be --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/common/sources/cli_e2e_tests.move @@ -0,0 +1,327 @@ +module addr::cli_e2e_tests { + use std::error; + use std::option::{Self, Option}; + use std::signer; + use std::string::{Self, String}; + + use aptos_framework::object::{Self, ConstructorRef, Object}; + + use aptos_token_objects::collection; + use aptos_token_objects::token; + use aptos_std::string_utils; + + const ENOT_A_HERO: u64 = 1; + const ENOT_A_WEAPON: u64 = 2; + const ENOT_A_GEM: u64 = 3; + const ENOT_CREATOR: u64 = 4; + const EINVALID_WEAPON_UNEQUIP: u64 = 5; + const EINVALID_GEM_UNEQUIP: u64 = 6; + const EINVALID_TYPE: u64 = 7; + + struct OnChainConfig has key { + collection: String, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + struct Hero has key { + armor: Option>, + gender: String, + race: String, + shield: Option>, + weapon: Option>, + mutator_ref: token::MutatorRef, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + struct Armor has key { + defense: u64, + gem: Option>, + weight: u64, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + struct Gem has key { + attack_modifier: u64, + defense_modifier: u64, + magic_attribute: String, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + struct Shield has key { + defense: u64, + gem: Option>, + weight: u64, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + struct Weapon has key { + attack: u64, + gem: Option>, + weapon_type: String, + weight: u64, + } + + fun init_module(account: &signer) { + let collection = string::utf8(b"Hero Quest!"); + collection::create_unlimited_collection( + account, + string::utf8(b"collection description"), + collection, + option::none(), + string::utf8(b"collection uri"), + ); + + let on_chain_config = OnChainConfig { + collection: string::utf8(b"Hero Quest!"), + }; + move_to(account, on_chain_config); + } + + fun create( + creator: &signer, + description: String, + name: String, + uri: String, + ): ConstructorRef acquires OnChainConfig { + let on_chain_config = borrow_global(signer::address_of(creator)); + token::create_named_token( + creator, + on_chain_config.collection, + description, + name, + option::none(), + uri, + ) + } + + // Creation methods + + public fun create_hero( + creator: &signer, + description: String, + gender: String, + name: String, + race: String, + uri: String, + ): Object acquires OnChainConfig { + let constructor_ref = create(creator, description, name, uri); + let token_signer = object::generate_signer(&constructor_ref); + + let hero = Hero { + armor: option::none(), + gender, + race, + shield: option::none(), + weapon: option::none(), + mutator_ref: token::generate_mutator_ref(&constructor_ref), + }; + move_to(&token_signer, hero); + + object::address_to_object(signer::address_of(&token_signer)) + } + + public fun create_weapon( + creator: &signer, + attack: u64, + description: String, + name: String, + uri: String, + weapon_type: String, + weight: u64, + ): Object acquires OnChainConfig { + let constructor_ref = create(creator, description, name, uri); + let token_signer = object::generate_signer(&constructor_ref); + + let weapon = Weapon { + attack, + gem: option::none(), + weapon_type, + weight, + 
}; + move_to(&token_signer, weapon); + + object::address_to_object(signer::address_of(&token_signer)) + } + + public fun create_gem( + creator: &signer, + attack_modifier: u64, + defense_modifier: u64, + description: String, + magic_attribute: String, + name: String, + uri: String, + ): Object acquires OnChainConfig { + let constructor_ref = create(creator, description, name, uri); + let token_signer = object::generate_signer(&constructor_ref); + + let gem = Gem { + attack_modifier, + defense_modifier, + magic_attribute, + }; + move_to(&token_signer, gem); + + object::address_to_object(signer::address_of(&token_signer)) + } + + // Transfer wrappers + + public fun hero_equip_weapon(owner: &signer, hero: Object, weapon: Object) acquires Hero { + let hero_obj = borrow_global_mut(object::object_address(&hero)); + option::fill(&mut hero_obj.weapon, weapon); + object::transfer_to_object(owner, weapon, hero); + } + + public fun hero_unequip_weapon(owner: &signer, hero: Object, weapon: Object) acquires Hero { + let hero_obj = borrow_global_mut(object::object_address(&hero)); + let stored_weapon = option::extract(&mut hero_obj.weapon); + assert!(stored_weapon == weapon, error::not_found(EINVALID_WEAPON_UNEQUIP)); + object::transfer(owner, weapon, signer::address_of(owner)); + } + + public fun weapon_equip_gem(owner: &signer, weapon: Object, gem: Object) acquires Weapon { + let weapon_obj = borrow_global_mut(object::object_address(&weapon)); + option::fill(&mut weapon_obj.gem, gem); + object::transfer_to_object(owner, gem, weapon); + } + + public fun weapon_unequip_gem(owner: &signer, weapon: Object, gem: Object) acquires Weapon { + let weapon_obj = borrow_global_mut(object::object_address(&weapon)); + let stored_gem = option::extract(&mut weapon_obj.gem); + assert!(stored_gem == gem, error::not_found(EINVALID_GEM_UNEQUIP)); + object::transfer(owner, gem, signer::address_of(owner)); + } + + // Entry functions + + entry fun mint_hero( + account: &signer, + description: String, + gender: String, + name: String, + race: String, + uri: String, + ) acquires OnChainConfig { + create_hero(account, description, gender, name, race, uri); + } + + entry fun set_hero_description( + creator: &signer, + collection: String, + name: String, + description: String, + ) acquires Hero { + let (hero_obj, hero) = get_hero( + &signer::address_of(creator), + &collection, + &name, + ); + let creator_addr = token::creator(hero_obj); + assert!(creator_addr == signer::address_of(creator), error::permission_denied(ENOT_CREATOR)); + token::set_description(&hero.mutator_ref, description); + } + + // View functions + #[view] + fun view_hero(creator: address, collection: String, name: String): Hero acquires Hero { + let token_address = token::create_token_address( + &creator, + &collection, + &name, + ); + move_from(token_address) + } + + #[view] + fun view_hero_by_object(hero_obj: Object): Hero acquires Hero { + let token_address = object::object_address(&hero_obj); + move_from(token_address) + } + + #[view] + fun view_object(obj: Object): String acquires Armor, Gem, Hero, Shield, Weapon { + let token_address = object::object_address(&obj); + if (exists(token_address)) { + string_utils::to_string(borrow_global(token_address)) + } else if (exists(token_address)) { + string_utils::to_string(borrow_global(token_address)) + } else if (exists(token_address)) { + string_utils::to_string(borrow_global(token_address)) + } else if (exists(token_address)) { + string_utils::to_string(borrow_global(token_address)) + } else if 
(exists(token_address)) { + string_utils::to_string(borrow_global(token_address)) + } else { + abort EINVALID_TYPE + } + } + + inline fun get_hero(creator: &address, collection: &String, name: &String): (Object, &Hero) { + let token_address = token::create_token_address( + creator, + collection, + name, + ); + (object::address_to_object(token_address), borrow_global(token_address)) + } + + #[test(account = @0x3)] + fun test_hero_with_gem_weapon(account: &signer) acquires Hero, OnChainConfig, Weapon { + init_module(account); + + let hero = create_hero( + account, + string::utf8(b"The best hero ever!"), + string::utf8(b"Male"), + string::utf8(b"Wukong"), + string::utf8(b"Monkey God"), + string::utf8(b""), + ); + + let weapon = create_weapon( + account, + 32, + string::utf8(b"A magical staff!"), + string::utf8(b"Ruyi Jingu Bang"), + string::utf8(b""), + string::utf8(b"staff"), + 15, + ); + + let gem = create_gem( + account, + 32, + 32, + string::utf8(b"Beautiful specimen!"), + string::utf8(b"earth"), + string::utf8(b"jade"), + string::utf8(b""), + ); + + let account_address = signer::address_of(account); + assert!(object::is_owner(hero, account_address), 0); + assert!(object::is_owner(weapon, account_address), 1); + assert!(object::is_owner(gem, account_address), 2); + + hero_equip_weapon(account, hero, weapon); + assert!(object::is_owner(hero, account_address), 3); + assert!(object::is_owner(weapon, object::object_address(&hero)), 4); + assert!(object::is_owner(gem, account_address), 5); + + weapon_equip_gem(account, weapon, gem); + assert!(object::is_owner(hero, account_address), 6); + assert!(object::is_owner(weapon, object::object_address(&hero)), 7); + assert!(object::is_owner(gem, object::object_address(&weapon)), 8); + + hero_unequip_weapon(account, hero, weapon); + assert!(object::is_owner(hero, account_address), 9); + assert!(object::is_owner(weapon, account_address), 10); + assert!(object::is_owner(gem, object::object_address(&weapon)), 11); + + weapon_unequip_gem(account, weapon, gem); + assert!(object::is_owner(hero, account_address), 12); + assert!(object::is_owner(weapon, account_address), 13); + assert!(object::is_owner(gem, account_address), 14); + } +} diff --git a/aptos-move/move-examples/cli-e2e-tests/devnet/Move.toml b/aptos-move/move-examples/cli-e2e-tests/devnet/Move.toml new file mode 100644 index 0000000000000..29db695268495 --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/devnet/Move.toml @@ -0,0 +1,16 @@ +[package] +name = "cli_e2e_tests" +version = "0.0.1" + +[addresses] +addr = "_" + +[dependencies.AptosFramework] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "devnet" +subdir = "aptos-move/framework/aptos-framework" + +[dependencies.AptosTokenObjects] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "devnet" +subdir = "aptos-move/framework/aptos-token-objects" diff --git a/aptos-move/move-examples/cli-e2e-tests/devnet/sources b/aptos-move/move-examples/cli-e2e-tests/devnet/sources new file mode 120000 index 0000000000000..3fcdf678f47c5 --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/devnet/sources @@ -0,0 +1 @@ +../common/sources/ \ No newline at end of file diff --git a/aptos-move/move-examples/cli-e2e-tests/mainnet/Move.toml b/aptos-move/move-examples/cli-e2e-tests/mainnet/Move.toml new file mode 100644 index 0000000000000..b74dc02f72f5a --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/mainnet/Move.toml @@ -0,0 +1,16 @@ +[package] +name = "cli_e2e_tests" +version = "0.0.1" + +[addresses] +addr = "_" + 
+[dependencies.AptosFramework] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "mainnet" +subdir = "aptos-move/framework/aptos-framework" + +[dependencies.AptosTokenObjects] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "mainnet" +subdir = "aptos-move/framework/aptos-token-objects" diff --git a/aptos-move/move-examples/cli-e2e-tests/mainnet/sources b/aptos-move/move-examples/cli-e2e-tests/mainnet/sources new file mode 120000 index 0000000000000..3fcdf678f47c5 --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/mainnet/sources @@ -0,0 +1 @@ +../common/sources/ \ No newline at end of file diff --git a/aptos-move/move-examples/cli-e2e-tests/testnet/Move.toml b/aptos-move/move-examples/cli-e2e-tests/testnet/Move.toml new file mode 100644 index 0000000000000..0ffe8f1734786 --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/testnet/Move.toml @@ -0,0 +1,16 @@ +[package] +name = "cli_e2e_tests" +version = "0.0.1" + +[addresses] +addr = "_" + +[dependencies.AptosFramework] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "testnet" +subdir = "aptos-move/framework/aptos-framework" + +[dependencies.AptosTokenObjects] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "testnet" +subdir = "aptos-move/framework/aptos-token-objects" diff --git a/aptos-move/move-examples/cli-e2e-tests/testnet/sources b/aptos-move/move-examples/cli-e2e-tests/testnet/sources new file mode 120000 index 0000000000000..3fcdf678f47c5 --- /dev/null +++ b/aptos-move/move-examples/cli-e2e-tests/testnet/sources @@ -0,0 +1 @@ +../common/sources/ \ No newline at end of file diff --git a/aptos-move/move-examples/dao/nft_dao/sources/bucket_table.move b/aptos-move/move-examples/dao/nft_dao/sources/bucket_table.move index c27e8f48529b9..97727ff5037f6 100644 --- a/aptos-move/move-examples/dao/nft_dao/sources/bucket_table.move +++ b/aptos-move/move-examples/dao/nft_dao/sources/bucket_table.move @@ -68,13 +68,10 @@ module dao_platform::bucket_table { let hash = sip_hash_from_value(&key); let index = bucket_index(map.level, map.num_buckets, hash); let bucket = table_with_length::borrow_mut(&mut map.buckets, index); - let i = 0; - let len = vector::length(bucket); - while (i < len) { - let entry = vector::borrow(bucket, i); + vector::for_each_ref(bucket, |entry| { + let entry: &Entry = entry; assert!(&entry.key != &key, error::invalid_argument(EALREADY_EXIST)); - i = i + 1; - }; + }); vector::push_back(bucket, Entry {hash, key, value}); map.len = map.len + 1; @@ -177,16 +174,10 @@ module dao_platform::bucket_table { public fun contains(map: &BucketTable, key: &K): bool { let index = bucket_index(map.level, map.num_buckets, sip_hash_from_value(key)); let bucket = table_with_length::borrow(&map.buckets, index); - let i = 0; - let len = vector::length(bucket); - while (i < len) { - let entry = vector::borrow(bucket, i); - if (&entry.key == key) { - return true - }; - i = i + 1; - }; - false + vector::any(bucket, |entry| { + let entry: &Entry = entry; + &entry.key == key + }) } /// Remove from `table` and return the value which `key` maps to. 
diff --git a/aptos-move/move-examples/dao/nft_dao/sources/nft_dao.move b/aptos-move/move-examples/dao/nft_dao/sources/nft_dao.move index 7cd9e2dbaa2b0..659d0858b2be5 100644 --- a/aptos-move/move-examples/dao/nft_dao/sources/nft_dao.move +++ b/aptos-move/move-examples/dao/nft_dao/sources/nft_dao.move @@ -343,9 +343,7 @@ module dao_platform::nft_dao { }; let function_args = vector::empty(); - let cnt = 0; - while (cnt < fcnt) { - let fname = vector::borrow(&function_names, cnt); + vector::enumerate_ref(&function_names, |cnt, fname| { let arg_names = vector::borrow(&arg_names, cnt); let arg_values = vector::borrow(&arg_values, cnt); let arg_types = vector::borrow(&arg_types, cnt); @@ -353,8 +351,7 @@ module dao_platform::nft_dao { let pm = property_map::new(*arg_names, *arg_values, *arg_types); assert_function_valid(*fname, &pm); vector::push_back(&mut function_args, pm); - cnt = cnt + 1; - }; + }); // verify the start_time is in future let now = timestamp::now_seconds(); @@ -429,10 +426,9 @@ module dao_platform::nft_dao { let stats = table::borrow_mut(&mut prop_stats.proposals, proposal_id); let voter_addr = signer::address_of(account); - let i = 0; // loop through all NFTs used for voting and update the voting result - while (i < vector::length(&token_names)) { - let token_name = *vector::borrow(&token_names, i); + vector::enumerate_ref(&token_names, |i, token_name| { + let token_name = *token_name; let property_version = *vector::borrow(&property_versions, i); let token_id = token::create_token_id_raw(gtoken.creator, gtoken.collection, token_name, property_version); // check if this token already voted @@ -448,8 +444,7 @@ module dao_platform::nft_dao { stats.total_no = stats.total_no + 1; bucket_table::add(&mut stats.no_votes, token_id, voter_addr); }; - i = i + 1; - }; + }); nft_dao_events::emit_voting_event( voter_addr, @@ -691,10 +686,7 @@ module dao_platform::nft_dao { /// Internal function for executing a DAO's proposal fun execute_proposal(proposal: &Proposal, dao: &DAO){ - let fcnt = vector::length(&proposal.function_names); - let i = 0; - while (i < fcnt) { - let function_name = vector::borrow(&proposal.function_names, i); + vector::enumerate_ref(&proposal.function_names, |i, function_name| { let args = vector::borrow(&proposal.function_args, i); if (function_name == &string::utf8(b"transfer_fund")) { let res_signer = create_signer_with_capability(&dao.dao_signer_capability); @@ -712,8 +704,7 @@ module dao_platform::nft_dao { } else { assert!(function_name == &string::utf8(b"no_op"), error::invalid_argument(ENOT_SUPPROTED_FUNCTION)); }; - i = i + 1; - }; + }); } /// Resolve an proposal @@ -764,18 +755,16 @@ module dao_platform::nft_dao { dao: &DAO ): u64 { let gtoken = &dao.governance_token; - let i = 0; let used_token_ids = vector::empty(); let total = vector::length(token_names); - while (i < total) { - let token_name = *vector::borrow(token_names, i); + vector::enumerate_ref(token_names, |i, token_name| { + let token_name = *token_name; let property_version = *vector::borrow(property_versions, i); let token_id = token::create_token_id_raw(gtoken.creator, gtoken.collection, token_name, property_version); assert!(!vector::contains(&used_token_ids, &token_id), error::already_exists(ETOKEN_USED_FOR_CREATING_PROPOSAL)); vector::push_back(&mut used_token_ids, token_id); assert!(token::balance_of(signer::address_of(account), token_id) == 1, error::permission_denied(ENOT_OWN_THE_VOTING_DAO_TOKEN)); - i = i + 1; - }; + }); total } diff --git 
a/aptos-move/move-examples/defi/sources/locked_coins.move b/aptos-move/move-examples/defi/sources/locked_coins.move index 8790bf7d3966f..adc13d861c5d6 100644 --- a/aptos-move/move-examples/defi/sources/locked_coins.move +++ b/aptos-move/move-examples/defi/sources/locked_coins.move @@ -156,13 +156,10 @@ module defi::locked_coins { sponsor: &signer, recipients: vector
, amounts: vector, unlock_time_secs: u64) acquires Locks { let len = vector::length(&recipients); assert!(len == vector::length(&amounts), error::invalid_argument(EINVALID_RECIPIENTS_LIST_LENGTH)); - let i = 0; - while (i < len) { - let recipient = *vector::borrow(&recipients, i); + vector::enumerate_ref(&recipients, |i, recipient| { let amount = *vector::borrow(&amounts, i); - add_locked_coins(sponsor, recipient, amount, unlock_time_secs); - i = i + 1; - } + add_locked_coins(sponsor, *recipient, amount, unlock_time_secs); + }); } /// `Sponsor` can add locked coins for `recipient` with given unlock timestamp (in seconds). @@ -213,13 +210,9 @@ module defi::locked_coins { let sponsor_address = signer::address_of(sponsor); assert!(exists>(sponsor_address), error::not_found(ESPONSOR_ACCOUNT_NOT_INITIALIZED)); - let len = vector::length(&recipients); - let i = 0; - while (i < len) { - let recipient = *vector::borrow(&recipients, i); - update_lockup(sponsor, recipient, new_unlock_time_secs); - i = i + 1; - }; + vector::for_each_ref(&recipients, |recipient| { + update_lockup(sponsor, *recipient, new_unlock_time_secs); + }); } /// Sponsor can update the lockup of an existing lock. @@ -246,13 +239,9 @@ module defi::locked_coins { let sponsor_address = signer::address_of(sponsor); assert!(exists>(sponsor_address), error::not_found(ESPONSOR_ACCOUNT_NOT_INITIALIZED)); - let len = vector::length(&recipients); - let i = 0; - while (i < len) { - let recipient = *vector::borrow(&recipients, i); - cancel_lockup(sponsor, recipient); - i = i + 1; - }; + vector::for_each_ref(&recipients, |recipient| { + cancel_lockup(sponsor, *recipient); + }); } /// Sponsor can cancel an existing lock. diff --git a/aptos-move/move-examples/fungible_asset/Move.toml b/aptos-move/move-examples/fungible_asset/Move.toml deleted file mode 100644 index 50d6e3404a053..0000000000000 --- a/aptos-move/move-examples/fungible_asset/Move.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "FungibleAsset" -version = "0.0.0" - -[addresses] -aptos_framework = "0x1" -fungible_asset_extension = "_" - -[dependencies] -AptosFramework = { local = "../../framework/aptos-framework" } diff --git a/aptos-move/move-examples/fungible_asset/README.md b/aptos-move/move-examples/fungible_asset/README.md new file mode 100644 index 0000000000000..e4e0e0419099c --- /dev/null +++ b/aptos-move/move-examples/fungible_asset/README.md @@ -0,0 +1,6 @@ +# Fungible Asset + +* Managed fungible asset: A full-fledged fungible asset with customizable management capabilities and associated functions, based on which a light example is provided to show how to issue coin. +* Managed fungible token: A fungible token example that adds token resource to the metadata object. +* Simple managed coin: an all-in-one module implementing managed coin using fungible asset with limited functionalities (only deal with primary fungible stores). +* Pre-minted managed coin: An example issuing pre-minting coin based on managed fungible asset. 
\ No newline at end of file diff --git a/aptos-move/move-examples/fungible_asset/managed_fungible_asset/Move.toml b/aptos-move/move-examples/fungible_asset/managed_fungible_asset/Move.toml new file mode 100644 index 0000000000000..13f5646be9c3e --- /dev/null +++ b/aptos-move/move-examples/fungible_asset/managed_fungible_asset/Move.toml @@ -0,0 +1,12 @@ +[package] +name = "ManagedFungibleAsset" +version = "0.0.0" + +[addresses] +aptos_framework = "0x1" +aptos_token_objects = "0x4" +example_addr = "_" + +[dependencies] +AptosFramework = { local = "../../../framework/aptos-framework" } +AptosTokenObjects = { local = "../../../framework/aptos-token-objects" } diff --git a/aptos-move/move-examples/fungible_asset/sources/coin_example.move b/aptos-move/move-examples/fungible_asset/managed_fungible_asset/sources/coin_example.move similarity index 59% rename from aptos-move/move-examples/fungible_asset/sources/coin_example.move rename to aptos-move/move-examples/fungible_asset/managed_fungible_asset/sources/coin_example.move index fbd67f9e7dc56..77a893cbc6ce3 100644 --- a/aptos-move/move-examples/fungible_asset/sources/coin_example.move +++ b/aptos-move/move-examples/fungible_asset/managed_fungible_asset/sources/coin_example.move @@ -1,9 +1,10 @@ -/// A coin example using managed_fungible_asset to create a fungible "coin". -module fungible_asset_extension::coin_example { +/// A coin example using managed_fungible_asset to create a fungible "coin" and helper functions to only interact with +/// primary fungible stores only. +module example_addr::coin_example { use aptos_framework::object; - use aptos_framework::fungible_asset::{Metadata, FungibleAsset}; + use aptos_framework::fungible_asset::{Self, Metadata, FungibleAsset}; use aptos_framework::object::Object; - use fungible_asset_extension::managed_fungible_asset; + use example_addr::managed_fungible_asset; use std::string::utf8; const ASSET_SYMBOL: vector = b"YOLO"; @@ -19,49 +20,63 @@ module fungible_asset_extension::coin_example { 8, /* decimals */ utf8(b"http://example.com/favicon.ico"), /* icon */ utf8(b"http://example.com"), /* project */ + vector[true, true, true], /* mint_ref, transfer_ref, burn_ref */ ); } #[view] /// Return the address of the metadata that's created when this module is deployed. public fun get_metadata(): Object { - let metadata_address = object::create_object_address(&@fungible_asset_extension, ASSET_SYMBOL); + let metadata_address = object::create_object_address(&@example_addr, ASSET_SYMBOL); object::address_to_object(metadata_address) } /// Mint as the owner of metadata object. - public entry fun mint(admin: &signer, amount: u64, to: address) { - managed_fungible_asset::mint(admin, get_metadata(), amount, to); + public entry fun mint(admin: &signer, to: address, amount: u64) { + managed_fungible_asset::mint_to_primary_stores(admin, get_metadata(), vector[to], vector[amount]); } /// Transfer as the owner of metadata object ignoring `frozen` field. public entry fun transfer(admin: &signer, from: address, to: address, amount: u64) { - managed_fungible_asset::transfer(admin, get_metadata(), from, to, amount); + managed_fungible_asset::transfer_between_primary_stores( + admin, + get_metadata(), + vector[from], + vector[to], + vector[amount] + ); } /// Burn fungible assets as the owner of metadata object. 
public entry fun burn(admin: &signer, from: address, amount: u64) { - managed_fungible_asset::burn(admin, get_metadata(), from, amount); + managed_fungible_asset::burn_from_primary_stores(admin, get_metadata(), vector[from], vector[amount]); } /// Freeze an account so it cannot transfer or receive fungible assets. public entry fun freeze_account(admin: &signer, account: address) { - managed_fungible_asset::freeze_account(admin, get_metadata(), account); + managed_fungible_asset::set_primary_stores_frozen_status(admin, get_metadata(), vector[account], true); } /// Unfreeze an account so it can transfer or receive fungible assets. public entry fun unfreeze_account(admin: &signer, account: address) { - managed_fungible_asset::unfreeze_account(admin, get_metadata(), account); + managed_fungible_asset::set_primary_stores_frozen_status(admin, get_metadata(), vector[account], false); } /// Withdraw as the owner of metadata object ignoring `frozen` field. - public fun withdraw(admin: &signer, amount: u64, from: address): FungibleAsset { - managed_fungible_asset::withdraw(admin, get_metadata(), amount, from) + public fun withdraw(admin: &signer, from: address, amount: u64): FungibleAsset { + managed_fungible_asset::withdraw_from_primary_stores(admin, get_metadata(), vector[from], vector[amount]) } /// Deposit as the owner of metadata object ignoring `frozen` field. - public fun deposit(admin: &signer, to: address, fa: FungibleAsset) { - managed_fungible_asset::deposit(admin, get_metadata(), to, fa); + public fun deposit(admin: &signer, fa: FungibleAsset, to: address) { + let amount = fungible_asset::amount(&fa); + managed_fungible_asset::deposit_to_primary_stores( + admin, + &mut fa, + vector[to], + vector[amount] + ); + fungible_asset::destroy_zero(fa); } #[test_only] @@ -69,13 +84,13 @@ module fungible_asset_extension::coin_example { #[test_only] use std::signer; - #[test(creator = @0xcafe)] + #[test(creator = @example_addr)] fun test_basic_flow(creator: &signer) { init_module(creator); let creator_address = signer::address_of(creator); let aaron_address = @0xface; - mint(creator, 100, creator_address); + mint(creator, creator_address, 100); let metadata = get_metadata(); assert!(primary_fungible_store::balance(creator_address, metadata) == 100, 4); freeze_account(creator, creator_address); @@ -88,11 +103,11 @@ module fungible_asset_extension::coin_example { burn(creator, creator_address, 90); } - #[test(creator = @0xcafe, aaron = @0xface)] - #[expected_failure(abort_code = 0x50001, location = fungible_asset_extension::managed_fungible_asset)] + #[test(creator = @example_addr, aaron = @0xface)] + #[expected_failure(abort_code = 0x50001, location = example_addr::managed_fungible_asset)] fun test_permission_denied(creator: &signer, aaron: &signer) { init_module(creator); let creator_address = signer::address_of(creator); - mint(aaron, 100, creator_address); + mint(aaron, creator_address, 100); } } diff --git a/aptos-move/move-examples/fungible_asset/managed_fungible_asset/sources/managed_fungible_asset.move b/aptos-move/move-examples/fungible_asset/managed_fungible_asset/sources/managed_fungible_asset.move new file mode 100644 index 0000000000000..316d022642496 --- /dev/null +++ b/aptos-move/move-examples/fungible_asset/managed_fungible_asset/sources/managed_fungible_asset.move @@ -0,0 +1,419 @@ +/// This module provides a managed fungible asset that allows the owner of the metadata object to +/// mint, transfer and burn fungible assets. 
+/// +/// The functionalities offered by this module are: +/// 1. Mint fungible assets to fungible stores as the owner of metadata object. +/// 2. Transfer fungible assets as the owner of metadata object ignoring `frozen` field between fungible stores. +/// 3. Burn fungible assets from fungible stores as the owner of metadata object. +/// 4. Withdraw the merged fungible assets from fungible stores as the owner of metadata object. +/// 5. Deposit fungible assets to fungible stores. +module example_addr::managed_fungible_asset { + use aptos_framework::fungible_asset::{Self, MintRef, TransferRef, BurnRef, Metadata, FungibleStore, FungibleAsset}; + use aptos_framework::object::{Self, Object, ConstructorRef}; + use aptos_framework::primary_fungible_store; + use std::error; + use std::signer; + use std::string::String; + use std::option; + + /// Only fungible asset metadata owner can make changes. + const ERR_NOT_OWNER: u64 = 1; + /// The length of ref_flags is not 3. + const ERR_INVALID_REF_FLAGS_LENGTH: u64 = 2; + /// The lengths of two vector do not equal. + const ERR_VECTORS_LENGTH_MISMATCH: u64 = 3; + /// MintRef error. + const ERR_MINT_REF: u64 = 4; + /// TransferRef error. + const ERR_TRANSFER_REF: u64 = 5; + /// BurnRef error. + const ERR_BURN_REF: u64 = 6; + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Hold refs to control the minting, transfer and burning of fungible assets. + struct ManagingRefs has key { + mint_ref: Option, + transfer_ref: Option, + burn_ref: Option, + } + + /// Initialize metadata object and store the refs specified by `ref_flags`. + public fun initialize( + constructor_ref: &ConstructorRef, + maximum_supply: u128, + name: String, + symbol: String, + decimals: u8, + icon_uri: String, + project_uri: String, + ref_flags: vector, + ) { + assert!(vector::length(&ref_flags) == 3, error::invalid_argument(ERR_INVALID_REF_FLAGS_LENGTH)); + let supply = if (maximum_supply != 0) { + option::some(maximum_supply) + } else { + option::none() + }; + primary_fungible_store::create_primary_store_enabled_fungible_asset( + constructor_ref, + supply, + name, + symbol, + decimals, + icon_uri, + project_uri, + ); + + // Optionally create mint/burn/transfer refs to allow creator to manage the fungible asset. + let mint_ref = if (*vector::borrow(&ref_flags, 0)) { + option::some(fungible_asset::generate_mint_ref(constructor_ref)) + } else { + option::none() + }; + let transfer_ref = if (*vector::borrow(&ref_flags, 1)) { + option::some(fungible_asset::generate_transfer_ref(constructor_ref)) + } else { + option::none() + }; + let burn_ref = if (*vector::borrow(&ref_flags, 2)) { + option::some(fungible_asset::generate_burn_ref(constructor_ref)) + } else { + option::none() + }; + let metadata_object_signer = object::generate_signer(constructor_ref); + move_to( + &metadata_object_signer, + ManagingRefs { mint_ref, transfer_ref, burn_ref } + ) + } + + /// Mint as the owner of metadata object to the primary fungible stores of the accounts with amounts of FAs. + public entry fun mint_to_primary_stores( + admin: &signer, + asset: Object, + to: vector
, + amounts: vector + ) acquires ManagingRefs { + let receiver_primary_stores = vector::map( + to, + |addr| primary_fungible_store::ensure_primary_store_exists(addr, asset) + ); + mint(admin, asset, receiver_primary_stores, amounts); + } + + + /// Mint as the owner of metadata object to multiple fungible stores with amounts of FAs. + public entry fun mint( + admin: &signer, + asset: Object, + stores: vector>, + amounts: vector, + ) acquires ManagingRefs { + let length = vector::length(&stores); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let mint_ref = authorized_borrow_mint_ref(admin, asset); + let i = 0; + while (i < length) { + fungible_asset::mint_to(mint_ref, *vector::borrow(&stores, i), *vector::borrow(&amounts, i)); + i = i + 1; + } + } + + /// Transfer as the owner of metadata object ignoring `frozen` field from primary stores to primary stores of + /// accounts. + public entry fun transfer_between_primary_stores( + admin: &signer, + asset: Object, + from: vector
<address>,
+        to: vector<address>
, + amounts: vector + ) acquires ManagingRefs { + let sender_primary_stores = vector::map( + from, + |addr| primary_fungible_store::primary_store(addr, asset) + ); + let receiver_primary_stores = vector::map( + to, + |addr| primary_fungible_store::ensure_primary_store_exists(addr, asset) + ); + transfer(admin, asset, sender_primary_stores, receiver_primary_stores, amounts); + } + + /// Transfer as the owner of metadata object ignoring `frozen` field between fungible stores. + public entry fun transfer( + admin: &signer, + asset: Object, + sender_stores: vector>, + receiver_stores: vector>, + amounts: vector, + ) acquires ManagingRefs { + let length = vector::length(&sender_stores); + assert!(length == vector::length(&receiver_stores), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let transfer_ref = authorized_borrow_transfer_ref(admin, asset); + let i = 0; + while (i < length) { + fungible_asset::transfer_with_ref( + transfer_ref, + *vector::borrow(&sender_stores, i), + *vector::borrow(&receiver_stores, i), + *vector::borrow(&amounts, i) + ); + i = i + 1; + } + } + + /// Burn fungible assets as the owner of metadata object from the primary stores of accounts. + public entry fun burn_from_primary_stores( + admin: &signer, + asset: Object, + from: vector
, + amounts: vector + ) acquires ManagingRefs { + let primary_stores = vector::map( + from, + |addr| primary_fungible_store::primary_store(addr, asset) + ); + burn(admin, asset, primary_stores, amounts); + } + + /// Burn fungible assets as the owner of metadata object from fungible stores. + public entry fun burn( + admin: &signer, + asset: Object, + stores: vector>, + amounts: vector + ) acquires ManagingRefs { + let length = vector::length(&stores); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let burn_ref = authorized_borrow_burn_ref(admin, asset); + let i = 0; + while (i < length) { + fungible_asset::burn_from(burn_ref, *vector::borrow(&stores, i), *vector::borrow(&amounts, i)); + i = i + 1; + }; + } + + + /// Freeze/unfreeze the primary stores of accounts so they cannot transfer or receive fungible assets. + public entry fun set_primary_stores_frozen_status( + admin: &signer, + asset: Object, + accounts: vector
, + frozen: bool + ) acquires ManagingRefs { + let primary_stores = vector::map(accounts, |acct| { + primary_fungible_store::ensure_primary_store_exists(acct, asset) + }); + set_frozen_status(admin, asset, primary_stores, frozen); + } + + /// Freeze/unfreeze the fungible stores so they cannot transfer or receive fungible assets. + public entry fun set_frozen_status( + admin: &signer, + asset: Object, + stores: vector>, + frozen: bool + ) acquires ManagingRefs { + let transfer_ref = authorized_borrow_transfer_ref(admin, asset); + vector::for_each(stores, |store| { + fungible_asset::set_frozen_flag(transfer_ref, store, frozen); + }); + } + + /// Withdraw as the owner of metadata object ignoring `frozen` field from primary fungible stores of accounts. + public fun withdraw_from_primary_stores( + admin: &signer, + asset: Object, + from: vector
, + amounts: vector + ): FungibleAsset acquires ManagingRefs { + let primary_stores = vector::map( + from, + |addr| primary_fungible_store::primary_store(addr, asset) + ); + withdraw(admin, asset, primary_stores, amounts) + } + + /// Withdraw as the owner of metadata object ignoring `frozen` field from fungible stores. + /// return a fungible asset `fa` where `fa.amount = sum(amounts)`. + public fun withdraw( + admin: &signer, + asset: Object, + stores: vector>, + amounts: vector + ): FungibleAsset acquires ManagingRefs { + let length = vector::length(&stores); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let transfer_ref = authorized_borrow_transfer_ref(admin, asset); + let i = 0; + let sum = fungible_asset::zero(asset); + while (i < length) { + let fa = fungible_asset::withdraw_with_ref( + transfer_ref, + *vector::borrow(&stores, i), + *vector::borrow(&amounts, i) + ); + fungible_asset::merge(&mut sum, fa); + i = i + 1; + }; + sum + } + + /// Deposit as the owner of metadata object ignoring `frozen` field to primary fungible stores of accounts from a + /// single source of fungible asset. + public fun deposit_to_primary_stores( + admin: &signer, + fa: &mut FungibleAsset, + from: vector
<address>,
+        amounts: vector<u64>,
+    ) acquires ManagingRefs {
+        let primary_stores = vector::map(
+            from,
+            |addr| primary_fungible_store::ensure_primary_store_exists(addr, fungible_asset::asset_metadata(fa))
+        );
+        deposit(admin, fa, primary_stores, amounts);
+    }
+
+    /// Deposit as the owner of metadata object, ignoring the `frozen` field, to fungible stores from a single
+    /// fungible asset. The amount left in `fa` is `fa.amount - sum(amounts)`.
+    public fun deposit(
+        admin: &signer,
+        fa: &mut FungibleAsset,
+        stores: vector<Object<FungibleStore>>,
+        amounts: vector<u64>
+    ) acquires ManagingRefs {
+        let length = vector::length(&stores);
+        assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH));
+        let transfer_ref = authorized_borrow_transfer_ref(admin, fungible_asset::asset_metadata(fa));
+        let i = 0;
+        while (i < length) {
+            let split_fa = fungible_asset::extract(fa, *vector::borrow(&amounts, i));
+            fungible_asset::deposit_with_ref(
+                transfer_ref,
+                *vector::borrow(&stores, i),
+                split_fa,
+            );
+            i = i + 1;
+        };
+    }
+
+    /// Borrow the immutable reference of the refs of `metadata`.
+    /// This validates that the signer is the metadata object's owner.
+    inline fun authorized_borrow_refs(
+        owner: &signer,
+        asset: Object<Metadata>,
+    ): &ManagingRefs acquires ManagingRefs {
+        assert!(object::is_owner(asset, signer::address_of(owner)), error::permission_denied(ERR_NOT_OWNER));
+        borrow_global<ManagingRefs>(object::object_address(&asset))
+    }
+
+    /// Check the existence and borrow `MintRef`.
+    inline fun authorized_borrow_mint_ref(
+        owner: &signer,
+        asset: Object<Metadata>,
+    ): &MintRef acquires ManagingRefs {
+        let refs = authorized_borrow_refs(owner, asset);
+        assert!(option::is_some(&refs.mint_ref), error::not_found(ERR_MINT_REF));
+        option::borrow(&refs.mint_ref)
+    }
+
+    /// Check the existence and borrow `TransferRef`.
+    inline fun authorized_borrow_transfer_ref(
+        owner: &signer,
+        asset: Object<Metadata>,
+    ): &TransferRef acquires ManagingRefs {
+        let refs = authorized_borrow_refs(owner, asset);
+        assert!(option::is_some(&refs.transfer_ref), error::not_found(ERR_TRANSFER_REF));
+        option::borrow(&refs.transfer_ref)
+    }
+
+    /// Check the existence and borrow `BurnRef`.
+    inline fun authorized_borrow_burn_ref(
+        owner: &signer,
+        asset: Object<Metadata>,
+    ): &BurnRef acquires ManagingRefs {
+        let refs = authorized_borrow_refs(owner, asset);
+        assert!(option::is_some(&refs.burn_ref), error::not_found(ERR_BURN_REF));
+        option::borrow(&refs.burn_ref)
+    }
+
+    #[test_only]
+    use aptos_framework::object::object_from_constructor_ref;
+    #[test_only]
+    use std::string::utf8;
+    use std::vector;
+    use std::option::Option;
+
+    #[test_only]
+    fun create_test_mfa(creator: &signer): Object<Metadata> {
+        let constructor_ref = &object::create_named_object(creator, b"APT");
+        initialize(
+            constructor_ref,
+            0,
+            utf8(b"Aptos Token"), /* name */
+            utf8(b"APT"), /* symbol */
+            8, /* decimals */
+            utf8(b"http://example.com/favicon.ico"), /* icon */
+            utf8(b"http://example.com"), /* project */
+            vector[true, true, true]
+        );
+        object_from_constructor_ref<Metadata>(constructor_ref)
+    }
+
+    #[test(creator = @example_addr)]
+    fun test_basic_flow(
+        creator: &signer,
+    ) acquires ManagingRefs {
+        let metadata = create_test_mfa(creator);
+        let creator_address = signer::address_of(creator);
+        let aaron_address = @0xface;
+
+        mint_to_primary_stores(creator, metadata, vector[creator_address, aaron_address], vector[100, 50]);
+        assert!(primary_fungible_store::balance(creator_address, metadata) == 100, 1);
+        assert!(primary_fungible_store::balance(aaron_address, metadata) == 50, 2);
+
+        set_primary_stores_frozen_status(creator, metadata, vector[creator_address, aaron_address], true);
+        assert!(primary_fungible_store::is_frozen(creator_address, metadata), 3);
+        assert!(primary_fungible_store::is_frozen(aaron_address, metadata), 4);
+
+        transfer_between_primary_stores(
+            creator,
+            metadata,
+            vector[creator_address, aaron_address],
+            vector[aaron_address, creator_address],
+            vector[10, 5]
+        );
+        assert!(primary_fungible_store::balance(creator_address, metadata) == 95, 5);
+        assert!(primary_fungible_store::balance(aaron_address, metadata) == 55, 6);
+
+        set_primary_stores_frozen_status(creator, metadata, vector[creator_address, aaron_address], false);
+        assert!(!primary_fungible_store::is_frozen(creator_address, metadata), 7);
+        assert!(!primary_fungible_store::is_frozen(aaron_address, metadata), 8);
+
+        let fa = withdraw_from_primary_stores(
+            creator,
+            metadata,
+            vector[creator_address, aaron_address],
+            vector[25, 15]
+        );
+        assert!(fungible_asset::amount(&fa) == 40, 9);
+        deposit_to_primary_stores(creator, &mut fa, vector[creator_address, aaron_address], vector[30, 10]);
+        fungible_asset::destroy_zero(fa);
+
+        burn_from_primary_stores(creator, metadata, vector[creator_address, aaron_address], vector[100, 50]);
+        assert!(primary_fungible_store::balance(creator_address, metadata) == 0, 10);
+        assert!(primary_fungible_store::balance(aaron_address, metadata) == 0, 11);
+    }
+
+    #[test(creator = @example_addr, aaron = @0xface)]
+    #[expected_failure(abort_code = 0x50001, location = Self)]
+    fun test_permission_denied(
+        creator: &signer,
+        aaron: &signer
+    ) acquires ManagingRefs {
+        let metadata = create_test_mfa(creator);
+        let creator_address = signer::address_of(creator);
+        mint_to_primary_stores(aaron, metadata, vector[creator_address], vector[100]);
+    }
+}
diff --git a/aptos-move/move-examples/fungible_asset/managed_fungible_token/Move.toml b/aptos-move/move-examples/fungible_asset/managed_fungible_token/Move.toml
new file mode 100644
index 0000000000000..f45cd7800e185
--- /dev/null
+++ b/aptos-move/move-examples/fungible_asset/managed_fungible_token/Move.toml
@@ -0,0 +1,13 @@
+[package]
+name = 
"ManagedFungibleToken" +version = "0.0.0" + +[addresses] +aptos_framework = "0x1" +aptos_token_objects = "0x4" +example_addr = "_" + +[dependencies] +AptosFramework = { local = "../../../framework/aptos-framework" } +AptosTokenObjects = { local = "../../../framework/aptos-token-objects" } +ManagedFungibleAsset = { local = "../managed_fungible_asset" } diff --git a/aptos-move/move-examples/fungible_asset/managed_fungible_token/sources/managed_fungible_token.move b/aptos-move/move-examples/fungible_asset/managed_fungible_token/sources/managed_fungible_token.move new file mode 100644 index 0000000000000..028be76f94b92 --- /dev/null +++ b/aptos-move/move-examples/fungible_asset/managed_fungible_token/sources/managed_fungible_token.move @@ -0,0 +1,63 @@ +/// An example combining fungible assets with token as fungible token. In this example, a token object is used as +/// metadata to create fungible units, aka, fungible tokens. +module example_addr::managed_fungible_token { + use aptos_framework::fungible_asset::Metadata; + use aptos_framework::object::{Self, Object}; + use std::string::{utf8, String}; + use std::option; + use aptos_token_objects::token::{create_named_token, create_token_seed}; + use aptos_token_objects::collection::create_fixed_collection; + use example_addr::managed_fungible_asset; + + const ASSET_SYMBOL: vector = b"YOLO"; + + /// Initialize metadata object and store the refs. + fun init_module(admin: &signer) { + let collection_name: String = utf8(b"test collection name"); + let token_name: String = utf8(b"test token name"); + create_fixed_collection( + admin, + utf8(b"test collection description"), + 1, + collection_name, + option::none(), + utf8(b"http://aptoslabs.com/collection"), + ); + let constructor_ref = &create_named_token(admin, + collection_name, + utf8(b"test token description"), + token_name, + option::none(), + utf8(b"http://aptoslabs.com/token"), + ); + + managed_fungible_asset::initialize( + constructor_ref, + 0, /* maximum_supply. 0 means no maximum */ + utf8(b"test fungible token"), /* name */ + utf8(ASSET_SYMBOL), /* symbol */ + 0, /* decimals */ + utf8(b"http://example.com/favicon.ico"), /* icon */ + utf8(b"http://example.com"), /* project */ + vector[true, true, true], /* mint_ref, transfer_ref, burn_ref */ + ); + } + + #[view] + /// Return the address of the managed fungible asset that's created when this module is deployed. + /// This function is optional as a helper function for offline applications. 
    public fun get_metadata(): Object<Metadata> {
+        let collection_name: String = utf8(b"test collection name");
+        let token_name: String = utf8(b"test token name");
+        let asset_address = object::create_object_address(
+            &@example_addr,
+            create_token_seed(&collection_name, &token_name)
+        );
+        object::address_to_object<Metadata>(asset_address)
+    }
+
+    #[test(creator = @example_addr)]
+    fun test_init(creator: &signer) {
+        init_module(creator);
+    }
+}
diff --git a/aptos-move/move-examples/fungible_asset/preminted_managed_coin/Move.toml b/aptos-move/move-examples/fungible_asset/preminted_managed_coin/Move.toml
new file mode 100644
index 0000000000000..29cae9c2511fe
--- /dev/null
+++ b/aptos-move/move-examples/fungible_asset/preminted_managed_coin/Move.toml
@@ -0,0 +1,11 @@
+[package]
+name = "PremintedManagedCoin"
+version = "0.0.0"
+
+[addresses]
+aptos_framework = "0x1"
+example_addr = "_"
+
+[dependencies]
+AptosFramework = { local = "../../../framework/aptos-framework" }
+ManagedFungibleAsset = { local = "../managed_fungible_asset" }
diff --git a/aptos-move/move-examples/fungible_asset/preminted_managed_coin/sources/preminted_managed_coin.move b/aptos-move/move-examples/fungible_asset/preminted_managed_coin/sources/preminted_managed_coin.move
new file mode 100644
index 0000000000000..8993acebcecd5
--- /dev/null
+++ b/aptos-move/move-examples/fungible_asset/preminted_managed_coin/sources/preminted_managed_coin.move
@@ -0,0 +1,63 @@
+/// This module shows an example of how to issue a preminted coin with only `transfer` and `burn` managing capabilities.
+/// It leverages the `managed_fungible_asset` module, storing only `TransferRef` and `BurnRef` after pre-minting a
+/// predefined total supply to a reserve account. After initialization, the total supply can never increase because the
+/// `MintRef` of this fungible asset no longer exists.
+/// The `init_module()` code can be modified to customize the managing refs as needed.
+module example_addr::preminted_managed_coin {
+    use aptos_framework::fungible_asset::{Self, Metadata};
+    use aptos_framework::object::{Self, Object};
+    use aptos_framework::primary_fungible_store;
+    use example_addr::managed_fungible_asset;
+    use std::signer;
+    use std::string::utf8;
+
+    const ASSET_SYMBOL: vector<u8> = b"MEME";
+    const PRE_MINTED_TOTAL_SUPPLY: u64 = 10000;
+
+    /// Initialize metadata object and store the refs.
+    fun init_module(admin: &signer) {
+        let constructor_ref = &object::create_named_object(admin, ASSET_SYMBOL);
+        managed_fungible_asset::initialize(
+            constructor_ref,
+            1000000000, /* maximum_supply */
+            utf8(b"preminted coin"), /* name */
+            utf8(ASSET_SYMBOL), /* symbol */
+            8, /* decimals */
+            utf8(b"http://example.com/favicon.ico"), /* icon */
+            utf8(b"http://example.com"), /* project */
+            vector[false, true, true], /* mint_ref, transfer_ref, burn_ref */
+        );
+
+        // Create a mint ref to premint the fixed total supply into a specific account.
+        // This account can be any account, including a normal user account, a resource account, a multisig account, etc.
+        // We just use the creator account to show the proof of concept.
+        let mint_ref = fungible_asset::generate_mint_ref(constructor_ref);
+        let admin_primary_store = primary_fungible_store::ensure_primary_store_exists(
+            signer::address_of(admin),
+            get_metadata()
+        );
+        fungible_asset::mint_to(&mint_ref, admin_primary_store, PRE_MINTED_TOTAL_SUPPLY);
+    }
+
+    #[view]
+    /// Return the address of the metadata that's created when this module is deployed.
+ /// This function is optional as a helper function for offline applications. + public fun get_metadata(): Object { + let metadata_address = object::create_object_address(&@example_addr, ASSET_SYMBOL); + object::address_to_object(metadata_address) + } + + #[test_only] + use std::option; + + #[test(creator = @example_addr)] + #[expected_failure(abort_code = 0x60004, location = example_addr::managed_fungible_asset)] + fun test_basic_flow(creator: &signer) { + init_module(creator); + let creator_address = signer::address_of(creator); + let metadata = get_metadata(); + + assert!(option::destroy_some(fungible_asset::supply(metadata)) == (PRE_MINTED_TOTAL_SUPPLY as u128), 1); + managed_fungible_asset::mint_to_primary_stores(creator, metadata, vector[creator_address], vector[100]); + } +} diff --git a/aptos-move/move-examples/fungible_asset/simple_managed_coin/Move.toml b/aptos-move/move-examples/fungible_asset/simple_managed_coin/Move.toml new file mode 100644 index 0000000000000..77d11359f270d --- /dev/null +++ b/aptos-move/move-examples/fungible_asset/simple_managed_coin/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "SimpleManagedCoin" +version = "0.0.0" + +[addresses] +aptos_framework = "0x1" +example_addr = "_" + +[dependencies] +AptosFramework = { local = "../../../framework/aptos-framework" } diff --git a/aptos-move/move-examples/fungible_asset/sources/managed_coin.move b/aptos-move/move-examples/fungible_asset/simple_managed_coin/sources/simple_managed_coin.move similarity index 93% rename from aptos-move/move-examples/fungible_asset/sources/managed_coin.move rename to aptos-move/move-examples/fungible_asset/simple_managed_coin/sources/simple_managed_coin.move index 1d4ac7efca315..3a0c77fff44e3 100644 --- a/aptos-move/move-examples/fungible_asset/sources/managed_coin.move +++ b/aptos-move/move-examples/fungible_asset/simple_managed_coin/sources/simple_managed_coin.move @@ -1,7 +1,7 @@ /// A 2-in-1 module that combines managed_fungible_asset and coin_example into one module that when deployed, the /// deployer will be creating a new managed fungible asset with the hardcoded supply config, name, symbol, and decimals. -/// The address of the asset can be obtained via get_metadata(). -module fungible_asset_extension::managed_coin { +/// The address of the asset can be obtained via get_metadata(). As a simple version, it only deal with primary stores. +module example_addr::simple_managed_coin { use aptos_framework::fungible_asset::{Self, MintRef, TransferRef, BurnRef, Metadata, FungibleAsset}; use aptos_framework::object::{Self, Object}; use aptos_framework::primary_fungible_store; @@ -50,7 +50,7 @@ module fungible_asset_extension::managed_coin { #[view] /// Return the address of the managed fungible asset that's created when this module is deployed. 
public fun get_metadata(): Object { - let asset_address = object::create_object_address(&@fungible_asset_extension, ASSET_SYMBOL); + let asset_address = object::create_object_address(&@example_addr, ASSET_SYMBOL); object::address_to_object(asset_address) } @@ -67,7 +67,7 @@ module fungible_asset_extension::managed_coin { public entry fun transfer(admin: &signer, from: address, to: address, amount: u64) acquires ManagedFungibleAsset { let asset = get_metadata(); let transfer_ref = &authorized_borrow_refs(admin, asset).transfer_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, asset); + let from_wallet = primary_fungible_store::primary_store(from, asset); let to_wallet = primary_fungible_store::ensure_primary_store_exists(to, asset); fungible_asset::transfer_with_ref(transfer_ref, from_wallet, to_wallet, amount); } @@ -76,7 +76,7 @@ module fungible_asset_extension::managed_coin { public entry fun burn(admin: &signer, from: address, amount: u64) acquires ManagedFungibleAsset { let asset = get_metadata(); let burn_ref = &authorized_borrow_refs(admin, asset).burn_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, asset); + let from_wallet = primary_fungible_store::primary_store(from, asset); fungible_asset::burn_from(burn_ref, from_wallet, amount); } @@ -100,7 +100,7 @@ module fungible_asset_extension::managed_coin { public fun withdraw(admin: &signer, amount: u64, from: address): FungibleAsset acquires ManagedFungibleAsset { let asset = get_metadata(); let transfer_ref = &authorized_borrow_refs(admin, asset).transfer_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, asset); + let from_wallet = primary_fungible_store::primary_store(from, asset); fungible_asset::withdraw_with_ref(transfer_ref, from_wallet, amount) } @@ -122,7 +122,7 @@ module fungible_asset_extension::managed_coin { borrow_global(object::object_address(&asset)) } - #[test(creator = @0xcafe)] + #[test(creator = @example_addr)] fun test_basic_flow( creator: &signer, ) acquires ManagedFungibleAsset { @@ -143,7 +143,7 @@ module fungible_asset_extension::managed_coin { burn(creator, creator_address, 90); } - #[test(creator = @0xcafe, aaron = @0xface)] + #[test(creator = @example_addr, aaron = @0xface)] #[expected_failure(abort_code = 0x50001, location = Self)] fun test_permission_denied( creator: &signer, diff --git a/aptos-move/move-examples/fungible_asset/sources/managed_fungible_asset.move b/aptos-move/move-examples/fungible_asset/sources/managed_fungible_asset.move deleted file mode 100644 index 685e531c80478..0000000000000 --- a/aptos-move/move-examples/fungible_asset/sources/managed_fungible_asset.move +++ /dev/null @@ -1,209 +0,0 @@ -/// This module provides a managed fungible asset that allows the owner of the metadata object to -/// mint, transfer and burn fungible assets. -/// -/// The functionalities offered by this module are: -/// 1. Mint fungible assets as the owner of metadata object. -/// 2. Transfer fungible assets as the owner of metadata object ignoring `frozen` field. -/// 3. Burn fungible assets as the owner of metadata object. 
-module fungible_asset_extension::managed_fungible_asset { - use aptos_framework::fungible_asset::{Self, MintRef, TransferRef, BurnRef, FungibleAsset, Metadata}; - use aptos_framework::object::{Self, Object, ConstructorRef}; - use aptos_framework::primary_fungible_store; - use std::error; - use std::signer; - use std::string::String; - use std::option; - - /// Only fungible asset metadata owner can make changes. - const ENOT_OWNER: u64 = 1; - - #[resource_group_member(group = aptos_framework::object::ObjectGroup)] - /// Hold refs to control the minting, transfer and burning of fungible assets. - struct ManagedFungibleAsset has key { - mint_ref: MintRef, - transfer_ref: TransferRef, - burn_ref: BurnRef, - } - - /// Initialize metadata object and store the refs. - public fun initialize( - constructor_ref: &ConstructorRef, - maximum_supply: u128, - name: String, - symbol: String, - decimals: u8, - icon_uri: String, - project_uri: String, - ) { - let supply = if (maximum_supply != 0) { - option::some(maximum_supply) - } else { - option::none() - }; - primary_fungible_store::create_primary_store_enabled_fungible_asset( - constructor_ref, - supply, - name, - symbol, - decimals, - icon_uri, - project_uri, - ); - - // Create mint/burn/transfer refs to allow creator to manage the fungible asset. - let mint_ref = fungible_asset::generate_mint_ref(constructor_ref); - let burn_ref = fungible_asset::generate_burn_ref(constructor_ref); - let transfer_ref = fungible_asset::generate_transfer_ref(constructor_ref); - let metadata_object_signer = object::generate_signer(constructor_ref); - move_to( - &metadata_object_signer, - ManagedFungibleAsset { mint_ref, transfer_ref, burn_ref } - ) - } - - /// Mint as the owner of metadata object. - public entry fun mint( - admin: &signer, - metadata: Object, - amount: u64, - to: address - ) acquires ManagedFungibleAsset { - let managed_fungible_asset = authorized_borrow_refs(admin, metadata); - let to_wallet = primary_fungible_store::ensure_primary_store_exists(to, metadata); - let fa = fungible_asset::mint(&managed_fungible_asset.mint_ref, amount); - fungible_asset::deposit_with_ref(&managed_fungible_asset.transfer_ref, to_wallet, fa); - } - - /// Transfer as the owner of metadata object ignoring `frozen` field. - public entry fun transfer( - admin: &signer, - metadata: Object, - from: address, - to: address, - amount: u64 - ) acquires ManagedFungibleAsset { - let transfer_ref = &authorized_borrow_refs(admin, metadata).transfer_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, metadata); - let to_wallet = primary_fungible_store::ensure_primary_store_exists(to, metadata); - fungible_asset::transfer_with_ref(transfer_ref, from_wallet, to_wallet, amount); - } - - /// Burn fungible assets as the owner of metadata object. - public entry fun burn( - admin: &signer, - metadata: Object, - from: address, - amount: u64 - ) acquires ManagedFungibleAsset { - let burn_ref = &authorized_borrow_refs(admin, metadata).burn_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, metadata); - fungible_asset::burn_from(burn_ref, from_wallet, amount); - } - - /// Freeze an account so it cannot transfer or receive fungible assets. 
- public entry fun freeze_account( - admin: &signer, - metadata: Object, - account: address - ) acquires ManagedFungibleAsset { - let transfer_ref = &authorized_borrow_refs(admin, metadata).transfer_ref; - let wallet = primary_fungible_store::ensure_primary_store_exists(account, metadata); - fungible_asset::set_frozen_flag(transfer_ref, wallet, true); - } - - /// Unfreeze an account so it can transfer or receive fungible assets. - public entry fun unfreeze_account( - admin: &signer, - metadata: Object, - account: address, - ) acquires ManagedFungibleAsset { - let transfer_ref = &authorized_borrow_refs(admin, metadata).transfer_ref; - let wallet = primary_fungible_store::ensure_primary_store_exists(account, metadata); - fungible_asset::set_frozen_flag(transfer_ref, wallet, false); - } - - /// Withdraw as the owner of metadata object ignoring `frozen` field. - public fun withdraw( - admin: &signer, - metadata: Object, - amount: u64, - from: address, - ): FungibleAsset acquires ManagedFungibleAsset { - let transfer_ref = &authorized_borrow_refs(admin, metadata).transfer_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, metadata); - fungible_asset::withdraw_with_ref(transfer_ref, from_wallet, amount) - } - - /// Deposit as the owner of metadata object ignoring `frozen` field. - public fun deposit( - admin: &signer, - metadata: Object, - to: address, - fa: FungibleAsset, - ) acquires ManagedFungibleAsset { - let transfer_ref = &authorized_borrow_refs(admin, metadata).transfer_ref; - let to_wallet = primary_fungible_store::ensure_primary_store_exists(to, metadata); - fungible_asset::deposit_with_ref(transfer_ref, to_wallet, fa); - } - - /// Borrow the immutable reference of the refs of `metadata`. - /// This validates that the signer is the metadata object's owner. 
- inline fun authorized_borrow_refs( - owner: &signer, - asset: Object, - ): &ManagedFungibleAsset acquires ManagedFungibleAsset { - assert!(object::is_owner(asset, signer::address_of(owner)), error::permission_denied(ENOT_OWNER)); - borrow_global(object::object_address(&asset)) - } - - #[test_only] - use aptos_framework::object::object_from_constructor_ref; - #[test_only] - use std::string::utf8; - - #[test_only] - fun create_test_mfa(creator: &signer): Object { - let constructor_ref = &object::create_named_object(creator, b"APT"); - initialize( - constructor_ref, - 0, - utf8(b"Aptos Token"), /* name */ - utf8(b"APT"), /* symbol */ - 8, /* decimals */ - utf8(b"http://example.com/favicon.ico"), /* icon */ - utf8(b"http://example.com"), /* project */ - ); - object_from_constructor_ref(constructor_ref) - } - - #[test(creator = @0xcafe)] - fun test_basic_flow( - creator: &signer, - ) acquires ManagedFungibleAsset { - let metadata = create_test_mfa(creator); - let creator_address = signer::address_of(creator); - let aaron_address = @0xface; - - mint(creator, metadata, 100, creator_address); - assert!(primary_fungible_store::balance(creator_address, metadata) == 100, 4); - freeze_account(creator, metadata, creator_address); - assert!(primary_fungible_store::is_frozen(creator_address, metadata), 5); - transfer(creator, metadata, creator_address, aaron_address, 10); - assert!(primary_fungible_store::balance(aaron_address, metadata) == 10, 6); - - unfreeze_account(creator, metadata, creator_address); - assert!(!primary_fungible_store::is_frozen(creator_address, metadata), 7); - burn(creator, metadata, creator_address, 90); - } - - #[test(creator = @0xcafe, aaron = @0xface)] - #[expected_failure(abort_code = 0x50001, location = Self)] - fun test_permission_denied( - creator: &signer, - aaron: &signer - ) acquires ManagedFungibleAsset { - let metadata = create_test_mfa(creator); - let creator_address = signer::address_of(creator); - mint(aaron, metadata, 100, creator_address); - } -} diff --git a/aptos-move/move-examples/fungible_token/Move.toml b/aptos-move/move-examples/fungible_token/Move.toml deleted file mode 100644 index 08298dd4627eb..0000000000000 --- a/aptos-move/move-examples/fungible_token/Move.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "FungibleToken" -version = "0.0.0" - -[addresses] -aptos_framework = "0x1" -aptos_token_objects = "0x4" -fungible_token = "_" - -[dependencies] -AptosFramework = { local = "../../framework/aptos-framework" } -AptosTokenObjects = { local = "../../framework/aptos-token-objects" } diff --git a/aptos-move/move-examples/fungible_token/sources/managed_fungible_token.move b/aptos-move/move-examples/fungible_token/sources/managed_fungible_token.move deleted file mode 100644 index 5f576d8daa48a..0000000000000 --- a/aptos-move/move-examples/fungible_token/sources/managed_fungible_token.move +++ /dev/null @@ -1,179 +0,0 @@ -/// An example combining fungible assets with token as fungible token. In this example, a token object is used as -/// metadata to create fungible units, aka, fungible tokens. 
-module fungible_token::managed_fungible_token { - use aptos_framework::fungible_asset::{Self, MintRef, TransferRef, BurnRef, Metadata, FungibleAsset}; - use aptos_framework::object::{Self, Object}; - use aptos_framework::primary_fungible_store; - use std::error; - use std::signer; - use std::string::{utf8, String}; - use std::option; - use aptos_token_objects::token::{create_named_token, create_token_seed}; - use aptos_token_objects::collection::create_fixed_collection; - - /// Only fungible asset metadata owner can make changes. - const ENOT_OWNER: u64 = 1; - - const ASSET_SYMBOL: vector = b"TEST"; - - #[resource_group_member(group = aptos_framework::object::ObjectGroup)] - /// Hold refs to control the minting, transfer and burning of fungible assets. - struct ManagedFungibleAsset has key { - mint_ref: MintRef, - transfer_ref: TransferRef, - burn_ref: BurnRef, - } - - /// Initialize metadata object and store the refs. - fun init_module(admin: &signer) { - let collection_name: String = utf8(b"test collection name"); - let token_name: String = utf8(b"test token name"); - create_fixed_collection( - admin, - utf8(b"test collection description"), - 1, - collection_name, - option::none(), - utf8(b"http://aptoslabs.com/collection"), - ); - let constructor_ref = &create_named_token(admin, - collection_name, - utf8(b"test token description"), - token_name, - option::none(), - utf8(b"http://aptoslabs.com/token"), - ); - - primary_fungible_store::create_primary_store_enabled_fungible_asset( - constructor_ref, - option::none(), - utf8(b"test fungible asset name"), /* name */ - utf8(ASSET_SYMBOL), /* symbol */ - 2, /* decimals */ - utf8(b"http://aptoslabs.com/favicon.ico"), - utf8(b"http://aptoslabs.com/") - ); - - // Create mint/burn/transfer refs to allow creator to manage the fungible asset. - let mint_ref = fungible_asset::generate_mint_ref(constructor_ref); - let burn_ref = fungible_asset::generate_burn_ref(constructor_ref); - let transfer_ref = fungible_asset::generate_transfer_ref(constructor_ref); - let metadata_object_signer = object::generate_signer(constructor_ref); - move_to( - &metadata_object_signer, - ManagedFungibleAsset { mint_ref, transfer_ref, burn_ref } - ) - } - - #[view] - /// Return the address of the managed fungible asset that's created when this module is deployed. - public fun get_metadata(): Object { - let collection_name: String = utf8(b"test collection name"); - let token_name: String = utf8(b"test token name"); - let asset_address = object::create_object_address( - &@fungible_token, - create_token_seed(&collection_name, &token_name) - ); - object::address_to_object(asset_address) - } - - /// Mint as the owner of metadata object. - public entry fun mint(admin: &signer, amount: u64, to: address) acquires ManagedFungibleAsset { - let asset = get_metadata(); - let managed_fungible_asset = authorized_borrow_refs(admin, asset); - let to_wallet = primary_fungible_store::ensure_primary_store_exists(to, asset); - let fa = fungible_asset::mint(&managed_fungible_asset.mint_ref, amount); - fungible_asset::deposit_with_ref(&managed_fungible_asset.transfer_ref, to_wallet, fa); - } - - /// Transfer as the owner of metadata object ignoring `allow_ungated_transfer` field. 
- public entry fun transfer(admin: &signer, from: address, to: address, amount: u64) acquires ManagedFungibleAsset { - let asset = get_metadata(); - let transfer_ref = &authorized_borrow_refs(admin, asset).transfer_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, asset); - let to_wallet = primary_fungible_store::ensure_primary_store_exists(to, asset); - fungible_asset::transfer_with_ref(transfer_ref, from_wallet, to_wallet, amount); - } - - /// Burn fungible assets as the owner of metadata object. - public entry fun burn(admin: &signer, from: address, amount: u64) acquires ManagedFungibleAsset { - let asset = get_metadata(); - let burn_ref = &authorized_borrow_refs(admin, asset).burn_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, asset); - fungible_asset::burn_from(burn_ref, from_wallet, amount); - } - - /// Freeze an account so it cannot transfer or receive fungible assets. - public entry fun freeze_account(admin: &signer, account: address) acquires ManagedFungibleAsset { - let asset = get_metadata(); - let transfer_ref = &authorized_borrow_refs(admin, asset).transfer_ref; - let wallet = primary_fungible_store::ensure_primary_store_exists(account, asset); - fungible_asset::set_frozen_flag(transfer_ref, wallet, true); - } - - /// Unfreeze an account so it can transfer or receive fungible assets. - public entry fun unfreeze_account(admin: &signer, account: address) acquires ManagedFungibleAsset { - let asset = get_metadata(); - let transfer_ref = &authorized_borrow_refs(admin, asset).transfer_ref; - let wallet = primary_fungible_store::ensure_primary_store_exists(account, asset); - fungible_asset::set_frozen_flag(transfer_ref, wallet, false); - } - - /// Withdraw as the owner of metadata object ignoring `allow_ungated_transfer` field. - public fun withdraw(admin: &signer, amount: u64, from: address): FungibleAsset acquires ManagedFungibleAsset { - let asset = get_metadata(); - let transfer_ref = &authorized_borrow_refs(admin, asset).transfer_ref; - let from_wallet = primary_fungible_store::ensure_primary_store_exists(from, asset); - fungible_asset::withdraw_with_ref(transfer_ref, from_wallet, amount) - } - - /// Deposit as the owner of metadata object ignoring `allow_ungated_transfer` field. - public fun deposit(admin: &signer, to: address, fa: FungibleAsset) acquires ManagedFungibleAsset { - let asset = get_metadata(); - let transfer_ref = &authorized_borrow_refs(admin, asset).transfer_ref; - let to_wallet = primary_fungible_store::ensure_primary_store_exists(to, asset); - fungible_asset::deposit_with_ref(transfer_ref, to_wallet, fa); - } - - /// Borrow the immutable reference of the refs of `metadata`. - /// This validates that the signer is the metadata object's owner. 
- inline fun authorized_borrow_refs( - owner: &signer, - asset: Object, - ): &ManagedFungibleAsset acquires ManagedFungibleAsset { - assert!(object::is_owner(asset, signer::address_of(owner)), error::permission_denied(ENOT_OWNER)); - borrow_global(object::object_address(&asset)) - } - - #[test(creator = @fungible_token)] - fun test_basic_flow( - creator: &signer, - ) acquires ManagedFungibleAsset { - init_module(creator); - let creator_address = signer::address_of(creator); - let aaron_address = @0xface; - - mint(creator, 100, creator_address); - let asset = get_metadata(); - assert!(primary_fungible_store::balance(creator_address, asset) == 100, 4); - freeze_account(creator, creator_address); - assert!(primary_fungible_store::is_frozen(creator_address, asset), 5); - transfer(creator, creator_address, aaron_address, 10); - assert!(primary_fungible_store::balance(aaron_address, asset) == 10, 6); - - unfreeze_account(creator, creator_address); - assert!(!primary_fungible_store::is_frozen(creator_address, asset), 7); - burn(creator, creator_address, 90); - } - - #[test(creator = @fungible_token, aaron = @0xface)] - #[expected_failure(abort_code = 0x50001, location = Self)] - fun test_permission_denied( - creator: &signer, - aaron: &signer - ) acquires ManagedFungibleAsset { - init_module(creator); - let creator_address = signer::address_of(creator); - mint(aaron, 100, creator_address); - } -} diff --git a/aptos-move/move-examples/marketplace/Move.toml b/aptos-move/move-examples/marketplace/Move.toml index 4c1cb19902176..ec5c8cc9933fa 100644 --- a/aptos-move/move-examples/marketplace/Move.toml +++ b/aptos-move/move-examples/marketplace/Move.toml @@ -1,11 +1,11 @@ [package] -name = "MarketPlace" -version = "0.0.1" +name = "Marketplace" +version = "0.0.0" + +[addresses] +marketplace = "_" [dependencies] AptosFramework = { local = "../../framework/aptos-framework" } AptosToken = { local = "../../framework/aptos-token" } - -[addresses] -std = "0x1" -marketplace = "_" \ No newline at end of file +AptosTokenObjects = { local = "../../framework/aptos-token-objects" } diff --git a/aptos-move/move-examples/marketplace/README.md b/aptos-move/move-examples/marketplace/README.md new file mode 100644 index 0000000000000..3822e01b912b4 --- /dev/null +++ b/aptos-move/move-examples/marketplace/README.md @@ -0,0 +1,33 @@ +This introduces the core for a potential Aptos standard around marketplace for assets on-chain. + +The goals of this package are to +* Separate core logical components for readability and expansion over time. +* Where possible leverage function APIs as the layer of compatibility instead of exposing data structures. +* Leverage of objects and resource groups to unify common logic without wasting storage. +* Single definition of a fee schedule for the marketplace where the listing was created. +* Unified framework for auctions and fixed-price listings. +* Support for TokenV1, TokenV2, and Object based assets. +* Support for receiving funds in either Coin or FungibleAsset. 
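
As a rough sketch of how a client might drive the fixed-price flow listed above (it mirrors the `listing_tests` added in this package; the wrapper module name is hypothetical, and the `AptosCoin` type argument plus the pre-existing `token` and `fee_schedule` objects are assumptions for illustration):

```move
// Hypothetical wrapper around the entry functions defined in coin_listing.move.
module marketplace::usage_sketch {
    use aptos_framework::aptos_coin::AptosCoin;
    use aptos_framework::object::{Object, ObjectCore};
    use aptos_framework::timestamp;
    use marketplace::coin_listing;
    use marketplace::fee_schedule::FeeSchedule;
    use marketplace::listing::Listing;

    /// Seller lists `token` at a fixed price of 500, purchasable immediately.
    public entry fun list_for_500(seller: &signer, token: Object<ObjectCore>, fees: Object<FeeSchedule>) {
        coin_listing::init_fixed_price<AptosCoin>(seller, token, fees, timestamp::now_seconds(), 500);
    }

    /// Buyer pays the asking price; commission and royalties are settled inside `purchase`.
    public entry fun buy(purchaser: &signer, listing: Object<Listing>) {
        coin_listing::purchase<AptosCoin>(purchaser, listing);
    }
}
```

In practice the `Listing` object address would be obtained off-chain (for example via an indexer or transaction output) before calling `buy`.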
+ +FeeSchedule includes: +* Listing, bidding, and commission +* Clean interface that allows newer business logic to be added over time, by passing in current pricing information + +All listings support: +* Ability to specify a fixed purchase price +* Define when purchasing may begin +* Embed a fee schedule for the hosting marketplace +* Holding container for tokenv1 if the recipient does not have direct deposit enabled + +Auctions support: +* Buy-it-now +* Incremental end times based upon the last bid time +* Minimum bid increments + +Fixed-price support: +* Seller can end at any time. + +Collection offer: +* Offerer can end at any time. + +This is intended as an exploration into the ideal marketplace framework. Please make pull requests to extend it and generalize our use cases. This may never actually be deployed on Mainnet unless the community rallies behind a common marketplace and harness. diff --git a/aptos-move/move-examples/marketplace/readme.md b/aptos-move/move-examples/marketplace/readme.md deleted file mode 100644 index d4e925ada6be1..0000000000000 --- a/aptos-move/move-examples/marketplace/readme.md +++ /dev/null @@ -1,57 +0,0 @@ -Aptos NFT Marketplace Example ------------- - -NOTE: THIS IS AN EXAMPLE AND HAS NOT BEEN FULLY AUDITED. THESE CONTRACTS ARE FOR COLLECTING FEEDBACK FROM OUR BUILDERS. ONCE WE ARE CONFIDENT THAT IT IS BENEFICIAL TO THE TOKEN ECOSYSTEM, WE WILL ADD IT TO the 0x3::aptos-token PACKAGE. - -Introduction ------------- - -The package contains two parts: - -- marketplace utility functions: these contracts specify the basic function and data structures for building a fixed-price marketplace and the auction house. The two contracts are: (1) marketplace_bid_utils and (2) marketplace_listing_utils -- example marketplace contracts: these contracts show two examples of building a marketplace leveraging the marketplace utility functions. - -Design principles ------------------ - -We want to have a minimal required example to improve the liquidity of the token ecosystem - -- Provide a unified Listing so that the same listing can be used across different marketplaces and aggregators. -- Provide a unified buy and bid functions so that people can buy or bid for listed NFT across different marketplaces -- Provide unified events so that downstream applications can have a clear overview of what is happening in the token ecosystem across all marketplaces - -We want app developers to be creative with how they run their marketplace and auction house. - -- We separate the listing, buy and bid from other business logic to put them in utility functions. -- We only provide example marketplace contracts for demonstration. Each marketplace is supposed to deploy its own contracts to its account. They decide how to charge the fee and how to select the bids that win the auction. - -**Design Choices** ------------------- - -We also made the following design choices when we implemented the marketplace utility contracts. Any feedback on these choices is highly appreciated and helps the community. - -- Escrow-less listing: the seller can keep their tokens in their token stores and use the token (eg: show in the wallet, use the token, etc) before their token is sold. -- The seller can choose who owns their listings. The listing can be stored under a marketplace account or stored under sellers' accounts. If the seller wants to work with a particular marketplace, they can give the listing to the marketplace to store after creating the listing. 
The marketplace can then decide how to expose the listing to the buyers. If the seller stores the listing under their own account, anyone can buy from these listings and these listings can be aggregated by aggregators. -- Bidders have to lock their coins during the auction and can only withdraw the coin after the auction expires. Bidder can only increase their bid while the auction is still active. This ensures the bid is valid and the bidder cannot withdraw the coin while the auction is still active. - -FAQ: ----- - -**Why not store the token in the listing to guarantee the token exists?** - -We want to achieve two goals here, first, the token exists in the owner's token store before it is sold. second, the listed token should be available for transfer. - -It is important to keep the token in the token store so that downstream apps, indexer, wallets can easily know the tokens owned by an account. The owner of the token can then use these listed tokens before the token is sold, as a listing can exist for a long time. - -To check whether the listed token is available, there are many ways to handle this problem. For example, tracking the lister's token store events or using an indexer to verify if the owner still has the listed tokens. The marketplace can cancel the listing if the token balance is not enough. - -Meanwhile, we will enhance the token store in our token standard to provide options to lock the token so that these tokens cannot be transferred out during the locking period. - -**How to support new features in this marketplace?** - -We will continuously collect new common features from the community and add them to the contracts in a backward-compatible way. - -**What is the plan for this package?** - -We plan to have these contracts in the move-example and collect the feedbacks from community. -Once we have gone through enough iterations and be confident that it is beneficial to the token ecosystem, we will propose it to include them in the 0x3 aptos-token package. diff --git a/aptos-move/move-examples/marketplace/sources/coin_listing.move b/aptos-move/move-examples/marketplace/sources/coin_listing.move new file mode 100644 index 0000000000000..27297c21c7b54 --- /dev/null +++ b/aptos-move/move-examples/marketplace/sources/coin_listing.move @@ -0,0 +1,1156 @@ +address marketplace { +/// Defines a single listing or an item for sale or auction. This is an escrow service that +/// enables two parties to exchange one asset for another. +/// Each listing has the following properties: +/// * FeeSchedule specifying payment flows +/// * Owner or the person that can end the sale or auction +/// * Optional buy it now price +/// * Ending time at which point it can be claimed by the highest bidder or left in escrow. +/// * For auctions, the minimum bid rate and optional increase in duration of the auction if bids +/// are made toward the end of the auction. +module coin_listing { + use std::error; + use std::option::{Self, Option}; + use std::signer; + use std::string::String; + + use aptos_framework::coin::{Self, Coin}; + use aptos_framework::event::{Self, EventHandle}; + use aptos_framework::object::{Self, ConstructorRef, Object, ObjectCore}; + use aptos_framework::timestamp; + + use marketplace::fee_schedule::{Self, FeeSchedule}; + use marketplace::listing::{Self, Listing}; + + #[test_only] + friend marketplace::listing_tests; + + /// There exists no listing. + const ENO_LISTING: u64 = 1; + /// This is an auction without buy it now. 
+ const ENO_BUY_IT_NOW: u64 = 2; + /// The proposed bid is insufficient. + const EBID_TOO_LOW: u64 = 3; + /// The auction has not yet ended. + const EAUCTION_NOT_ENDED: u64 = 4; + /// The auction has already ended. + const EAUCTION_ENDED: u64 = 5; + /// The entity is not the seller. + const ENOT_SELLER: u64 = 6; + + // Core data structures + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Fixed-price market place listing. + struct FixedPriceListing has key { + /// The price to purchase the item up for listing. + price: u64, + /// Purchase event -- as it is only ever executed once. + purchase_event: EventHandle, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// An auction-based listing with optional buy it now semantics. + struct AuctionListing has key { + /// Starting bid price. + starting_bid: u64, + /// Price increment from the current bid. + bid_increment: u64, + /// Current bid, if one exists. + current_bid: Option>, + /// Auction end time in Unix time as seconds. + auction_end_time: u64, + /// If a bid time comes within this amount of time before the end bid, extend the end bid + /// to the current time plus this amount. + minimum_bid_time_before_end: u64, + /// Buy it now price, ends auction immediately. + buy_it_now_price: Option, + /// Bid events + bid_events: EventHandle, + /// Purchase event -- as it is only ever executed once. + purchase_event: EventHandle, + } + + /// Represents a single bid within this auction house. + struct Bid has store { + bidder: address, + coins: Coin, + } + + /// An event triggered upon each bid. + struct BidEvent has drop, store { + new_bidder: address, + new_bid: u64, + new_end_time: u64, + previous_bidder: Option
, + previous_bid: Option, + previous_end_time: u64, + } + + /// An event triggered upon the sale of an item. Note, the amount given to the seller is the + /// price - commission - royalties. In the case there was no sale, purchaser is equal to seller + /// and the amounts will all be zero. + struct PurchaseEvent has drop, store { + purchaser: address, + price: u64, + commission: u64, + royalties: u64, + } + + // Init functions + + public entry fun init_fixed_price( + seller: &signer, + object: Object, + fee_schedule: Object, + start_time: u64, + price: u64, + ) { + init_fixed_price_internal(seller, object, fee_schedule, start_time, price); + } + + public(friend) fun init_fixed_price_internal( + seller: &signer, + object: Object, + fee_schedule: Object, + start_time: u64, + price: u64, + ): Object { + let (listing_signer, constructor_ref) = init( + seller, + object, + fee_schedule, + start_time, + price, + ); + + let fixed_price_listing = FixedPriceListing { + price, + purchase_event: object::new_event_handle(&listing_signer), + }; + move_to(&listing_signer, fixed_price_listing); + + object::object_from_constructor_ref(&constructor_ref) + } + + public entry fun init_fixed_price_for_tokenv1( + seller: &signer, + token_creator: address, + token_collection: String, + token_name: String, + token_property_version: u64, + fee_schedule: Object, + start_time: u64, + price: u64, + ) { + init_fixed_price_for_tokenv1_internal( + seller, + token_creator, + token_collection, + token_name, + token_property_version, + fee_schedule, + start_time, + price, + ); + } + + public(friend) fun init_fixed_price_for_tokenv1_internal( + seller: &signer, + token_creator: address, + token_collection: String, + token_name: String, + token_property_version: u64, + fee_schedule: Object, + start_time: u64, + price: u64, + ): Object { + let object = listing::create_tokenv1_container( + seller, + token_creator, + token_collection, + token_name, + token_property_version, + ); + init_fixed_price_internal( + seller, + object::convert(object), + fee_schedule, + start_time, + price, + ) + } + + public entry fun init_auction( + seller: &signer, + object: Object, + fee_schedule: Object, + start_time: u64, + starting_bid: u64, + bid_increment: u64, + auction_end_time: u64, + minimum_bid_time_before_end: u64, + buy_it_now_price: Option, + ) { + init_auction_internal( + seller, + object, + fee_schedule, + start_time, + starting_bid, + bid_increment, + auction_end_time, + minimum_bid_time_before_end, + buy_it_now_price, + ); + } + + public(friend) fun init_auction_internal( + seller: &signer, + object: Object, + fee_schedule: Object, + start_time: u64, + starting_bid: u64, + bid_increment: u64, + auction_end_time: u64, + minimum_bid_time_before_end: u64, + buy_it_now_price: Option, + ): Object { + let (listing_signer, constructor_ref) = init( + seller, + object, + fee_schedule, + start_time, + starting_bid, + ); + + let auction_listing = AuctionListing { + starting_bid, + bid_increment, + current_bid: option::none(), + auction_end_time, + minimum_bid_time_before_end, + buy_it_now_price, + bid_events: object::new_event_handle(&listing_signer), + purchase_event: object::new_event_handle(&listing_signer), + }; + move_to(&listing_signer, auction_listing); + object::object_from_constructor_ref(&constructor_ref) + } + + public entry fun init_auction_for_tokenv1( + seller: &signer, + token_creator: address, + token_collection: String, + token_name: String, + token_property_version: u64, + fee_schedule: Object, + start_time: u64, + 
starting_bid: u64, + bid_increment: u64, + auction_end_time: u64, + minimum_bid_time_before_end: u64, + buy_it_now_price: Option, + ) { + init_auction_for_tokenv1_internal( + seller, + token_creator, + token_collection, + token_name, + token_property_version, + fee_schedule, + start_time, + starting_bid, + bid_increment, + auction_end_time, + minimum_bid_time_before_end, + buy_it_now_price, + ); + } + + public(friend) fun init_auction_for_tokenv1_internal( + seller: &signer, + token_creator: address, + token_collection: String, + token_name: String, + token_property_version: u64, + fee_schedule: Object, + start_time: u64, + starting_bid: u64, + bid_increment: u64, + auction_end_time: u64, + minimum_bid_time_before_end: u64, + buy_it_now_price: Option, + ): Object { + let object = listing::create_tokenv1_container( + seller, + token_creator, + token_collection, + token_name, + token_property_version, + ); + init_auction_internal( + seller, + object::convert(object), + fee_schedule, + start_time, + starting_bid, + bid_increment, + auction_end_time, + minimum_bid_time_before_end, + buy_it_now_price, + ) + } + + inline fun init( + seller: &signer, + object: Object, + fee_schedule: Object, + start_time: u64, + initial_price: u64, + ): (signer, ConstructorRef) { + coin::transfer( + seller, + fee_schedule::fee_address(fee_schedule), + fee_schedule::listing_fee(fee_schedule, initial_price), + ); + + listing::init(seller, object, fee_schedule, start_time) + } + + // Mutators + + /// Purchase outright an item from an auction or a fixed price listing. + public entry fun purchase( + purchaser: &signer, + object: Object, + ) acquires AuctionListing, FixedPriceListing { + let listing_addr = listing::assert_started(&object); + + // Retrieve the purchase price if the auction has buy it now or this is a fixed listing. + let (price, purchase_event) = if (exists>(listing_addr)) { + let AuctionListing { + starting_bid: _, + bid_increment: _, + current_bid, + auction_end_time, + minimum_bid_time_before_end: _, + buy_it_now_price, + bid_events, + purchase_event, + } = move_from>(listing_addr); + + let now = timestamp::now_seconds(); + assert!(now < auction_end_time, error::invalid_state(EAUCTION_ENDED)); + + assert!(option::is_some(&buy_it_now_price), error::invalid_argument(ENO_BUY_IT_NOW)); + if (option::is_some(¤t_bid)) { + let Bid { bidder, coins } = option::destroy_some(current_bid); + coin::deposit(bidder, coins); + } else { + option::destroy_none(current_bid); + }; + event::destroy_handle(bid_events); + (option::destroy_some(buy_it_now_price), purchase_event) + } else if (exists>(listing_addr)) { + let FixedPriceListing { + price, + purchase_event, + } = move_from>(listing_addr); + (price, purchase_event) + } else { + // This should just be an abort but the compiler errors. + abort(error::not_found(ENO_LISTING)) + }; + + let coins = coin::withdraw(purchaser, price); + + complete_purchase(signer::address_of(purchaser), object, purchase_event, coins) + } + + /// End a fixed price listing early. 
+ public entry fun end_fixed_price( + seller: &signer, + object: Object, + ) acquires FixedPriceListing { + let expected_seller_addr = signer::address_of(seller); + let (actual_seller_addr, _fee_schedule) = listing::close(object, expected_seller_addr); + assert!(expected_seller_addr == actual_seller_addr, error::permission_denied(ENOT_SELLER)); + + let listing_addr = object::object_address(&object); + assert!(exists>(listing_addr), error::not_found(ENO_LISTING)); + let FixedPriceListing { + price: _, + purchase_event, + } = move_from>(listing_addr); + + let purchase_event_data = PurchaseEvent { + purchaser: expected_seller_addr, + price: 0, + commission: 0, + royalties: 0, + }; + event::emit_event(&mut purchase_event, purchase_event_data); + event::destroy_handle(purchase_event); + } + + /// Make a bid on a listing. If the listing comes in near the end of an auction, the auction + /// may be extended to give at least minimum_bid_time_before_end time remaining in the auction. + public entry fun bid( + bidder: &signer, + object: Object, + bid_amount: u64, + ) acquires AuctionListing { + let listing_addr = listing::assert_started(&object); + assert!(exists>(listing_addr), error::not_found(ENO_LISTING)); + let auction_listing = borrow_global_mut>(listing_addr); + + let now = timestamp::now_seconds(); + assert!(now < auction_listing.auction_end_time, error::invalid_state(EAUCTION_ENDED)); + + let (previous_bidder, previous_bid, minimum_bid) = if (option::is_some(&auction_listing.current_bid)) { + let Bid { bidder, coins } = option::extract(&mut auction_listing.current_bid); + let current_bid = coin::value(&coins); + coin::deposit(bidder, coins); + (option::some(bidder), option::some(current_bid), current_bid + auction_listing.bid_increment) + } else { + (option::none(), option::none(), auction_listing.starting_bid) + }; + + assert!(bid_amount >= minimum_bid, error::invalid_argument(EBID_TOO_LOW)); + let coins = coin::withdraw(bidder, bid_amount); + let bid = Bid { + bidder: signer::address_of(bidder), + coins, + }; + option::fill(&mut auction_listing.current_bid, bid); + + let fee_schedule = listing::fee_schedule(object); + coin::transfer( + bidder, + fee_schedule::fee_address(fee_schedule), + fee_schedule::bidding_fee(fee_schedule, bid_amount), + ); + + let now = timestamp::now_seconds(); + let current_end_time = auction_listing.auction_end_time; + let minimum_end_time = now + auction_listing.minimum_bid_time_before_end; + + if (current_end_time < minimum_end_time) { + auction_listing.auction_end_time = minimum_end_time + }; + + let bid_event_data = BidEvent { + new_bidder: signer::address_of(bidder), + new_bid: bid_amount, + new_end_time: auction_listing.auction_end_time, + previous_bidder, + previous_bid, + previous_end_time: current_end_time, + }; + event::emit_event(&mut auction_listing.bid_events, bid_event_data); + } + + /// Once the current time has elapsed the auctions run time, allow the auction to be settled by + /// distributing out the asset to the winner or the auction seller if no one bid as well as + /// giving any fees to the marketplace that hosted the auction. 
+ public entry fun complete_auction( + object: Object, + ) acquires AuctionListing { + let listing_addr = listing::assert_started(&object); + assert!(exists>(listing_addr), error::not_found(ENO_LISTING)); + + let AuctionListing { + starting_bid: _, + bid_increment: _, + current_bid, + auction_end_time, + minimum_bid_time_before_end: _, + buy_it_now_price: _, + bid_events, + purchase_event, + } = move_from>(listing_addr); + + let now = timestamp::now_seconds(); + assert!(auction_end_time <= now, error::invalid_state(EAUCTION_NOT_ENDED)); + + let seller = listing::seller(object); + + let (purchaser, coins) = if (option::is_some(¤t_bid)) { + let Bid { bidder, coins } = option::destroy_some(current_bid); + (bidder, coins) + } else { + option::destroy_none(current_bid); + (seller, coin::zero()) + }; + + complete_purchase(purchaser, object, purchase_event, coins); + event::destroy_handle(bid_events); + } + + inline fun complete_purchase( + purchaser_addr: address, + object: Object, + purchase_event: EventHandle, + coins: Coin, + ) { + let price = coin::value(&coins); + let (royalty_addr, royalty_charge) = listing::compute_royalty(object, price); + let (seller, fee_schedule) = listing::close(object, purchaser_addr); + + let commission_charge = fee_schedule::commission(fee_schedule, price); + let commission = coin::extract(&mut coins, commission_charge); + coin::deposit(fee_schedule::fee_address(fee_schedule), commission); + + if (royalty_charge != 0) { + let royalty = coin::extract(&mut coins, royalty_charge); + coin::deposit(royalty_addr, royalty); + }; + + coin::deposit(seller, coins); + + let purchase_event_data = PurchaseEvent { + purchaser: purchaser_addr, + price, + commission: commission_charge, + royalties: royalty_charge, + }; + event::emit_event(&mut purchase_event, purchase_event_data); + event::destroy_handle(purchase_event); + } + + // View + + #[view] + public fun price( + object: Object, + ): Option acquires AuctionListing, FixedPriceListing { + let listing_addr = object::object_address(&object); + if (exists>(listing_addr)) { + let fixed_price = borrow_global>(listing_addr); + option::some(fixed_price.price) + } else if (exists>(listing_addr)) { + borrow_global>(listing_addr).buy_it_now_price + } else { + // This should just be an abort but the compiler errors. + assert!(false, error::not_found(ENO_LISTING)); + option::none() + } + } + + #[view] + public fun is_auction(object: Object): bool { + let obj_addr = object::object_address(&object); + exists>(obj_addr) + } + + #[view] + public fun starting_bid(object: Object): u64 acquires AuctionListing { + let auction = borrow_auction(object); + auction.starting_bid + } + + #[view] + public fun bid_increment(object: Object): u64 acquires AuctionListing { + let auction = borrow_auction(object); + auction.bid_increment + } + + #[view] + public fun auction_end_time(object: Object): u64 acquires AuctionListing { + let auction = borrow_auction(object); + auction.auction_end_time + } + + #[view] + public fun minimum_bid_time_before_end( + object: Object, + ): u64 acquires AuctionListing { + let auction = borrow_auction(object); + auction.minimum_bid_time_before_end + } + + #[view] + public fun current_bidder( + object: Object, + ): Option
acquires AuctionListing { + let auction = borrow_auction(object); + if (option::is_some(&auction.current_bid)) { + option::some(option::borrow(&auction.current_bid).bidder) + } else { + option::none() + } + } + + #[view] + public fun current_amount( + object: Object, + ): Option acquires AuctionListing { + let auction = borrow_auction(object); + if (option::is_some(&auction.current_bid)) { + let coins = &option::borrow(&auction.current_bid).coins; + option::some(coin::value(coins)) + } else { + option::none() + } + } + + inline fun borrow_auction( + object: Object, + ): &AuctionListing acquires AuctionListing { + let obj_addr = object::object_address(&object); + assert!(exists>(obj_addr), error::not_found(ENO_LISTING)); + borrow_global>(obj_addr) + } + + inline fun borrow_fixed_price( + object: Object, + ): &FixedPriceListing acquires FixedPriceListing { + let obj_addr = object::object_address(&object); + assert!(exists>(obj_addr), error::not_found(ENO_LISTING)); + borrow_global>(obj_addr) + } +} + +// Tests + +#[test_only] +module listing_tests { + use std::option; + + use aptos_framework::aptos_coin::AptosCoin; + use aptos_framework::coin; + use aptos_framework::object::{Self, Object}; + use aptos_framework::timestamp; + + use aptos_token::token as tokenv1; + + use aptos_token_objects::token::Token; + + use marketplace::coin_listing; + use marketplace::fee_schedule::FeeSchedule; + use marketplace::listing::{Self, Listing}; + use marketplace::test_utils; + + fun test_fixed_price( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (token, fee_schedule, listing) = fixed_price_listing(marketplace, seller); + + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(seller_addr) == 9999, 0); + assert!(listing::listed_object(listing) == object::convert(token), 0); + assert!(listing::fee_schedule(listing) == fee_schedule, 0); + assert!(coin_listing::price(listing) == option::some(500), 0); + assert!(!coin_listing::is_auction(listing), 0); + + coin_listing::purchase(purchaser, listing); + + assert!(object::owner(token) == purchaser_addr, 0); + assert!(coin::balance(marketplace_addr) == 6, 0); + assert!(coin::balance(seller_addr) == 10494, 0); + assert!(coin::balance(purchaser_addr) == 9500, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_fixed_price_end( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, _purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (token, _fee_schedule, listing) = fixed_price_listing(marketplace, seller); + + assert!(coin::balance(marketplace_addr) == 1, 0); + coin_listing::end_fixed_price(seller, listing); + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(seller_addr) == 9999, 0); + assert!(object::owner(token) == seller_addr, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_auction_purchase( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (token, fee_schedule, listing) = auction_listing(marketplace, seller); 
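+        // Listing charged the seller 1 (hence 9999 below); purchasing at the 500 buy-it-now price
+        // then pays 5 of commission to the marketplace and 495 to the seller, as the later assertions check.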
+ assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(seller_addr) == 9999, 0); + assert!(listing::listed_object(listing) == object::convert(token), 0); + assert!(listing::fee_schedule(listing) == fee_schedule, 0); + assert!(coin_listing::price(listing) == option::some(500), 0); + assert!(coin_listing::is_auction(listing), 0); + assert!(coin_listing::starting_bid(listing) == 100, 0); + assert!(coin_listing::bid_increment(listing) == 50, 0); + assert!(coin_listing::auction_end_time(listing) == timestamp::now_seconds() + 200, 0); + assert!(coin_listing::minimum_bid_time_before_end(listing) == 150, 0); + assert!(coin_listing::current_amount(listing) == option::none(), 0); + assert!(coin_listing::current_bidder(listing) == option::none(), 0); + + coin_listing::purchase(purchaser, listing); + + assert!(object::owner(token) == purchaser_addr, 0); + assert!(coin::balance(marketplace_addr) == 6, 0); + assert!(coin::balance(seller_addr) == 10494, 0); + assert!(coin::balance(purchaser_addr) == 9500, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_auction_bid_then_purchase( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (token, _fee_schedule, listing) = auction_listing(marketplace, seller); + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(seller_addr) == 9999, 0); + + coin_listing::bid(seller, listing, 100); + assert!(coin_listing::current_amount(listing) == option::some(100), 0); + assert!(coin_listing::current_bidder(listing) == option::some(seller_addr), 0); + assert!(coin::balance(marketplace_addr) == 3, 0); + assert!(coin::balance(seller_addr) == 9897, 0); + + // Return the bid and insert a new bid + coin_listing::bid(purchaser, listing, 150); + assert!(coin_listing::current_amount(listing) == option::some(150), 0); + assert!(coin_listing::current_bidder(listing) == option::some(purchaser_addr), 0); + assert!(coin::balance(marketplace_addr) == 5, 0); + assert!(coin::balance(seller_addr) == 9997, 0); + assert!(coin::balance(purchaser_addr) == 9848, 0); + + // Return the bid and replace with a purchase + coin_listing::purchase(purchaser, listing); + assert!(object::owner(token) == purchaser_addr, 0); + assert!(coin::balance(marketplace_addr) == 10, 0); + assert!(coin::balance(purchaser_addr) == 9498, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_auction_bidding( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (token, _fee_schedule, listing) = auction_listing(marketplace, seller); + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(seller_addr) == 9999, 0); + let end_time = timestamp::now_seconds() + 200; + assert!(coin_listing::auction_end_time(listing) == end_time, 0); + + // Bid but do not affect end timing + coin_listing::bid(seller, listing, 100); + assert!(coin::balance(marketplace_addr) == 3, 0); + assert!(coin::balance(seller_addr) == 9897, 0); + assert!(coin_listing::auction_end_time(listing) == end_time, 0); + + // Return the bid and insert a new bid and affect end timing + test_utils::increment_timestamp(150); + 
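// Only 50 seconds of the 200-second auction remain, so this bid pushes auction_end_time out to now + minimum_bid_time_before_end (150), as the assertion below verifies. +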
coin_listing::bid(purchaser, listing, 150); + assert!(coin::balance(marketplace_addr) == 5, 0); + assert!(coin::balance(seller_addr) == 9997, 0); + assert!(coin::balance(purchaser_addr) == 9848, 0); + assert!(coin_listing::auction_end_time(listing) != end_time, 0); + + // End the auction as out of time + test_utils::increment_timestamp(150); + coin_listing::complete_auction(listing); + assert!(object::owner(token) == purchaser_addr, 0); + assert!(coin::balance(marketplace_addr) == 6, 0); + assert!(coin::balance(seller_addr) == 10146, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_ended_auction_no_bid( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, _purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (token, _fee_schedule, listing) = auction_listing(marketplace, seller); + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(seller_addr) == 9999, 0); + + test_utils::increment_timestamp(200); + coin_listing::complete_auction(listing); + + assert!(object::owner(token) == seller_addr, 0); + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(seller_addr) == 9999, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x30002, location = marketplace::listing)] + fun test_not_started_fixed_price( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let token = test_utils::mint_tokenv2(seller); + let fee_schedule = test_utils::fee_schedule(marketplace); + let listing = coin_listing::init_fixed_price_internal( + seller, + object::convert(token), + fee_schedule, + timestamp::now_seconds() + 1, + 500, + ); + + coin_listing::purchase(purchaser, listing); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x30002, location = marketplace::listing)] + fun test_not_started_auction( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let token = test_utils::mint_tokenv2(seller); + let fee_schedule = test_utils::fee_schedule(marketplace); + let listing = coin_listing::init_auction_internal( + seller, + object::convert(token), + fee_schedule, + timestamp::now_seconds() + 1, + 100, + 50, + timestamp::now_seconds() + 200, + 150, + option::some(500), + ); + + coin_listing::bid(purchaser, listing, 1000); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x30005, location = marketplace::coin_listing)] + fun test_ended_auction_bid( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (_token, _fee_schedule, listing) = auction_listing(marketplace, seller); + test_utils::increment_timestamp(200); + coin_listing::bid(purchaser, listing, 1000); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x30005, location = marketplace::coin_listing)] + fun test_ended_auction_purchase( + aptos_framework: 
&signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (_token, _fee_schedule, listing) = auction_listing(marketplace, seller); + test_utils::increment_timestamp(200); + coin_listing::purchase(purchaser, listing); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10006, location = aptos_framework::coin)] + fun test_not_enough_coin_fixed_price( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let token = test_utils::mint_tokenv2(seller); + let fee_schedule = test_utils::fee_schedule(marketplace); + let listing = coin_listing::init_fixed_price_internal( + seller, + object::convert(token), + fee_schedule, + timestamp::now_seconds(), + 100000, + ); + + coin_listing::purchase(purchaser, listing); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10006, location = aptos_framework::coin)] + fun test_not_enough_coin_auction_bid( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (_token, _fee_schedule, listing) = auction_listing(marketplace, seller); + coin_listing::bid(purchaser, listing, 100000); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10003, location = marketplace::coin_listing)] + fun test_bid_too_low( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (_token, _fee_schedule, listing) = auction_listing(marketplace, seller); + coin_listing::bid(purchaser, listing, 100); + coin_listing::bid(purchaser, listing, 125); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10006, location = aptos_framework::coin)] + fun test_not_enough_coin_auction_purchase( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let token = test_utils::mint_tokenv2(seller); + let fee_schedule = test_utils::fee_schedule(marketplace); + let listing = coin_listing::init_auction_internal( + seller, + object::convert(token), + fee_schedule, + timestamp::now_seconds(), + 100, + 50, + timestamp::now_seconds() + 200, + 150, + option::some(50000), + ); + + coin_listing::purchase(purchaser, listing); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x60001, location = marketplace::coin_listing)] + fun test_auction_view_on_fixed_price( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (_token, _fee_schedule, listing) = fixed_price_listing(marketplace, seller); + coin_listing::auction_end_time(listing); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10002, location = marketplace::coin_listing)] + fun 
test_purchase_on_auction_without_buy_it_now( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let token = test_utils::mint_tokenv2(seller); + let fee_schedule = test_utils::fee_schedule(marketplace); + let listing = coin_listing::init_auction_internal( + seller, + object::convert(token), + fee_schedule, + timestamp::now_seconds(), + 100, + 50, + timestamp::now_seconds() + 200, + 150, + option::none(), + ); + + coin_listing::purchase(purchaser, listing); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x50006, location = marketplace::coin_listing)] + fun test_bad_fixed_price_end( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let (_token, _fee_schedule, listing) = fixed_price_listing(marketplace, seller); + coin_listing::end_fixed_price(purchaser, listing); + } + + // Objects and TokenV2 stuff + + inline fun fixed_price_listing( + marketplace: &signer, + seller: &signer, + ): (Object, Object, Object) { + let token = test_utils::mint_tokenv2(seller); + let fee_schedule = test_utils::fee_schedule(marketplace); + let listing = coin_listing::init_fixed_price_internal( + seller, + object::convert(token), + fee_schedule, + timestamp::now_seconds(), + 500, + ); + (token, fee_schedule, listing) + } + + inline fun auction_listing( + marketplace: &signer, + seller: &signer, + ): (Object, Object, Object) { + let token = test_utils::mint_tokenv2(seller); + let fee_schedule = test_utils::fee_schedule(marketplace); + let listing = coin_listing::init_auction_internal( + seller, + object::convert(token), + fee_schedule, + timestamp::now_seconds(), + 100, + 50, + timestamp::now_seconds() + 200, + 150, + option::some(500), + ); + (token, fee_schedule, listing) + } + + // TokenV1 + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_fixed_price_for_token_v1( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (_marketplace_addr, _seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + tokenv1::opt_in_direct_transfer(purchaser, true); + + let (token_id, _fee_schedule, listing) = fixed_price_listing_for_tokenv1(marketplace, seller); + coin_listing::purchase(purchaser, listing); + assert!(tokenv1::balance_of(purchaser_addr, token_id) == 1, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_auction_purchase_for_tokenv1( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (_marketplace_addr, _seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + tokenv1::opt_in_direct_transfer(purchaser, true); + + let (token_id, _fee_schedule, listing) = auction_listing_for_tokenv1(marketplace, seller); + coin_listing::purchase(purchaser, listing); + assert!(tokenv1::balance_of(purchaser_addr, token_id) == 1, 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_auction_purchase_for_tokenv1_without_direct_transfer( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let 
(_marketplace_addr, _seller_addr, purchaser_addr) =
+            test_utils::setup(aptos_framework, marketplace, seller, purchaser);
+
+        let (token_id, _fee_schedule, listing) = auction_listing_for_tokenv1(marketplace, seller);
+        let token_object = listing::listed_object(listing);
+        coin_listing::purchase<AptosCoin>(purchaser, listing);
+        listing::extract_tokenv1(purchaser, object::convert(token_object));
+        assert!(tokenv1::balance_of(purchaser_addr, token_id) == 1, 0);
+    }
+
+    inline fun fixed_price_listing_for_tokenv1(
+        marketplace: &signer,
+        seller: &signer,
+    ): (tokenv1::TokenId, Object<FeeSchedule>, Object<Listing>) {
+        let token_id = test_utils::mint_tokenv1(seller);
+        let (creator_addr, collection_name, token_name, property_version) =
+            tokenv1::get_token_id_fields(&token_id);
+        let fee_schedule = test_utils::fee_schedule(marketplace);
+        let listing = coin_listing::init_fixed_price_for_tokenv1_internal<AptosCoin>(
+            seller,
+            creator_addr,
+            collection_name,
+            token_name,
+            property_version,
+            fee_schedule,
+            timestamp::now_seconds(),
+            500,
+        );
+        (token_id, fee_schedule, listing)
+    }
+
+    inline fun auction_listing_for_tokenv1(
+        marketplace: &signer,
+        seller: &signer,
+    ): (tokenv1::TokenId, Object<FeeSchedule>, Object<Listing>) {
+        let token_id = test_utils::mint_tokenv1(seller);
+        let (creator_addr, collection_name, token_name, property_version) =
+            tokenv1::get_token_id_fields(&token_id);
+        let fee_schedule = test_utils::fee_schedule(marketplace);
+        let listing = coin_listing::init_auction_for_tokenv1_internal<AptosCoin>(
+            seller,
+            creator_addr,
+            collection_name,
+            token_name,
+            property_version,
+            fee_schedule,
+            timestamp::now_seconds(),
+            100,
+            50,
+            timestamp::now_seconds() + 200,
+            150,
+            option::some(500),
+        );
+        (token_id, fee_schedule, listing)
+    }
+}
+}
diff --git a/aptos-move/move-examples/marketplace/sources/collection_offer.move b/aptos-move/move-examples/marketplace/sources/collection_offer.move
new file mode 100644
index 0000000000000..08848dfcacd83
--- /dev/null
+++ b/aptos-move/move-examples/marketplace/sources/collection_offer.move
@@ -0,0 +1,744 @@
+address marketplace {
+/// Provides the ability to make collection offers to both Tokenv1 and Tokenv2 collections.
+/// A collection offer allows an entity to buy up to N assets within a collection at their
+/// specified price. The amount offered is extracted from their account and stored in an
+/// escrow. A seller can then exchange a token for the escrowed payment. If it is a
+/// tokenv2 or the recipient has enabled direct deposit, the token is immediately
+/// transferred. If it is a tokenv1 without direct deposit, it is stored in a container
+/// until the recipient extracts it.
+module collection_offer {
+    use std::error;
+    use std::option::{Self, Option};
+    use std::signer;
+    use std::string::String;
+
+    use aptos_framework::coin::{Self, Coin};
+    use aptos_framework::event::{Self, EventHandle};
+    use aptos_framework::object::{Self, DeleteRef, Object};
+    use aptos_framework::timestamp;
+
+    use aptos_token::token as tokenv1;
+
+    use aptos_token_objects::collection::Collection;
+    use aptos_token_objects::royalty;
+    use aptos_token_objects::token::{Self as tokenv2, Token as TokenV2};
+
+    use marketplace::fee_schedule::{Self, FeeSchedule};
+    use marketplace::listing::{Self, TokenV1Container};
+
+    /// No collection offer defined.
+    const ENO_COLLECTION_OFFER: u64 = 1;
+    /// No coin offer defined.
+    const ENO_COIN_OFFER: u64 = 2;
+    /// No token offer defined.
+    const ENO_TOKEN_OFFER: u64 = 3;
+    /// This is not the owner of the collection offer.
+    const ENOT_OWNER: u64 = 4;
+    /// The offered token is not within the expected collection.
+    const EINCORRECT_COLLECTION: u64 = 5;
+    /// The collection offer has expired.
+    const EEXPIRED: u64 = 6;
+
+    // Core data structures
+
+    #[resource_group_member(group = aptos_framework::object::ObjectGroup)]
+    /// Create a limited-lifetime offer to buy tokens from a collection. The collection and
+    /// assets used to buy are stored in other resources within the object.
+    struct CollectionOffer has key {
+        fee_schedule: Object<FeeSchedule>,
+        item_price: u64,
+        remaining: u64,
+        expiration_time: u64,
+        delete_ref: DeleteRef,
+        events: EventHandle<CollectionOfferEvent>,
+    }
+
+    /// An event for when a collection offer has been met.
+    struct CollectionOfferEvent has drop, store {
+        seller: address,
+        price: u64,
+        royalties: u64,
+        commission: u64,
+    }
+
+    #[resource_group_member(group = aptos_framework::object::ObjectGroup)]
+    /// Stores coins for a collection offer.
+    struct CoinOffer<phantom CoinType> has key {
+        coins: Coin<CoinType>,
+    }
+
+    #[resource_group_member(group = aptos_framework::object::ObjectGroup)]
+    /// Stores the metadata associated with a tokenv1 collection offer.
+    struct CollectionOfferTokenV1 has copy, drop, key {
+        creator_address: address,
+        collection_name: String,
+    }
+
+    #[resource_group_member(group = aptos_framework::object::ObjectGroup)]
+    /// Stores the metadata associated with a tokenv2 collection offer.
+    struct CollectionOfferTokenV2 has copy, drop, key {
+        collection: Object<Collection>,
+    }
+
+    // Initializers
+
+    #[legacy_entry_fun]
+    /// Create a tokenv1 collection offer.
+    public entry fun init_for_tokenv1<CoinType>(
+        purchaser: &signer,
+        creator_address: address,
+        collection_name: String,
+        fee_schedule: Object<FeeSchedule>,
+        item_price: u64,
+        amount: u64,
+        expiration_time: u64,
+    ): Object<CollectionOffer> {
+        let offer_signer = init_offer(purchaser, fee_schedule, item_price, amount, expiration_time);
+        init_coin_holder<CoinType>(purchaser, &offer_signer, fee_schedule, item_price * amount);
+        move_to(&offer_signer, CollectionOfferTokenV1 { creator_address, collection_name });
+        object::address_to_object(signer::address_of(&offer_signer))
+    }
+
+    #[legacy_entry_fun]
+    /// Create a tokenv2 collection offer.
+    public entry fun init_for_tokenv2<CoinType>(
+        purchaser: &signer,
+        collection: Object<Collection>,
+        fee_schedule: Object<FeeSchedule>,
+        item_price: u64,
+        amount: u64,
+        expiration_time: u64,
+    ): Object<CollectionOffer> {
+        let offer_signer = init_offer(purchaser, fee_schedule, item_price, amount, expiration_time);
+        init_coin_holder<CoinType>(purchaser, &offer_signer, fee_schedule, item_price * amount);
+        move_to(&offer_signer, CollectionOfferTokenV2 { collection });
+        object::address_to_object(signer::address_of(&offer_signer))
+    }
+
+    inline fun init_offer(
+        purchaser: &signer,
+        fee_schedule: Object<FeeSchedule>,
+        item_price: u64,
+        amount: u64,
+        expiration_time: u64,
+    ): signer {
+        let constructor_ref = object::create_object_from_account(purchaser);
+        // Once we construct this, both the listing and its contents are soulbound until the conclusion.
+ let transfer_ref = object::generate_transfer_ref(&constructor_ref); + object::disable_ungated_transfer(&transfer_ref); + + let offer_signer = object::generate_signer(&constructor_ref); + let offer = CollectionOffer { + fee_schedule, + item_price, + remaining: amount, + expiration_time, + delete_ref: object::generate_delete_ref(&constructor_ref), + events: object::new_event_handle(&offer_signer), + }; + move_to(&offer_signer, offer); + + offer_signer + } + + inline fun init_coin_holder( + purchaser: &signer, + offer_signer: &signer, + fee_schedule: Object, + total_to_extract: u64, + ) { + let fee = fee_schedule::listing_fee(fee_schedule, total_to_extract); + let fee_address = fee_schedule::fee_address(fee_schedule); + coin::transfer(purchaser, fee_address, fee); + + let coins = coin::withdraw(purchaser, total_to_extract); + move_to(offer_signer, CoinOffer { coins }); + } + + // Mutators + + /// + public entry fun cancel( + purchaser: &signer, + collection_offer: Object, + ) acquires CoinOffer, CollectionOffer, CollectionOfferTokenV1, CollectionOfferTokenV2 { + let collection_offer_addr = object::object_address(&collection_offer); + assert!( + exists(collection_offer_addr), + error::not_found(ENO_COLLECTION_OFFER), + ); + assert!( + object::is_owner(collection_offer, signer::address_of(purchaser)), + error::permission_denied(ENOT_OWNER), + ); + + cleanup(collection_offer); + } + + #[legacy_entry_fun] + /// Sell a tokenv1 to a collection offer. + public entry fun sell_tokenv1( + seller: &signer, + collection_offer: Object, + token_name: String, + property_version: u64, + ): Option> + acquires + CoinOffer, + CollectionOffer, + CollectionOfferTokenV1, + CollectionOfferTokenV2 + { + let collection_offer_addr = object::object_address(&collection_offer); + assert!( + exists(collection_offer_addr), + error::not_found(ENO_TOKEN_OFFER), + ); + let collection_offer_tokenv1_offer = + borrow_global_mut(collection_offer_addr); + + // Move the token to its destination + + let token_id = tokenv1::create_token_id_raw( + collection_offer_tokenv1_offer.creator_address, + collection_offer_tokenv1_offer.collection_name, + token_name, + property_version, + ); + + let token = tokenv1::withdraw_token(seller, token_id, 1); + + let recipient = object::owner(collection_offer); + let container = if (tokenv1::get_direct_transfer(recipient)) { + tokenv1::direct_deposit_with_opt_in(recipient, token); + option::none() + } else { + let container = listing::create_tokenv1_container_with_token(seller, token); + object::transfer(seller, container, recipient); + option::some(container) + }; + + // Pay fees + + let royalty = tokenv1::get_royalty(token_id); + settle_payments( + signer::address_of(seller), + collection_offer_addr, + tokenv1::get_royalty_payee(&royalty), + tokenv1::get_royalty_denominator(&royalty), + tokenv1::get_royalty_numerator(&royalty), + ); + + container + } + + /// Sell a tokenv2 to a collection offer. 
+ public entry fun sell_tokenv2( + seller: &signer, + collection_offer: Object, + token: Object, + ) acquires CoinOffer, CollectionOffer, CollectionOfferTokenV1, CollectionOfferTokenV2 { + let collection_offer_addr = object::object_address(&collection_offer); + assert!( + exists(collection_offer_addr), + error::not_found(ENO_TOKEN_OFFER), + ); + let collection_offer_token_v2 = + borrow_global_mut(collection_offer_addr); + + // Move the token to its destination + + assert!( + tokenv2::collection_object(token) == collection_offer_token_v2.collection, + error::invalid_argument(EINCORRECT_COLLECTION), + ); + let recipient = object::owner(collection_offer); + object::transfer(seller, token, recipient); + + // Pay fees + + let royalty = tokenv2::royalty(token); + let (royalty_payee, royalty_denominator, royalty_numerator) = if (option::is_some(&royalty)) { + let royalty = option::destroy_some(royalty); + let payee_address = royalty::payee_address(&royalty); + let denominator = royalty::denominator(&royalty); + let numerator = royalty::numerator(&royalty); + (payee_address, denominator, numerator) + } else { + (signer::address_of(seller), 1, 0) + }; + + settle_payments( + signer::address_of(seller), + collection_offer_addr, + royalty_payee, + royalty_denominator, + royalty_numerator, + ); + } + + /// From the coin offer remove appropriate payment for the token and distribute to the seller, + /// the creator for royalties, and the marketplace for commission. If there are no more slots, + /// cleanup the offer. + inline fun settle_payments( + seller: address, + collection_offer_addr: address, + royalty_payee: address, + royalty_denominator: u64, + royalty_numerator: u64, + ) acquires CoinOffer, CollectionOffer, CollectionOfferTokenV1, CollectionOfferTokenV2 { + assert!(exists(collection_offer_addr), 0); + let collection_offer_obj = borrow_global_mut(collection_offer_addr); + assert!( + timestamp::now_seconds() < collection_offer_obj.expiration_time, + error::invalid_state(EEXPIRED), + ); + let price = collection_offer_obj.item_price; + + assert!( + exists>(collection_offer_addr), + error::not_found(ENO_COIN_OFFER), + ); + let coin_offer = borrow_global_mut>(collection_offer_addr); + let coins = coin::extract(&mut coin_offer.coins, price); + + let royalty_charge = price * royalty_numerator / royalty_denominator; + let royalties = coin::extract(&mut coins, royalty_charge); + coin::deposit(royalty_payee, royalties); + + let fee_schedule = collection_offer_obj.fee_schedule; + let commission_charge = fee_schedule::commission(fee_schedule, price); + let commission = coin::extract(&mut coins, commission_charge); + coin::deposit(fee_schedule::fee_address(fee_schedule), commission); + + coin::deposit(seller, coins); + + let event = CollectionOfferEvent { + seller, + price, + royalties: royalty_charge, + commission: commission_charge, + }; + event::emit_event(&mut collection_offer_obj.events, event); + + collection_offer_obj.remaining = collection_offer_obj.remaining - 1; + if (collection_offer_obj.remaining == 0) { + cleanup(object::address_to_object(collection_offer_addr)); + }; + } + + /// Cleanup the offer by deleting it and returning the remaining funds to the collection offer + /// creator. 
+ inline fun cleanup( + collection_offer: Object, + ) acquires CoinOffer, CollectionOffer, CollectionOfferTokenV1, CollectionOfferTokenV2 { + let collection_offer_addr = object::object_address(&collection_offer); + let CoinOffer { coins } = move_from(collection_offer_addr); + coin::deposit(object::owner(collection_offer), coins); + + let CollectionOffer { + fee_schedule: _, + item_price: _, + remaining: _, + expiration_time: _, + delete_ref, + events, + } = move_from(collection_offer_addr); + event::destroy_handle(events); + object::delete(delete_ref); + + if (exists(collection_offer_addr)) { + move_from(collection_offer_addr); + } else if (exists(collection_offer_addr)) { + move_from(collection_offer_addr); + }; + } + + // View + + #[view] + public fun exists_at(collection_offer: Object): bool { + exists(object::object_address(&collection_offer)) + } + + #[view] + public fun expired(collection_offer: Object): bool acquires CollectionOffer { + borrow_collection_offer(collection_offer).expiration_time < timestamp::now_seconds() + } + + #[view] + public fun expiration_time( + collection_offer: Object, + ): u64 acquires CollectionOffer { + borrow_collection_offer(collection_offer).expiration_time + } + + #[view] + public fun fee_schedule( + collection_offer: Object, + ): Object acquires CollectionOffer { + borrow_collection_offer(collection_offer).fee_schedule + } + + #[view] + public fun price(collection_offer: Object): u64 acquires CollectionOffer { + borrow_collection_offer(collection_offer).item_price + } + + #[view] + public fun remaining(collection_offer: Object): u64 acquires CollectionOffer { + borrow_collection_offer(collection_offer).remaining + } + + #[view] + public fun collectionv1( + collection_offer: Object, + ): CollectionOfferTokenV1 acquires CollectionOfferTokenV1 { + let collection_offer_addr = object::object_address(&collection_offer); + assert!( + exists(collection_offer_addr), + error::not_found(ENO_TOKEN_OFFER), + ); + *borrow_global(collection_offer_addr) + } + + #[view] + public fun collectionv2( + collection_offer: Object, + ): CollectionOfferTokenV2 acquires CollectionOfferTokenV2 { + let collection_offer_addr = object::object_address(&collection_offer); + assert!( + exists(collection_offer_addr), + error::not_found(ENO_COLLECTION_OFFER), + ); + *borrow_global(collection_offer_addr) + } + + inline fun borrow_collection_offer( + collection_offer: Object, + ): &CollectionOffer acquires CollectionOffer { + let collection_offer_addr = object::object_address(&collection_offer); + assert!( + exists(collection_offer_addr), + error::not_found(ENO_COLLECTION_OFFER), + ); + borrow_global(collection_offer_addr) + } +} + +#[test_only] +module collection_offer_tests { + use std::string; + + use aptos_framework::aptos_coin::AptosCoin; + use aptos_framework::coin; + use aptos_framework::object; + use aptos_framework::option; + use aptos_framework::timestamp; + + use aptos_token::token as tokenv1; + + use aptos_token_objects::collection as collectionv2; + use aptos_token_objects::token as tokenv2; + + use marketplace::collection_offer; + use marketplace::listing; + use marketplace::test_utils; + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_token_v2( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + let token = test_utils::mint_tokenv2(seller); + 
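// init_for_tokenv2 below escrows item_price * amount (500 * 2) plus the fee schedule's listing fee (1 here) from the purchaser up front, which is why the purchaser's balance drops to 8999 before any sale. +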
assert!(object::is_owner(token, seller_addr), 0); + let collection_offer = collection_offer::init_for_tokenv2( + purchaser, + tokenv2::collection_object(token), + test_utils::fee_schedule(marketplace), + 500, + 2, + timestamp::now_seconds() + 200, + ); + assert!(!collection_offer::expired(collection_offer), 0); + assert!(collection_offer::expiration_time(collection_offer) == timestamp::now_seconds() + 200, 0); + assert!(collection_offer::price(collection_offer) == 500, 0); + + assert!(collection_offer::remaining(collection_offer) == 2, 0); + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(purchaser_addr) == 8999, 0); + assert!(coin::balance(seller_addr) == 10000, 0); + + collection_offer::sell_tokenv2(seller, collection_offer, token); + assert!(coin::balance(marketplace_addr) == 6, 0); + assert!(coin::balance(purchaser_addr) == 8999, 0); + assert!(coin::balance(seller_addr) == 10495, 0); + assert!(object::is_owner(token, purchaser_addr), 0); + assert!(collection_offer::remaining(collection_offer) == 1, 0); + + collection_offer::sell_tokenv2(purchaser, collection_offer, token); + assert!(coin::balance(marketplace_addr) == 11, 0); + assert!(coin::balance(purchaser_addr) == 9489, 0); + assert!(coin::balance(seller_addr) == 10500, 0); + assert!(object::is_owner(token, purchaser_addr), 0); + assert!(!collection_offer::exists_at(collection_offer), 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_token_v1_direct_deposit( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (marketplace_addr, seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + tokenv1::opt_in_direct_transfer(purchaser, true); + tokenv1::opt_in_direct_transfer(seller, true); + + let token_id = test_utils::mint_tokenv1(seller); + assert!(tokenv1::balance_of(seller_addr, token_id) == 1, 0); + + let (creator_addr, collection_name, token_name, property_version) = + tokenv1::get_token_id_fields(&token_id); + + let collection_offer = collection_offer::init_for_tokenv1( + purchaser, + creator_addr, + collection_name, + test_utils::fee_schedule(marketplace), + 500, + 2, + timestamp::now_seconds() + 200, + ); + + assert!(collection_offer::remaining(collection_offer) == 2, 0); + assert!(coin::balance(marketplace_addr) == 1, 0); + assert!(coin::balance(purchaser_addr) == 8999, 0); + assert!(coin::balance(seller_addr) == 10000, 0); + + collection_offer::sell_tokenv1(seller, collection_offer, token_name, property_version); + assert!(coin::balance(marketplace_addr) == 6, 0); + assert!(coin::balance(purchaser_addr) == 8999, 0); + assert!(coin::balance(seller_addr) == 10495, 0); + assert!(tokenv1::balance_of(purchaser_addr, token_id) == 1, 0); + assert!(collection_offer::remaining(collection_offer) == 1, 0); + + collection_offer::sell_tokenv1(purchaser, collection_offer, token_name, property_version); + assert!(coin::balance(marketplace_addr) == 11, 0); + assert!(coin::balance(purchaser_addr) == 9489, 0); + assert!(coin::balance(seller_addr) == 10500, 0); + assert!(tokenv1::balance_of(purchaser_addr, token_id) == 1, 0); + assert!(!collection_offer::exists_at(collection_offer), 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + fun test_token_v1_indirect( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (_marketplace_addr, seller_addr, 
purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + let token_id = test_utils::mint_tokenv1(seller); + assert!(tokenv1::balance_of(seller_addr, token_id) == 1, 0); + + let (creator_addr, collection_name, token_name, property_version) = + tokenv1::get_token_id_fields(&token_id); + + let collection_offer = collection_offer::init_for_tokenv1( + purchaser, + creator_addr, + collection_name, + test_utils::fee_schedule(marketplace), + 500, + 1, + timestamp::now_seconds() + 200, + ); + + let token_container = collection_offer::sell_tokenv1( + seller, + collection_offer, + token_name, + property_version, + ); + listing::extract_tokenv1(purchaser, option::destroy_some(token_container)); + assert!(tokenv1::balance_of(purchaser_addr, token_id) == 1, 0); + assert!(!collection_offer::exists_at(collection_offer), 0); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x50004, location = aptos_framework::object)] + fun test_token_v2_has_none( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + let token = test_utils::mint_tokenv2(seller); + let collection_offer = collection_offer::init_for_tokenv2( + purchaser, + tokenv2::collection_object(token), + test_utils::fee_schedule(marketplace), + 500, + 2, + timestamp::now_seconds() + 200, + ); + collection_offer::sell_tokenv2(marketplace, collection_offer, token); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10005, location = aptos_token::token)] + fun test_token_v1_has_none( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + let token_id = test_utils::mint_tokenv1(seller); + let (creator_addr, collection_name, token_name, property_version) = + tokenv1::get_token_id_fields(&token_id); + + let collection_offer = collection_offer::init_for_tokenv1( + purchaser, + creator_addr, + collection_name, + test_utils::fee_schedule(marketplace), + 500, + 1, + timestamp::now_seconds() + 200, + ); + + collection_offer::sell_tokenv1( + marketplace, + collection_offer, + token_name, + property_version, + ); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x30006, location = marketplace::collection_offer)] + fun test_token_v2_expired( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + let token = test_utils::mint_tokenv2(seller); + let collection_offer = collection_offer::init_for_tokenv2( + purchaser, + tokenv2::collection_object(token), + test_utils::fee_schedule(marketplace), + 500, + 2, + timestamp::now_seconds() + 200, + ); + test_utils::increment_timestamp(200); + collection_offer::sell_tokenv2(seller, collection_offer, token); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x60003, location = marketplace::collection_offer)] + fun test_token_v2_exhausted( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + let token = 
test_utils::mint_tokenv2(seller); + let collection_offer = collection_offer::init_for_tokenv2( + purchaser, + tokenv2::collection_object(token), + test_utils::fee_schedule(marketplace), + 500, + 2, + timestamp::now_seconds() + 200, + ); + collection_offer::sell_tokenv2(seller, collection_offer, token); + collection_offer::sell_tokenv2(purchaser, collection_offer, token); + collection_offer::sell_tokenv2(purchaser, collection_offer, token); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10005, location = marketplace::collection_offer)] + fun test_token_v2_other_collection( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + let token = test_utils::mint_tokenv2(seller); + + let other_collection = collectionv2::create_unlimited_collection( + purchaser, + string::utf8(b"..."), + string::utf8(b"..."), + option::none(), + string::utf8(b"..."), + ); + + let collection_offer = collection_offer::init_for_tokenv2( + purchaser, + object::object_from_constructor_ref(&other_collection), + test_utils::fee_schedule(marketplace), + 500, + 2, + timestamp::now_seconds() + 200, + ); + collection_offer::sell_tokenv2(marketplace, collection_offer, token); + } + + #[test(aptos_framework = @0x1, marketplace = @0x111, seller = @0x222, purchaser = @0x333)] + #[expected_failure(abort_code = 0x10005, location = aptos_token::token)] + fun test_token_v1_other_collection( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ) { + let (_marketplace_addr, _seller_addr, purchaser_addr) = + test_utils::setup(aptos_framework, marketplace, seller, purchaser); + + tokenv1::create_collection( + purchaser, + string::utf8(b"..."), + string::utf8(b"..."), + string::utf8(b"..."), + 1, + vector[true, true, true], + ); + + let collection_offer = collection_offer::init_for_tokenv1( + purchaser, + purchaser_addr, + string::utf8(b"..."), + test_utils::fee_schedule(marketplace), + 500, + 1, + timestamp::now_seconds() + 200, + ); + + let token_id = test_utils::mint_tokenv1(seller); + let (_creator_addr, _collection_name, token_name, property_version) = + tokenv1::get_token_id_fields(&token_id); + collection_offer::sell_tokenv1( + marketplace, + collection_offer, + token_name, + property_version, + ); + } +} +} diff --git a/aptos-move/move-examples/marketplace/sources/fee_schedule.move b/aptos-move/move-examples/marketplace/sources/fee_schedule.move new file mode 100644 index 0000000000000..08904909dc01f --- /dev/null +++ b/aptos-move/move-examples/marketplace/sources/fee_schedule.move @@ -0,0 +1,495 @@ +/// Defines the charges associated with using a marketplace, namely: +/// * Listing rate, the units charged for creating a listing. +/// * Bidding rate, the units per bid made by a potential buyer. +/// * Commission, the units transferred to the marketplace upon sale. +/// +/// Note: this is ony a schedule, a higher layer needs to implement the type of asset class +/// and the transferring of that asset for each scheduled event. +module marketplace::fee_schedule { + use std::error; + use std::signer; + use std::string::{Self, String}; + + use aptos_std::type_info; + + use aptos_framework::event::{Self, EventHandle}; + use aptos_framework::object::{Self, ConstructorRef, ExtendRef, Object}; + + /// FeeSchedule does not exist. 
+ const ENO_FEE_SCHEDULE: u64 = 1; + /// The denominator in a fraction cannot be zero. + const EDENOMINATOR_IS_ZERO: u64 = 2; + /// The value represented by a fraction cannot be greater than 1. + const EEXCEEDS_MAXIMUM: u64 = 3; + /// The passed in signer is not the owner of the marketplace. + const ENOT_OWNER: u64 = 4; + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Defines marketplace fees + struct FeeSchedule has key { + /// Address to send fees to + fee_address: address, + /// Ref for changing the configuration of the marketplace + extend_ref: ExtendRef, + /// An event stream of changes to the fee schedule + mutation_events: EventHandle, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Fixed rate for bidding + struct FixedRateBiddingFee has drop, key { + /// Fixed rate for bidding + bidding_fee: u64, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Fixed rate for listing + struct FixedRateListingFee has drop, key { + /// Fixed rate for listing + listing_fee: u64, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Fixed rate for commission + struct FixedRateCommission has drop, key { + /// Fixed rate for commission + commission: u64, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Percentage-based rate for commission + struct PercentageRateCommission has drop, key { + /// Denominator for the commission rate + denominator: u64, + /// Numerator for the commission rate + numerator: u64, + } + + /// Event representing a change to the marketplace configuration + struct MutationEvent has drop, store { + /// The type info of the struct that was updated. + updated_resource: String, + } + + // Initializers + + /// Create a marketplace with a fixed bidding and listing rate and a percentage commission. + public entry fun init( + creator: &signer, + fee_address: address, + bidding_fee: u64, + listing_fee: u64, + commission_denominator: u64, + commission_numerator: u64, + ) { + init_internal( + creator, + fee_address, + bidding_fee, + listing_fee, + commission_denominator, + commission_numerator, + ); + } + + + public fun init_internal( + creator: &signer, + fee_address: address, + bidding_fee: u64, + listing_fee: u64, + commission_denominator: u64, + commission_numerator: u64, + ): Object { + assert!( + commission_numerator <= commission_denominator, + error::invalid_argument(EEXCEEDS_MAXIMUM), + ); + assert!( + commission_denominator != 0, + error::out_of_range(EDENOMINATOR_IS_ZERO), + ); + + let (constructor_ref, fee_schedule_signer) = empty_init(creator, fee_address); + move_to(&fee_schedule_signer, FixedRateBiddingFee { bidding_fee }); + move_to(&fee_schedule_signer, FixedRateListingFee { listing_fee }); + let commission_rate = PercentageRateCommission { + denominator: commission_denominator, + numerator: commission_numerator, + }; + move_to(&fee_schedule_signer, commission_rate); + object::object_from_constructor_ref(&constructor_ref) + } + + /// Create a marketplace with no fees. 
+ public entry fun empty(creator: &signer, fee_address: address) { + empty_init(creator, fee_address); + } + + inline fun empty_init(creator: &signer, fee_address: address): (ConstructorRef, signer) { + let constructor_ref = object::create_object_from_account(creator); + let extend_ref = object::generate_extend_ref(&constructor_ref); + let fee_schedule_signer = object::generate_signer(&constructor_ref); + + let marketplace = FeeSchedule { + fee_address, + extend_ref, + mutation_events: object::new_event_handle(&fee_schedule_signer), + }; + move_to(&fee_schedule_signer, marketplace); + + (constructor_ref, fee_schedule_signer) + } + + // Mutators + + /// Set the fee address + public entry fun set_fee_address( + creator: &signer, + marketplace: Object, + fee_address: address, + ) acquires FeeSchedule { + let fee_schedule_addr = assert_exists_internal(&marketplace); + assert!( + object::is_owner(marketplace, signer::address_of(creator)), + error::permission_denied(ENOT_OWNER), + ); + let fee_schedule_obj = borrow_global_mut(fee_schedule_addr); + fee_schedule_obj.fee_address = fee_address; + let updated_resource = string::utf8(b"fee_address"); + event::emit_event(&mut fee_schedule_obj.mutation_events, MutationEvent { updated_resource }); + } + + /// Remove any existing listing fees and set a fixed rate listing fee. + public entry fun set_fixed_rate_listing_fee( + creator: &signer, + marketplace: Object, + fee: u64, + ) acquires FeeSchedule, FixedRateListingFee { + let fee_schedule_signer = remove_listing_fee(creator, marketplace); + move_to(&fee_schedule_signer, FixedRateListingFee { listing_fee: fee }); + let updated_resource = type_info::type_name(); + emit_mutation_event(signer::address_of(&fee_schedule_signer), updated_resource); + } + + inline fun remove_listing_fee( + creator: &signer, + marketplace: Object, + ): signer acquires FeeSchedule, FixedRateListingFee { + let (fee_schedule_signer, fee_schedule_addr) = assert_access(creator, marketplace); + if (exists(fee_schedule_addr)) { + move_from(fee_schedule_addr); + }; + fee_schedule_signer + } + + /// Remove any existing bidding fees and set a fixed rate bidding fee. + public entry fun set_fixed_rate_bidding_fee( + creator: &signer, + marketplace: Object, + fee: u64, + ) acquires FeeSchedule, FixedRateBiddingFee { + let fee_schedule_signer = remove_bidding_fee(creator, marketplace); + move_to(&fee_schedule_signer, FixedRateBiddingFee { bidding_fee: fee }); + let updated_resource = type_info::type_name(); + emit_mutation_event(signer::address_of(&fee_schedule_signer), updated_resource); + } + + inline fun remove_bidding_fee( + creator: &signer, + marketplace: Object, + ): signer acquires FeeSchedule, FixedRateBiddingFee { + let (fee_schedule_signer, fee_schedule_addr) = assert_access(creator, marketplace); + if (exists(fee_schedule_addr)) { + move_from(fee_schedule_addr); + }; + fee_schedule_signer + } + + /// Remove any existing commission and set a fixed rate commission. + public entry fun set_fixed_rate_commission( + creator: &signer, + marketplace: Object, + commission: u64, + ) acquires FeeSchedule, FixedRateCommission, PercentageRateCommission { + let fee_schedule_signer = remove_commission(creator, marketplace); + move_to(&fee_schedule_signer, FixedRateCommission { commission }); + let updated_resource = type_info::type_name(); + emit_mutation_event(signer::address_of(&fee_schedule_signer), updated_resource); + } + + /// Remove any existing commission and set a percentage rate commission. 
+ public entry fun set_percentage_rate_commission( + creator: &signer, + marketplace: Object, + denominator: u64, + numerator: u64, + ) acquires FeeSchedule, FixedRateCommission, PercentageRateCommission { + assert!( + numerator <= denominator, + error::invalid_argument(EEXCEEDS_MAXIMUM), + ); + assert!( + denominator != 0, + error::out_of_range(EDENOMINATOR_IS_ZERO), + ); + + let fee_schedule_signer = remove_commission(creator, marketplace); + move_to(&fee_schedule_signer, PercentageRateCommission { denominator, numerator }); + let updated_resource = type_info::type_name(); + emit_mutation_event(signer::address_of(&fee_schedule_signer), updated_resource); + } + + inline fun remove_commission( + creator: &signer, + marketplace: Object, + ): signer acquires FeeSchedule, FixedRateCommission, PercentageRateCommission { + let (fee_schedule_signer, fee_schedule_addr) = assert_access(creator, marketplace); + if (exists(fee_schedule_addr)) { + move_from(fee_schedule_addr); + } else if (exists(fee_schedule_addr)) { + move_from(fee_schedule_addr); + }; + fee_schedule_signer + } + + inline fun assert_access( + creator: &signer, + marketplace: Object, + ): (signer, address) acquires FeeSchedule { + let fee_schedule_addr = assert_exists_internal(&marketplace); + assert!( + object::is_owner(marketplace, signer::address_of(creator)), + error::permission_denied(ENOT_OWNER), + ); + let fee_schedule_obj = borrow_global(fee_schedule_addr); + let fee_schedule_signer = object::generate_signer_for_extending(&fee_schedule_obj.extend_ref); + (fee_schedule_signer, fee_schedule_addr) + } + + inline fun emit_mutation_event( + fee_schedule_addr: address, + updated_resource: String, + ) acquires FeeSchedule { + let marketplace = borrow_global_mut(fee_schedule_addr); + event::emit_event(&mut marketplace.mutation_events, MutationEvent { updated_resource }); + } + + // View functions + + #[view] + public fun fee_address(marketplace: Object): address acquires FeeSchedule { + let fee_schedule_addr = assert_exists_internal(&marketplace); + borrow_global(fee_schedule_addr).fee_address + } + + #[view] + public fun listing_fee( + marketplace: Object, + _base: u64, + ): u64 acquires FixedRateListingFee { + let fee_schedule_addr = assert_exists_internal(&marketplace); + if (exists(fee_schedule_addr)) { + borrow_global(fee_schedule_addr).listing_fee + } else { + 0 + } + } + + #[view] + public fun bidding_fee( + marketplace: Object, + _bid: u64, + ): u64 acquires FixedRateBiddingFee { + let fee_schedule_addr = assert_exists_internal(&marketplace); + if (exists(fee_schedule_addr)) { + borrow_global(fee_schedule_addr).bidding_fee + } else { + 0 + } + } + + #[view] + public fun commission( + marketplace: Object, + price: u64, + ): u64 acquires FixedRateCommission, PercentageRateCommission { + let fee_schedule_addr = assert_exists_internal(&marketplace); + if (exists(fee_schedule_addr)) { + borrow_global(fee_schedule_addr).commission + } else if (exists(fee_schedule_addr)) { + let fees = borrow_global(fee_schedule_addr); + ((price as u128) * (fees.numerator as u128) / (fees.denominator as u128) as u64) + } else { + 0 + } + } + + public fun assert_exists(marketplace: &Object) { + assert_exists_internal(marketplace); + } + + inline fun assert_exists_internal(marketplace: &Object): address { + let fee_schedule_addr = object::object_address(marketplace); + assert!( + exists(fee_schedule_addr), + error::not_found(ENO_FEE_SCHEDULE), + ); + fee_schedule_addr + } + + // Tests + + #[test_only] + use aptos_framework::account; + + 
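// Percentage commission is computed as price * numerator / denominator using u128 intermediates (see commission() above); with the 1/10 rate set in these tests, commission(obj, 20) == 2. +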
#[test(creator = @0x123)] + fun test_init( + creator: &signer, + ) acquires FeeSchedule, FixedRateBiddingFee, FixedRateCommission, FixedRateListingFee, PercentageRateCommission { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + + assert!(fee_address(obj) == creator_addr, 0); + assert!(listing_fee(obj, 5) == 0, 0); + assert!(bidding_fee(obj, 5) == 0, 0); + assert!(commission(obj, 5) == 0, 0); + + set_fee_address(creator, obj, @0x0); + set_fixed_rate_listing_fee(creator, obj, 5); + set_fixed_rate_bidding_fee(creator, obj, 6); + set_percentage_rate_commission(creator, obj, 10, 1); + + assert!(fee_address(obj) == @0x0, 0); + assert!(listing_fee(obj, 5) == 5, 0); + assert!(bidding_fee(obj, 5) == 6, 0); + assert!(commission(obj, 20) == 2, 0); + + set_fixed_rate_commission(creator, obj, 8); + assert!(commission(obj, 20) == 8, 0); + } + + #[test(creator = @0x123)] + fun test_empty_init( + creator: &signer, + ) acquires FeeSchedule, FixedRateBiddingFee, FixedRateCommission, FixedRateListingFee, PercentageRateCommission { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let (constructor_ref, _fee_schedule_signer) = empty_init(creator, creator_addr); + let obj = object::object_from_constructor_ref(&constructor_ref); + + assert!(fee_address(obj) == creator_addr, 0); + assert!(listing_fee(obj, 5) == 0, 0); + assert!(bidding_fee(obj, 5) == 0, 0); + assert!(commission(obj, 5) == 0, 0); + + set_fee_address(creator, obj, @0x0); + set_fixed_rate_listing_fee(creator, obj, 5); + set_fixed_rate_bidding_fee(creator, obj, 6); + set_percentage_rate_commission(creator, obj, 10, 1); + + assert!(fee_address(obj) == @0x0, 0); + assert!(listing_fee(obj, 5) == 5, 0); + assert!(bidding_fee(obj, 5) == 6, 0); + assert!(commission(obj, 20) == 2, 0); + + set_fixed_rate_commission(creator, obj, 8); + assert!(commission(obj, 20) == 8, 0); + } + + #[test(creator = @0x123, non_creator = @0x223)] + #[expected_failure(abort_code = 0x50004, location = Self)] + fun test_non_creator_fee_address(creator: &signer, non_creator: &signer) acquires FeeSchedule { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + set_fee_address(non_creator, obj, @0x0); + } + + #[test(creator = @0x123, non_creator = @0x223)] + #[expected_failure(abort_code = 0x50004, location = Self)] + fun test_non_creator_fixed_listing( + creator: &signer, + non_creator: &signer, + ) acquires FeeSchedule, FixedRateListingFee { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + set_fixed_rate_listing_fee(non_creator, obj, 5); + } + + #[test(creator = @0x123, non_creator = @0x223)] + #[expected_failure(abort_code = 0x50004, location = Self)] + fun test_non_creator_fixed_bidding( + creator: &signer, + non_creator: &signer, + ) acquires FeeSchedule, FixedRateBiddingFee { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + set_fixed_rate_bidding_fee(non_creator, obj, 6); + } + + #[test(creator = @0x123, non_creator = @0x223)] + #[expected_failure(abort_code = 0x50004, location = Self)] + fun test_non_creator_percentage_commission( + creator: &signer, + non_creator: &signer, + ) 
acquires FeeSchedule, FixedRateCommission, PercentageRateCommission { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + set_percentage_rate_commission(non_creator, obj, 10, 1); + } + + #[test(creator = @0x123, non_creator = @0x223)] + #[expected_failure(abort_code = 0x50004, location = Self)] + fun test_non_creator_fixed_commission( + creator: &signer, + non_creator: &signer, + ) acquires FeeSchedule, FixedRateCommission, PercentageRateCommission { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + set_fixed_rate_commission(non_creator, obj, 8); + } + + #[test(creator = @0x123)] + #[expected_failure(abort_code = 0x20002, location = Self)] + fun test_init_zero_denominator_percentage_commission(creator: &signer) { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + init_internal(creator, creator_addr, 0, 0, 0, 0); + } + + #[test(creator = @0x123)] + #[expected_failure(abort_code = 0x20002, location = Self)] + fun test_set_zero_denominator_percentage_commission( + creator: &signer, + ) acquires FeeSchedule, FixedRateCommission, PercentageRateCommission { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + set_percentage_rate_commission(creator, obj, 0, 0); + } + + #[test(creator = @0x123)] + #[expected_failure(abort_code = 0x10003, location = Self)] + fun test_init_too_big_percentage_commission(creator: &signer) { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + init_internal(creator, creator_addr, 0, 0, 1, 2); + } + + #[test(creator = @0x123)] + #[expected_failure(abort_code = 0x10003, location = Self)] + fun test_set_too_big_percentage_commission( + creator: &signer, + ) acquires FeeSchedule, FixedRateCommission, PercentageRateCommission { + let creator_addr = signer::address_of(creator); + account::create_account_for_test(creator_addr); + let obj = init_internal(creator, creator_addr, 0, 0, 1, 0); + set_percentage_rate_commission(creator, obj, 1, 2); + } +} diff --git a/aptos-move/move-examples/marketplace/sources/listing.move b/aptos-move/move-examples/marketplace/sources/listing.move new file mode 100644 index 0000000000000..e2f135f3f0996 --- /dev/null +++ b/aptos-move/move-examples/marketplace/sources/listing.move @@ -0,0 +1,272 @@ +/// Defines a single listing or an item for sale or auction. This is an escrow service that +/// enables two parties to exchange one asset for another. +/// Each listing has the following properties: +/// * FeeSchedule specifying payment flows +/// * Owner or the person that can end the sale or auction +/// * Starting time +/// * Logic for cleanup +module marketplace::listing { + use std::error; + use std::option; + use std::signer; + use std::string::String; + + use aptos_std::math64; + + use aptos_framework::object::{Self, ConstructorRef, DeleteRef, ExtendRef, Object, ObjectCore, TransferRef}; + use aptos_framework::timestamp; + + use aptos_token::token::{Self as tokenv1, Token as TokenV1}; + use aptos_token_objects::token as tokenv2; + use aptos_token_objects::royalty; + + use marketplace::fee_schedule::FeeSchedule; + + friend marketplace::coin_listing; + + /// There exists no listing. 
+ const ENO_LISTING: u64 = 1; + /// The listing is not yet live. + const ELISTING_NOT_STARTED: u64 = 2; + /// The entity is not the creator. + const ENOT_CREATOR: u64 = 3; + /// The entity is not the owner of the wrapped token. + const ENOT_OWNER: u64 = 4; + + // Core data structures + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Corner-stone for all listings, represents the core utility layer including object + /// cleanup. + struct Listing has key { + /// The item owned by this listing, transferred to the new owner at the end. + object: Object, + /// The seller of the object + seller: address, + /// The fees associated with claiming this listing. + fee_schedule: Object, + /// The Unix timestamp in seconds at which point bidding and purchasing can occur + start_time: u64, + /// Used to clean-up at the end. + delete_ref: DeleteRef, + /// Used to create a signer to transfer the listed item, ideally the TransferRef would + /// support this. + extend_ref: ExtendRef, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Contains a tokenv1 as an object + struct TokenV1Container has key { + /// The stored token. + token: TokenV1, + /// Used to cleanup the object at the end + delete_ref: DeleteRef, + /// Used to transfer the tokenv1 at the conclusion of a purchase. + transfer_ref: TransferRef, + } + + // Init functions + + public(friend) fun init( + creator: &signer, + object: Object, + fee_schedule: Object, + start_time: u64, + ): (signer, ConstructorRef) { + let constructor_ref = object::create_object_from_account(creator); + // Once we construct this, both the listing and its contents are soulbound until the conclusion. + let transfer_ref = object::generate_transfer_ref(&constructor_ref); + object::disable_ungated_transfer(&transfer_ref); + let listing_signer = object::generate_signer(&constructor_ref); + + let listing = Listing { + object, + seller: signer::address_of(creator), + fee_schedule, + start_time, + delete_ref: object::generate_delete_ref(&constructor_ref), + extend_ref: object::generate_extend_ref(&constructor_ref), + }; + move_to(&listing_signer, listing); + + let listing_addr = object::address_from_constructor_ref(&constructor_ref); + object::transfer(creator, object, listing_addr); + + (listing_signer, constructor_ref) + } + + public(friend) fun create_tokenv1_container( + seller: &signer, + token_creator: address, + token_collection: String, + token_name: String, + token_property_version: u64, + ): Object { + let token_id = tokenv1::create_token_id_raw( + token_creator, + token_collection, + token_name, + token_property_version, + ); + let token = tokenv1::withdraw_token(seller, token_id, 1); + create_tokenv1_container_with_token(seller, token) + } + + public fun create_tokenv1_container_with_token( + seller: &signer, + token: TokenV1, + ): Object { + let constructor_ref = object::create_object_from_account(seller); + let container_signer = object::generate_signer(&constructor_ref); + let delete_ref = object::generate_delete_ref(&constructor_ref); + let transfer_ref = object::generate_transfer_ref(&constructor_ref); + + move_to(&container_signer, TokenV1Container { token, delete_ref, transfer_ref }); + object::object_from_constructor_ref(&constructor_ref) + } + + // Mutators + + /// This should be called at the end of a listing. 
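+    /// If the recipient has opted in to direct transfer, the wrapped TokenV1 is deposited
+    /// straight into their token store and the container object is deleted; otherwise the
+    /// container object itself is transferred to the recipient, who can later unwrap it
+    /// with `extract_tokenv1`.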
+ public(friend) fun extract_or_transfer_tokenv1( + recipient: address, + object: Object, + ) acquires TokenV1Container { + let direct_transfer_enabled = tokenv1::get_direct_transfer(recipient); + let object_addr = object::object_address(&object); + if (direct_transfer_enabled) { + let TokenV1Container { + token, + delete_ref, + transfer_ref: _, + } = move_from(object_addr); + tokenv1::direct_deposit_with_opt_in(recipient, token); + object::delete(delete_ref); + } else { + let tokenv1_container = borrow_global(object_addr); + let linear_transfer_ref = + object::generate_linear_transfer_ref(&tokenv1_container.transfer_ref); + object::transfer_with_ref(linear_transfer_ref, recipient); + }; + } + + /// If the account did not have tokenv1 enabled, then it must call this after making the + /// purchase to extract the token. + public entry fun extract_tokenv1( + owner: &signer, + object: Object, + ) acquires TokenV1Container { + let object_addr = object::object_address(&object); + assert!( + object::is_owner(object, signer::address_of(owner)), + error::permission_denied(ENOT_OWNER), + ); + let TokenV1Container { + token, + delete_ref, + transfer_ref: _, + } = move_from(object_addr); + object::delete(delete_ref); + tokenv1::deposit_token(owner, token); + } + + /// The listing has concluded, transfer the asset and delete the listing. Returns the seller + /// for depositing any profit and the fee schedule for the marketplaces commission. + public(friend) fun close( + object: Object, + recipient: address, + ): (address, Object) acquires Listing, TokenV1Container { + let listing_addr = object::object_address(&object); + let Listing { + object, + seller, + fee_schedule, + start_time: _, + delete_ref, + extend_ref, + } = move_from(listing_addr); + + let obj_signer = object::generate_signer_for_extending(&extend_ref); + if (exists(object::object_address(&object))) { + extract_or_transfer_tokenv1(recipient, object::convert(object)); + } else { + object::transfer(&obj_signer, object, recipient); + }; + object::delete(delete_ref); + + (seller, fee_schedule) + } + + public(friend) fun assert_started(object: &Object): address acquires Listing { + let listing_addr = object::object_address(object); + assert!(exists(listing_addr), error::not_found(ENO_LISTING)); + + let listing = borrow_global(listing_addr); + let now = timestamp::now_seconds(); + assert!(listing.start_time <= now, error::invalid_state(ELISTING_NOT_STARTED)); + listing_addr + } + + // View + + #[view] + public fun seller(object: Object): address acquires Listing { + let listing = borrow_listing(object); + listing.seller + } + + #[view] + public fun listed_object(object: Object): Object acquires Listing { + let listing = borrow_listing(object); + listing.object + } + + #[view] + public fun fee_schedule(object: Object): Object acquires Listing { + let listing = borrow_listing(object); + listing.fee_schedule + } + + #[view] + /// Compute the royalty either from the internal TokenV1, TokenV2 if it exists, or return + /// no royalty. 
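+    /// The royalty amount is amount * numerator / denominator of whichever royalty is found.
+    /// For example, with an illustrative 2/100 royalty configured on the token, calling this
+    /// with an amount of 500 returns the payee address together with 500 * 2 / 100 = 10.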
+ public fun compute_royalty( + object: Object, + amount: u64, + ): (address, u64) acquires Listing, TokenV1Container { + let listing = borrow_listing(object); + let obj_addr = object::object_address(&listing.object); + if (exists(obj_addr)) { + let token_container = borrow_global(obj_addr); + let token_id = tokenv1::get_token_id(&token_container.token); + let royalty = tokenv1::get_royalty(token_id); + + let payee_address = tokenv1::get_royalty_payee(&royalty); + let numerator = tokenv1::get_royalty_numerator(&royalty); + let denominator = tokenv1::get_royalty_denominator(&royalty); + + let royalty_amount = math64::mul_div(amount, numerator, denominator); + (payee_address, royalty_amount) + } else { + let royalty = tokenv2::royalty(listing.object); + if (option::is_some(&royalty)) { + let royalty = option::destroy_some(royalty); + let payee_address = royalty::payee_address(&royalty); + let royalty_amount = math64::mul_div( + amount, + royalty::numerator(&royalty), + royalty::denominator(&royalty) + ); + (payee_address, royalty_amount) + } else { + (@0x0, 0) + } + } + } + + inline fun borrow_listing(object: Object): &Listing acquires Listing { + let obj_addr = object::object_address(&object); + assert!(exists(obj_addr), error::not_found(ENO_LISTING)); + borrow_global(obj_addr) + } +} diff --git a/aptos-move/move-examples/marketplace/sources/marketplace_auction_example.move b/aptos-move/move-examples/marketplace/sources/marketplace_auction_example.move deleted file mode 100644 index 6b03ebfb9c746..0000000000000 --- a/aptos-move/move-examples/marketplace/sources/marketplace_auction_example.move +++ /dev/null @@ -1,638 +0,0 @@ -/// This is an example demonstrating how to use marketplace_bid_utils and market_place_listing_utils to build an auction house -/// The basic flow can be found in test test_listing_one_and_two_bids -/// For more detailed description, check readme -module marketplace::marketplace_auction_example { - - use aptos_framework::account; - use aptos_framework::coin; - use aptos_framework::event::{Self, EventHandle}; - use aptos_framework::timestamp; - use aptos_std::simple_map::{Self, SimpleMap}; - use aptos_std::table::{Self, Table}; - use aptos_token::token::{Self, TokenId}; - use marketplace::marketplace_bid_utils::{Self as bid, BidId, create_bid_id}; - use marketplace::marketplace_listing_utils::{Self as listing, Listing, ListingEvent}; - use std::error; - use std::signer; - use std::string::String; - use std::vector; - use aptos_framework::guid; - use aptos_token::property_map; - - // - // Errors - // - - /// Expiration time is invalid - const EINVALID_EXPIRATION_TIME: u64 = 1; - - /// Start time is invalid - const EINVALID_START_TIME: u64 = 2; - - /// Auction doesn't exist - const EAUCTION_NOT_EXIST: u64 = 3; - - /// Bid increase less than minimal incremental - const EBID_INCREASE_TOO_SMALL: u64 = 4; - - /// Auction ended - const EAUCTION_ENDED: u64 = 5; - - /// Minimal incremental should be bigger than 0 - const EBID_MIN_INCREMENTAL_IS_ZERO: u64 = 6; - - /// Bid not found - const EBID_NOT_FOUND_FOR_AUCTION: u64 = 7; - - /// Reserved operation for auction house owner - const EONLY_AUCTION_HOUSE_OWNER_CAN_PERFORM_THIS_OPERATION: u64 = 8; - - /// Auction not ended - const EAUCTION_NOT_ENDED: u64 = 9; - - /// Bid with same price exists for this auction - const EBID_WITH_SAME_PRICE_EXISTS: u64 = 10; - - /// Bid not match the bid_id in the auction - const EBID_NOT_MATCH_ID_IN_AUCTION: u64 = 11; - - /// Auction has zero bids - const EAUCION_HAS_ZERO_BIDS: u64 = 12; - - /// 
Auction highest bid is zero - const EAUCTION_HIGHEST_BID_ZERO: u64 = 13; - - struct AuctionHouseConfig has key { - market_fee_numerator: u64, - market_fee_denominator: u64, - fee_address: address, - } - - struct Auctions has key { - cur_auction_id: u64, // this is used to generate next auction_id - all_active_auctions: Table>, - listing_event: EventHandle, - bid_event: EventHandle, - cancel_bid_events: EventHandle - } - - struct BidEvent has copy, drop, store { - market_address: address, - bid_id: BidId, - offer_price: u64, - expiration_sec: u64, - } - - struct CancelBidEvent has copy, drop, store { - market_address: address, - bid_id: BidId, - } - - struct Auction has drop, store { - listing: Listing, - bids: SimpleMap, // mapping between the price and BidId - offer_numbers: vector, // the prices recorded for all the bids - } - - public entry fun initialize_auction_house( - account: &signer, - market_fee_numerator: u64, - market_fee_denominator: u64, - fee_address: address, - ) { - move_to( - account, - AuctionHouseConfig { - market_fee_denominator, - market_fee_numerator, - fee_address, - } - ); - } - - public entry fun initialize_auction(account: &signer) { - move_to( - account, - Auctions { - cur_auction_id: 0, - all_active_auctions: table::new(), - listing_event: account::new_event_handle(account), - bid_event: account::new_event_handle(account), - cancel_bid_events: account::new_event_handle(account), - - } - ); - } - - public fun generate_auction_data( - owner: &signer, - token_id: TokenId, - amount: u64, - min_price: u64, - start_sec: u64, // specify when the auction starts - expiration_sec: u64, // specify when the auction ends - withdraw_expiration_sec: u64, - ): Auction { - let sec = timestamp::now_seconds(); - assert!(sec <= start_sec, error::invalid_argument(EINVALID_START_TIME)); - assert!(start_sec < expiration_sec, error::invalid_argument(EINVALID_EXPIRATION_TIME)); - let listing = listing::create_listing( - owner, - token_id, - amount, - min_price, - false, - start_sec, - expiration_sec, - withdraw_expiration_sec, - vector[], - vector>[], - vector[], - ); - - Auction{ - listing, - bids: simple_map::create(), - offer_numbers: vector::empty(), - } - } - - public entry fun create_auction( - owner: &signer, - creator: address, - collection_name: String, - token_name: String, - property_version: u64, - amount: u64, - min_price: u64, - start_sec: u64, // specify when the auction starts - expiration_sec: u64, // specify when the auction ends - withdraw_expiration_sec: u64, // specify deadline of token withdraw - ) acquires Auctions { - let token_id = token::create_token_id_raw(creator, collection_name, token_name, property_version); - create_auction_with_token_id( - owner, - token_id, - amount, - min_price, - start_sec, - expiration_sec, - withdraw_expiration_sec, - ); - } - - public fun create_auction_with_token_id( - owner: &signer, - token_id: TokenId, - amount: u64, - min_price: u64, - start_sec: u64, // specify when the auction starts - listing_expiration_sec: u64, // specify when the auction ends - withdraw_expiration_sec: u64, // specify deadline of token withdraw - ): u64 acquires Auctions { - - let auction = generate_auction_data( - owner, - token_id, - amount, - min_price, - start_sec, - listing_expiration_sec, - withdraw_expiration_sec, // allow time to withdraw - ); - - // initialized coin store when listing - coin::register(owner); - - let auctions = borrow_global_mut>(@marketplace); - event::emit_event( - &mut auctions.listing_event, - listing::create_listing_event( 
- listing::get_listing_id(&auction.listing), - token_id, - amount, - min_price, - false, - start_sec, - listing_expiration_sec, - listing_expiration_sec + 50, - @marketplace, - property_map::empty(), - ), - ); - - let next_id = auctions.cur_auction_id + 1; - *(&mut auctions.cur_auction_id) = next_id; - table::add(&mut auctions.all_active_auctions, next_id, auction); - next_id - } - - public entry fun bid( - bidder: &signer, - creator: address, - collection_name: String, - token_name: String, - property_version: u64, - token_amount:u64, - offer_price: u64, - auction_id: u64, - withdraw_expiration_sec: u64, - ) acquires Auctions { - // create bid and store it under the user account - let token_id = token::create_token_id_raw(creator, collection_name, token_name, property_version); - create_bid_with_token_id(bidder, token_id, token_amount, offer_price, auction_id, withdraw_expiration_sec); - } - - /// Allow the bid to increase the coin for an existing bid - public entry fun increase_bid( - bidder: &signer, - price_delta: u64, - auction_id: u64, - ) acquires Auctions { - let auctions = borrow_global_mut>(@marketplace); - assert!(table::contains(&mut auctions.all_active_auctions, auction_id), error::not_found(EAUCTION_NOT_EXIST)); - let auction = table::borrow_mut(&mut auctions.all_active_auctions, auction_id); - let listing_id = listing::get_listing_id(&auction.listing); - let bid_id = bid::create_bid_id(signer::address_of(bidder), listing_id); - increase_bid_price( - bidder, - bid_id, - price_delta, - auction_id, - ) - } - - - /// Increase the offered price for an existing bid - /// The new price should not be same as any existing offered price - public fun increase_bid_price( - bidder: &signer, - bid_id: BidId, - price_delta: u64, - auction_id: u64, - ) acquires Auctions { - let auctions = borrow_global_mut>(@marketplace); - assert!(table::contains(&mut auctions.all_active_auctions, auction_id), error::not_found(EAUCTION_NOT_EXIST)); - let auction = table::borrow_mut(&mut auctions.all_active_auctions, auction_id); - - // get the listing info - // auction is still active - let now = timestamp::now_seconds(); - assert!(now <= listing::get_listing_expiration(&auction.listing), error::invalid_argument(EAUCTION_ENDED)); - - - // assert new offer_price is not duplicate price - let (old_price, _) = bid::get_bid_info(bid_id); - let new_offer_price = old_price + price_delta; - // check if same price exists previously, only bid with a different price can enter the auction - assert!(!simple_map::contains_key(&auction.bids, &new_offer_price), error::already_exists(EBID_WITH_SAME_PRICE_EXISTS)); - - bid::increase_bid( - bidder, - bid_id, - price_delta, - &auction.listing - ); - } - - public fun create_bid_with_token_id( - bidder: &signer, - token_id: TokenId, - token_amount:u64, - offer_price: u64, - auction_id: u64, - withdraw_expiration_sec: u64, - ): BidId acquires Auctions { - let auctions = borrow_global_mut>(@marketplace); - assert!(table::contains(&mut auctions.all_active_auctions, auction_id), error::not_found(EAUCTION_NOT_EXIST)); - let auction = table::borrow_mut(&mut auctions.all_active_auctions, auction_id); - - // initialize token store when bidding - token::initialize_token_store(bidder); - - // get the listing info - // auction is still active - let now = timestamp::now_seconds(); - assert!(now <= listing::get_listing_expiration(&auction.listing), error::invalid_argument(EAUCTION_ENDED)); - - // check if same price exists previously, only bid with a different price can enter the auction - 
assert!(!simple_map::contains_key(&auction.bids, &offer_price), error::already_exists(EBID_WITH_SAME_PRICE_EXISTS)); - - // allow participant to withdraw coin 60 secs after auction ends, configurable by each marketplace - let bid_id = bid::bid( - bidder, - token_id, - token_amount, - offer_price * token_amount, - &auction.listing, - withdraw_expiration_sec, - vector[], - vector>[], - vector[], - ); - - event::emit_event( - &mut auctions.bid_event, - BidEvent { - market_address: @marketplace, - bid_id, - offer_price, - expiration_sec: withdraw_expiration_sec, - }, - ); - - // store the bid for this auction, only higher bid can enter the auction. - simple_map::add(&mut auction.bids, offer_price, bid_id); - vector::push_back(&mut auction.offer_numbers, offer_price); - bid_id - } - - /// Auction house owner can remove auction from inventory - public fun remove_auction(account: &signer, auction_id: u64): Auction acquires Auctions { - assert!(signer::address_of(account) == @marketplace, error::permission_denied(EONLY_AUCTION_HOUSE_OWNER_CAN_PERFORM_THIS_OPERATION)); - let auctions = borrow_global_mut>(@marketplace); - table::remove(&mut auctions.all_active_auctions, auction_id) - } - - /// Complete the auction, select the highest bid from existing bids and execute the bid against the listing - public entry fun complete_auction(account: &signer, auction_id: u64) acquires Auctions, AuctionHouseConfig { - assert!(signer::address_of(account) == @marketplace, error::permission_denied(EONLY_AUCTION_HOUSE_OWNER_CAN_PERFORM_THIS_OPERATION)); - let auctions = borrow_global_mut>(@marketplace); - assert!(table::contains(&auctions.all_active_auctions, auction_id), error::not_found(EAUCTION_NOT_EXIST)); - - let auction = table::borrow_mut(&mut auctions.all_active_auctions, auction_id); - let expiration_time = listing::get_listing_expiration(&auction.listing); - let now = timestamp::now_seconds(); - assert!(now >= expiration_time, error::invalid_state(EAUCTION_NOT_ENDED)); - - let config = borrow_global(@marketplace); - let auction = remove_auction(account, auction_id); - let highest_bid_id = find_highest_bid(&auction); - - let Auction { - listing, - bids, - offer_numbers: _, - } = auction; - - - if ( simple_map::length(&bids) > 0) { - // get the bid corresponding to highest price - bid::execute_listing_bid( - highest_bid_id, - listing, - config.fee_address, - config.market_fee_numerator, - config.market_fee_denominator, - ); - }; - } - - /// The same function exists in the marketplace bid utils. 
- /// Have this function here is to make the marketplace feature complete since the marketplace contract should also - /// allow users an entry function to withdraw coin - public entry fun withdraw_coin_from_bid( - bidder: &signer, - lister_addr: address, - listing_creation_number: u64, - ) { - bid::withdraw_coin_from_bid(bidder, lister_addr, listing_creation_number); - } - - /// bidder can remove their bid from the auction so that the bid won't participate in auction - /// This doesn't withdraw the actual coin from the bid - public entry fun cancel_bid_in_auction( - bidder: &signer, - auction_id: u64, - ) acquires Auctions { - let auctions = borrow_global_mut>(@marketplace); - assert!(table::contains(&auctions.all_active_auctions, auction_id), error::not_found(EAUCTION_NOT_EXIST)); - let auction = table::borrow_mut(&mut auctions.all_active_auctions, auction_id); - - let now = timestamp::now_seconds(); - assert!(now < listing::get_listing_expiration(&auction.listing), error::invalid_argument(EAUCTION_ENDED)); - - let listing_id = listing::get_listing_id(&auction.listing); - let bidder_address = signer::address_of(bidder); - let bid_id = create_bid_id(bidder_address, listing_id); - - let (offer_price, _) = bid::get_bid_info(bid_id); - assert!(simple_map::contains_key(&mut auction.bids, &offer_price), error::not_found(EBID_NOT_FOUND_FOR_AUCTION)); - assert!( - *simple_map::borrow(&mut auction.bids, &offer_price) == bid_id, - error::permission_denied(EBID_NOT_MATCH_ID_IN_AUCTION) - ); - - simple_map::remove(&mut auction.bids, &offer_price); - let (found, index) = vector::index_of(&mut auction.offer_numbers, &offer_price); - assert!(found, error::not_found(EBID_NOT_FOUND_FOR_AUCTION)); - vector::swap_remove(&mut auction.offer_numbers, index); - - event::emit_event( - &mut auctions.cancel_bid_events, - CancelBidEvent { - market_address: @marketplace, - bid_id, - }, - ); - } - - /// Get the listing id corresponding to a auction - public fun get_auction_listing_id(auction_id: u64): guid::ID acquires Auctions { - let auctions = borrow_global_mut>(@marketplace); - assert!(table::contains(&auctions.all_active_auctions, auction_id), error::not_found(EAUCTION_NOT_EXIST)); - let auction = table::borrow_mut(&mut auctions.all_active_auctions, auction_id); - - listing::get_listing_id(&auction.listing) - } - - fun find_highest_bid(auction: &Auction): BidId { - assert!(simple_map::length(&auction.bids) > 0, error::invalid_state(EAUCION_HAS_ZERO_BIDS)); - let highest_price = 0; - let ind = 0; - while (ind < vector::length(&auction.offer_numbers)) { - let price = *vector::borrow(&auction.offer_numbers, ind); - if (price > highest_price) { - highest_price = price; - }; - ind = ind + 1; - }; - assert!(highest_price > 0, error::invalid_state(EAUCTION_HIGHEST_BID_ZERO)); - *simple_map::borrow(&auction.bids, &highest_price) - } - - #[test(lister = @marketplace, bidder_a = @0xBB, bidder_b = @0xBA, framework = @0x1, house = @marketplace, fee_account = @0xa)] - public fun test_listing_one_and_two_bids( - lister: signer, - bidder_a: signer, - bidder_b: signer, - framework: signer, - house: signer, - fee_account: signer, - ) acquires Auctions, AuctionHouseConfig { - use aptos_framework::coin; - use aptos_framework::account; - timestamp::set_time_has_started_for_testing(&framework); - timestamp::update_global_time_for_test(1); - account::create_account_for_test(signer::address_of(&lister)); - account::create_account_for_test(signer::address_of(&bidder_a)); - 
account::create_account_for_test(signer::address_of(&bidder_b)); - account::create_account_for_test(signer::address_of(&framework)); - account::create_account_for_test(signer::address_of(&fee_account)); - - - // setup the auction house global fee config and config for each coin type - initialize_auction_house( - &house, - 1, - 100, - signer::address_of(&fee_account) - ); - initialize_auction(&house); - - // owner creats a listing - let token_id = token::create_collection_and_token( - &lister, - 2, - 2, - 2, - vector[], - vector>[], - vector[], - vector[false, false, false], - vector[false, false, false, false, true], - ); - let (creator, collection, name, version) = token::get_token_id_fields(&token_id); - create_auction( - &lister, - creator, - collection, - name, - version, - 1, - 1, - 1, - 2, - 2 + 10, - ); - - timestamp::update_global_time_for_test(1000000); - - coin::create_fake_money(&framework, &bidder_a, 1000); - coin::register(&bidder_b); - coin::register(&fee_account); - coin::transfer(&framework, signer::address_of(&bidder_a), 500); - coin::transfer(&framework, signer::address_of(&bidder_b), 500); - - bid( - &bidder_a, - creator, - collection, - name, - version, - 1, - 100, - 1, - 1 + 10, - ); - - bid( - &bidder_b, - creator, - collection, - name, - version, - 1, - 300, - 1, - 1 + 10, - ); - - timestamp::update_global_time_for_test(3000000); - - complete_auction(&house, 1); - - // highest bidder bidder B get the token - assert!(token::balance_of(signer::address_of(&bidder_b), token_id) == 1, 1); - assert!(token::balance_of(signer::address_of(&bidder_a), token_id) == 0, 1); - // 3 coin is paid for market fee and remaining is 297 - assert!(coin::balance(signer::address_of(&lister)) == 297, 1); - } - - #[test(lister = @marketplace, bidder_a = @0xBB, framework = @0x1, house = @marketplace)] - public fun test_cancel_bid( - lister: signer, - bidder_a: signer, - framework: signer, - house: signer, - ) acquires Auctions { - use aptos_framework::coin; - use aptos_framework::account; - timestamp::set_time_has_started_for_testing(&framework); - timestamp::update_global_time_for_test(1); - account::create_account_for_test(signer::address_of(&lister)); - account::create_account_for_test(signer::address_of(&bidder_a)); - account::create_account_for_test(signer::address_of(&framework)); - - // setup the auction house global fee config and config for each coin type - initialize_auction_house( - &house, - 1, - 100, - signer::address_of(&house) - ); - initialize_auction(&house); - - // owner creats a listing - let token_id = token::create_collection_and_token( - &lister, - 2, - 2, - 2, - vector[], - vector>[], - vector[], - vector[false, false, false], - vector[false, false, false, false, true], - ); - - let auction_id = create_auction_with_token_id( - &lister, - token_id, - 1, - 1, - 12, - 20, - 20 + 50, - ); - coin::create_fake_money(&framework, &bidder_a, 1000); - coin::transfer(&framework, signer::address_of(&bidder_a), 500); - - timestamp::update_global_time_for_test(12000000); - - create_bid_with_token_id( - &bidder_a, - token_id, - 1, - 100, - auction_id, - 20 + 50, - ); - - // bid_id_creation_number should be shown to users to allow them cancel the bid - cancel_bid_in_auction(&bidder_a, auction_id); - let auction = table::borrow( - &borrow_global>(signer::address_of(&house)).all_active_auctions, - auction_id - ); - assert!(simple_map::length(&auction.bids) == 0, 1); - timestamp::update_global_time_for_test(300000000); - - let listing_id = get_auction_listing_id(auction_id); - 
withdraw_coin_from_bid( - &bidder_a, - guid::id_creator_address(&listing_id), - guid::id_creation_num(&listing_id) - ); - } -} diff --git a/aptos-move/move-examples/marketplace/sources/marketplace_bid_utils.move b/aptos-move/move-examples/marketplace/sources/marketplace_bid_utils.move deleted file mode 100644 index 88efaecdfb2cf..0000000000000 --- a/aptos-move/move-examples/marketplace/sources/marketplace_bid_utils.move +++ /dev/null @@ -1,780 +0,0 @@ -/// An marketplace library providing basic function for buy and bid -/// To see how to use the library, please check the two example contract in the same folder -module marketplace::marketplace_bid_utils { - - use aptos_framework::account; - use aptos_framework::coin::{Self, Coin}; - use aptos_framework::event::{Self, EventHandle}; - use aptos_framework::timestamp; - use aptos_std::guid::{Self, ID}; - use aptos_std::table::{Self, Table}; - use aptos_token::token::{Self, TokenId}; - use marketplace::marketplace_listing_utils::{Self as listing_util, Listing, create_listing_id_raw}; - use std::signer; - use std::error; - use std::string::String; - use aptos_token::property_map::{Self, PropertyMap}; - - // - // Errors - // - - /// No sufficient fund to bid - const ENO_SUFFICIENT_FUND: u64 = 1; - - /// Token ID doesn't match - const ETOKEN_ID_NOT_MATCH: u64 = 2; - - /// Listing expired - const ELISTING_EXPIRED: u64 = 3; - - /// Listing hasn't started yet - const ELISTING_NOT_STARTED: u64 = 4; - - /// Token amount doesn't match - const ETOKEN_AMOUNT_NOT_MATCH: u64 = 5; - - /// Bid doesn't exist - const EBID_NOT_EXIST: u64 = 6; - - /// Cannot withdraw fund before bid expiration time - const ECANNOT_DRAW_FUND_BEFORE_EXPIRATION_TIME: u64 = 7; - - /// Listing Id doesn't match - const ELISTING_ID_NOT_MATCH: u64 = 8; - - /// The bidder has already bid for the same listing - const EBID_ID_EXISTS: u64 = 9; - - /// Buy from non-instant sale listing - const EBUY_NON_INSTANT_SALE_LISTING: u64 = 10; - - /// Cannot buy from expired listing - const EBUY_FROM_EXPIRED_LISTING: u64 = 11; - - /// Cannot buy from a listing that hasn't started - const EBUY_FROM_NOT_STARTED_LISTING: u64 = 12; - - /// hold the bid info and coin at user account - struct Bid has store { - id: BidId, - coin: Coin, - offer_price: u64, - expiration_sec: u64, - config: PropertyMap, - } - - /// This is the BidId used dedup the bid from the same signer for a listing - struct BidId has copy, drop, store { - bidder: address, - listing_id: ID, - } - - /// store all the bids by the user - struct BidRecords has key { - records: Table>, - bid_event: EventHandle>, - withdraw_bid_event: EventHandle>, - order_executed_event: EventHandle>, - increase_bid_event: EventHandle>, - } - - struct BidEvent has copy, drop, store { - offer_price: u64, - bid_id: BidId, - expiration_sec: u64, - } - - struct IncreaseBidEvent has copy, drop, store { - new_price: u64, - bid_id: BidId, - } - - struct WithdrawBidEvent has copy, drop, store { - bid_id: BidId, - } - - struct OrderExecutedEvent has copy, drop, store { - buyer: address, - lister_address: address, - listing_creation_number: u64, - executed_price: u64, - market_place_address: address, - } - - // - // entry functions - // - - /// Allow buyer to directly buy from a listing directly listed under an account without paying any fee - public entry fun buy_from_owner_with_fee( - buyer: &signer, - lister_address: address, - listing_creation_number: u64, - market_fee_address: address, - fee_numerator: u64, - fee_denominator: u64, - ) acquires BidRecords { - let entry = 
listing_util::remove_listing(lister_address, listing_creation_number); - buy_from_listing_with_fee(buyer, entry, market_fee_address, fee_numerator, fee_denominator); - } - - /// Bidder can withdraw the bid after the bid expires to get the coin back and store them in the coinstore - public entry fun withdraw_coin_from_bid( - bidder: &signer, - lister_addr: address, - listing_creation_number: u64, - ) acquires BidRecords { - let bidder_address = signer::address_of(bidder); - let listing_id = create_listing_id_raw(lister_addr, listing_creation_number); - let bid_id = create_bid_id(bidder_address, listing_id); - - let bid_records = borrow_global_mut>(bidder_address); - assert!(table::contains(&bid_records.records, bid_id), error::not_found(EBID_NOT_EXIST)); - - let bid = table::remove(&mut bid_records.records, bid_id); - assert!(timestamp::now_seconds() > bid.expiration_sec, error::permission_denied(ECANNOT_DRAW_FUND_BEFORE_EXPIRATION_TIME)); - - coin::deposit(bidder_address, clear_bid(bid)); - event::emit_event>( - &mut bid_records.withdraw_bid_event, - WithdrawBidEvent { - bid_id - }, - ); - } - - // - // public functions - // - - /// Buy from listings. This can be called by marketplace contracts with their own fee config and stored Listing - public fun buy_from_listing_with_fee( - buyer: &signer, - entry: Listing, - market_fund_address: address, - fee_numerator: u64, - fee_denominator: u64, - ) acquires BidRecords { - // assert the listing is active - let ( - id, - token_id, - listed_amount, - min_price, - instant_sale, - start_sec, - expiration_sec, - withdraw_cap, - _, - ) = listing_util::destroy_listing(entry); - let now = timestamp::now_seconds(); - assert!(now > start_sec, error::invalid_argument(EBUY_FROM_NOT_STARTED_LISTING)); - assert!(now < expiration_sec, error::invalid_argument(EBUY_FROM_EXPIRED_LISTING)); - - // listing is instant sale - assert!(instant_sale, error::invalid_argument(EBUY_NON_INSTANT_SALE_LISTING)); - - // assert the buyer has sufficient balance - let buyer_addr = signer::address_of(buyer); - let required_balance = min_price * listed_amount; - // check bidder has sufficient balance - assert!(coin::balance(buyer_addr) >= required_balance, error::invalid_argument(ENO_SUFFICIENT_FUND)); - initialize_bid_records(buyer); - - // swap the coin and token - let token = token::withdraw_with_capability( - withdraw_cap - ); - token::deposit_token(buyer, token); - - let coins = coin::withdraw(buyer, required_balance); - - // deduct royalty fee from the transactions - let royalty = token::get_royalty(token_id); - let royalty_payee = token::get_royalty_payee(&royalty); - let royalty_coin = deduct_fee( - &mut coins, - token::get_royalty_numerator(&royalty), - token::get_royalty_denominator(&royalty) - ); - coin::deposit(royalty_payee, royalty_coin); - - // deduct marketplace fee - let market_fee = deduct_fee(&mut coins, fee_numerator, fee_denominator); - coin::deposit(market_fund_address, market_fee); - - // give the remaining to the seller - let token_owner = guid::id_creator_address(&id); - coin::deposit(token_owner, coins); - - emit_order_executed_event( - buyer_addr, - token_owner, - guid::id_creation_num(&id), - min_price, - market_fund_address, - ); - } - - public fun initialize_bid_records(bidder: &signer) { - let owner_addr = signer::address_of(bidder); - - if (!exists>(owner_addr)) { - move_to( - bidder, - BidRecords { - records: table::new(), - bid_event: account::new_event_handle>(bidder), - withdraw_bid_event: account::new_event_handle>(bidder), - increase_bid_event: 
account::new_event_handle>(bidder), - order_executed_event: account::new_event_handle>(bidder), - } - ); - }; - } - - /// withdraw the coin and store them in bid struct and return a global unique bid id - public fun bid( - bidder: &signer, - token_id: TokenId, - token_amount:u64, - offer_price: u64, - entry: &Listing, - expiration_sec: u64, - keys: vector, - values: vector>, - types: vector, - - ): BidId acquires BidRecords { - initialize_bid_records(bidder); - let bidder_address = signer::address_of(bidder); - // check the bid is legit for the listing - let total_coin_amount = offer_price * token_amount; // the total coin offerred by the bidder - // check bidder has sufficient balance - assert!(coin::balance(bidder_address) >= total_coin_amount, error::invalid_argument(ENO_SUFFICIENT_FUND)); - assert_bid_parameters(token_id, total_coin_amount, token_amount, entry, timestamp::now_seconds()); - - - // assert the bid_id not exist in the bid records - initialize_bid_records(bidder); - let bid_records = borrow_global_mut>(bidder_address); - let bid_id = create_bid_id(bidder_address, listing_util::get_listing_id(entry)); - assert!(!table::contains(&bid_records.records, bid_id), error::already_exists(EBID_ID_EXISTS)); - - // withdraw the coin and store them in escrow to ensure the fund is avaliable until expiration_sec - let coin = coin::withdraw(bidder, total_coin_amount); - - let bid = Bid { - id: bid_id, - coin, - offer_price, - expiration_sec, - config: property_map::new(keys, values, types), - }; - - table::add(&mut bid_records.records, bid_id, bid); - event::emit_event>( - &mut bid_records.bid_event, - BidEvent { - offer_price, - bid_id, - expiration_sec, - }, - ); - // opt-in direct transfer to receive token without signer - token::opt_in_direct_transfer(bidder, true); - - bid_id - } - - /// Allow the bid to increase the coin for an existing bid - public fun increase_bid( - bidder: &signer, - bid_id: BidId, - price_delta: u64, - entry: &Listing, - ) acquires BidRecords { - let bidder_address = signer::address_of(bidder); - - let bid_records = borrow_global_mut>(bidder_address); - assert!(table::contains(&bid_records.records, bid_id), error::not_found(EBID_NOT_EXIST)); - - let listing_id = listing_util::get_listing_id(entry); - assert!(bid_id.listing_id == listing_id, error::invalid_argument(ELISTING_ID_NOT_MATCH)); - - // check the bid is legit for the listing - let token_amount = listing_util::get_listing_token_amount(entry); - let added_amount = price_delta * token_amount; - // check bidder has sufficient balance - assert!(coin::balance(bidder_address) >= added_amount, error::invalid_argument(ENO_SUFFICIENT_FUND)); - - // add coin to the bid and update its info - let added_coin = coin::withdraw(bidder, added_amount); - let bid = table::borrow_mut(&mut bid_records.records, bid_id); - bid.offer_price = bid.offer_price + price_delta; - coin::merge(&mut bid.coin, added_coin); - - event::emit_event>( - &mut bid_records.increase_bid_event, - IncreaseBidEvent { - new_price: bid.offer_price, - bid_id, - }, - ); - } - - - /// execute a bid to a listing, no signer required to perform this function - /// pay fee to 3rd party based on a percentage - /// deduct royalty and send to the payee account - /// only the listing owner can execute the bid - public fun execute_listing_bid( - bid_id: BidId, - entry: Listing, - market_fund_address: address, - fee_numerator: u64, - fee_denominator: u64, - ) acquires BidRecords { - let bid_records = &mut borrow_global_mut>(bid_id.bidder).records; - 
assert!(table::contains(bid_records, bid_id), error::not_found(EBID_NOT_EXIST)); - let bid = table::borrow(bid_records, bid_id); - let ( - id, - token_id, - listed_amount, - min_price, - _, - _, - expiration_sec, - withdraw_cap, - _, - ) = listing_util::destroy_listing(entry); - let coin_owner = bid.id.bidder; - // validate offerred amount and price - let min_total = min_price * listed_amount; - assert!(coin::value(&bid.coin) >= min_total, error::invalid_argument(ENO_SUFFICIENT_FUND)); - // validate expiration time - let now = timestamp::now_seconds(); - assert!(now >= expiration_sec, error::invalid_argument(ELISTING_EXPIRED)); - //listing_id matches - assert!(id == bid.id.listing_id, error::invalid_argument(ELISTING_ID_NOT_MATCH)); - - // transfer coin and token - let token = token::withdraw_with_capability( - withdraw_cap - ); - - token::direct_deposit_with_opt_in(coin_owner, token); - - let bid_mut = table::remove(bid_records, bid_id); - let offer_price = bid_mut.offer_price; - let coins = clear_bid(bid_mut); - - // deduct royalty fee from the transactions - let royalty = token::get_royalty(token_id); - let royalty_payee = token::get_royalty_payee(&royalty); - let royalty_coin = deduct_fee( - &mut coins, - token::get_royalty_numerator(&royalty), - token::get_royalty_denominator(&royalty) - ); - coin::deposit(royalty_payee, royalty_coin); - - // deduct marketplace fee - let market_fee = deduct_fee(&mut coins, fee_numerator, fee_denominator); - coin::deposit(market_fund_address, market_fee); - - // give the remaining to the seller - let token_owner = guid::id_creator_address(&id); - coin::deposit(token_owner, coins); - - emit_order_executed_event( - coin_owner, - token_owner, - guid::id_creation_num(&id), - offer_price, - market_fund_address, - ); - } - - /// validate if bid is legit for a listing. 
- public fun assert_bid_parameters( - token_id: TokenId, - offer_price: u64, - token_amount: u64, - entry: &Listing, - bid_time: u64, - ) { - // validate token_id match - assert!(token_id == listing_util::get_listing_token_id(entry), error::invalid_argument(ETOKEN_ID_NOT_MATCH)); - // validate offerred amount and price - let listed_amount = listing_util::get_listing_token_amount(entry); - let min_total = listing_util::get_listing_min_price(entry) * listed_amount; - let total_coin_amount = offer_price * token_amount; - assert!(total_coin_amount >= min_total, ENO_SUFFICIENT_FUND); - assert!(token_amount == listed_amount, ETOKEN_AMOUNT_NOT_MATCH); - assert!(bid_time >= listing_util::get_listing_start(entry), error::invalid_argument(ELISTING_NOT_STARTED)); - assert!(bid_time <= listing_util::get_listing_expiration(entry), error::invalid_argument(ELISTING_EXPIRED)); - } - - public fun get_bid_info( - bid_id: BidId - ): (u64, u64) acquires BidRecords { - let bid_records = &mut borrow_global_mut>(bid_id.bidder).records; - assert!(table::contains(bid_records, bid_id), error::not_found(EBID_NOT_EXIST)); - - let bid = table::borrow(bid_records, bid_id); - (bid.offer_price, bid.expiration_sec) - } - - /// internal function for assigned a global unique id for a listing - public fun create_bid_id(bidder: address, listing_id: ID): BidId { - BidId { - bidder, - listing_id, - } - } - - /// get bidder address from BidId - public fun get_bid_id_address(bid_id: &BidId): address { - bid_id.bidder - } - - /// get bidder listing id from BidId - public fun get_bid_id_listing_id(bid_id: &BidId): ID { - bid_id.listing_id - } - - // - // Private or friend functions - // - - /// destruct the bid struct and extract coins - fun clear_bid(bid: Bid): Coin { - let Bid { - id: _, - coin, - offer_price: _, - expiration_sec: _, - config: _ - } = bid; - coin - } - - fun emit_order_executed_event( - buyer: address, - lister_address: address, - listing_creation_number: u64, - executed_price: u64, - market_place_address: address, - ) acquires BidRecords { - let records = borrow_global_mut>(buyer); - event::emit_event>( - &mut records.order_executed_event, - OrderExecutedEvent { - buyer, - lister_address, - listing_creation_number, - executed_price, - market_place_address, - }, - ); - } - - fun deduct_fee( - total_coin: &mut Coin, - fee_numerator: u64, - fee_denominator: u64 - ): Coin { - let value = coin::value(total_coin); - let fee = if (fee_denominator == 0) { - 0 - } else { - value * fee_numerator/ fee_denominator - }; - coin::extract(total_coin, fee) - } - - #[test_only] - public fun test_aution_setup( - owner: &signer, - bidder_a: &signer, - aptos_framework: &signer, - use_wrong_coin_amount: bool, - use_wrong_token_amount: bool, - ): (BidId, Listing) acquires BidRecords { - timestamp::set_time_has_started_for_testing(aptos_framework); - timestamp::update_global_time_for_test(11000000); - - account::create_account_for_test(signer::address_of(owner)); - account::create_account_for_test(signer::address_of(bidder_a)); - account::create_account_for_test(signer::address_of(aptos_framework)); - - - // owner creats a listing - let token_id = token:: create_collection_and_token( - owner, - 2, - 2, - 2, - vector[], - vector>[], - vector[], - vector[false, false, false], - vector[false, false, false, false, false], - ); - let entry = listing_util::create_listing( - owner, - token_id, - 1, - 2, - false, - 0, - 100, - 200, - vector[], - vector>[], - vector[], - ); - - coin::create_fake_money(aptos_framework, bidder_a, 100); - 
coin::transfer(aptos_framework, signer::address_of(bidder_a), 100); - //assert!(signer::address_of(&owner) == @0x1, 1); - - token::initialize_token_store(bidder_a); - coin::register(owner); - let token_amount = if (use_wrong_token_amount) { 10 } else {1}; - let offered_price = if (use_wrong_coin_amount) {1} else {10}; - let bid_1 = bid( - bidder_a, - token_id, - token_amount, - offered_price, - &entry, - 100000001, - vector[], - vector>[], - vector[], - ); - (bid_1, entry) - } - - #[test(owner = @0xFE, bidder_a = @0xBC, aptos_framework = @aptos_framework)] - public fun test_successful( - owner: signer, - bidder_a: signer, - aptos_framework: signer - ) acquires BidRecords { - let (bid_id, entry) = test_aution_setup( - &owner, - &bidder_a, - &aptos_framework, - false, - false, - ); - let lister = listing_util::get_listing_creator(&entry); - timestamp::update_global_time_for_test(100000000); - execute_listing_bid(bid_id, entry,@aptos_framework, 10, 100); - - // listing owner get paid with a deduction of market fee - // 1 * 10 - (1 * 10) * (10 / 100) - assert!(coin::balance(lister) == 9, 1); - } - - #[test(owner = @marketplace, bidder_a = @0xBB, aptos_framework = @aptos_framework)] - #[expected_failure(abort_code = 1, location = marketplace::marketplace_bid_utils)] - public fun test_wrong_coin_amount( - owner: signer, - bidder_a: signer, - aptos_framework: signer - ) acquires BidRecords { - let (bid_id, entry) = test_aution_setup( - &owner, - &bidder_a, - &aptos_framework, - true, - false, - ); - timestamp::update_global_time_for_test(100000000); - execute_listing_bid(bid_id, entry, @aptos_framework, 0, 1); - } - - #[test(owner = @marketplace, bidder_a = @0xBB, aptos_framework = @aptos_framework)] - #[expected_failure(abort_code = 5, location = marketplace::marketplace_bid_utils)] - public fun test_wrong_token_amount( - owner: signer, - bidder_a: signer, - aptos_framework: signer - ) acquires BidRecords { - let (bid_id, entry) = test_aution_setup( - &owner, - &bidder_a, - &aptos_framework, - false, - true, - ); - timestamp::update_global_time_for_test(100000000); - execute_listing_bid(bid_id, entry, @aptos_framework, 0, 1); - } - - #[test(owner = @marketplace, bidder_a = @0xBB, aptos_framework = @aptos_framework)] - public fun test_increase_bid( - owner: signer, - bidder_a: signer, - aptos_framework: signer - ) acquires BidRecords { - let (bid_id, entry) = test_aution_setup( - &owner, - &bidder_a, - &aptos_framework, - false, - false, - ); - increase_bid(&bidder_a, bid_id, 10, &entry); - - assert!(coin::balance(signer::address_of(&bidder_a)) == 80, 1); - } - - #[test_only] - public fun test_instant_sale_setup( - owner: &signer, - buyer: &signer, - aptos_framework: &signer, - start_sec: u64, - end_sec: u64, - ): (Listing, TokenId) { - timestamp::set_time_has_started_for_testing(aptos_framework); - - account::create_account_for_test(signer::address_of(owner)); - account::create_account_for_test(signer::address_of(buyer)); - account::create_account_for_test(signer::address_of(aptos_framework)); - - - // owner creats a listing - let token_id = token::create_collection_and_token( - owner, - 2, - 2, - 2, - vector[], - vector>[], - vector[], - vector[false, false, false], - vector[false, false, false, false, false], - ); - let entry = listing_util::create_listing( - owner, - token_id, - 1, - 100, - true, - start_sec, - end_sec, - end_sec + 1, // token transfer happens immedidately after buying - vector[], - vector>[], - vector[], - ); - - coin::create_fake_money(aptos_framework, buyer, 100); - 
coin::transfer(aptos_framework, signer::address_of(buyer), 100); - //assert!(signer::address_of(&owner) == @0x1, 1); - token::initialize_token_store(buyer); - coin::register(owner); - (entry, token_id) - } - - #[test(owner = @marketplace, buyer = @0xBB, framework = @aptos_framework, market = @0x33)] - fun test_buy_successful( - owner: &signer, - buyer: &signer, - framework: &signer, - market: &signer, - ) acquires BidRecords { - account::create_account_for_test(signer::address_of(market)); - coin::register(market); - - let (entry, token_id) = test_instant_sale_setup(owner, buyer, framework, 1, 10); - timestamp::update_global_time_for_test(2000000); - - let owner_addr = signer::address_of(owner); - let buyer_addr = signer::address_of(buyer); - buy_from_listing_with_fee( - buyer, - entry, - signer::address_of(market), - 1, - 100, - ); - - // assert the token and coin are transferred as expected - assert!(token::balance_of(owner_addr, token_id) == 1, 1); - assert!(token::balance_of(buyer_addr, token_id) == 1, 1); - assert!(coin::balance(buyer_addr) == 0, 1); - // 1 % is paid as market fee - assert!(coin::balance(owner_addr) == 99, 1); - } - - #[test(owner = @0x12, buyer = @0x34, framework = @aptos_framework)] - #[expected_failure(abort_code = 65538, location = aptos_framework::timestamp)] - fun test_buy_before_start( - owner: &signer, - buyer: &signer, - framework: &signer, - ) acquires BidRecords { - let (entry, _) = test_instant_sale_setup(owner, buyer, framework, 1, 10); - timestamp::update_global_time_for_test(0); - - buy_from_listing_with_fee( - buyer, - entry, - signer::address_of(framework), - 1, - 100, - ); - } - - #[test(owner = @0x12, buyer = @0x34, framework = @aptos_framework)] - #[expected_failure(abort_code = 65547, location = marketplace::marketplace_bid_utils)] - fun test_buy_after_expire( - owner: &signer, - buyer: &signer, - framework: &signer, - ) acquires BidRecords { - let (entry, _) = test_instant_sale_setup(owner, buyer, framework, 1, 10); - timestamp::update_global_time_for_test(30000000); - - buy_from_listing_with_fee( - buyer, - entry, - signer::address_of(framework), - 1, - 100, - ); - } - - #[test(owner = @marketplace, bidder_a = @0xBB, framework = @aptos_framework, buyer = @0xee)] - #[expected_failure(abort_code = 65546, location = marketplace::marketplace_bid_utils)] - fun test_buy_from_auction_listing( - owner: &signer, - bidder_a: &signer, - framework: &signer, - buyer: &signer, - ) acquires BidRecords { - let (_, entry) = test_aution_setup( - owner, - bidder_a, - framework, - false, - false, - ); - buy_from_listing_with_fee( - buyer, - entry, - signer::address_of(framework), - 1, - 100, - ); - } -} diff --git a/aptos-move/move-examples/marketplace/sources/marketplace_instant_sale_example.move b/aptos-move/move-examples/marketplace/sources/marketplace_instant_sale_example.move deleted file mode 100644 index 79dd8502a0735..0000000000000 --- a/aptos-move/move-examples/marketplace/sources/marketplace_instant_sale_example.move +++ /dev/null @@ -1,85 +0,0 @@ -/// This is an example demonstrating how to use marketplace_bid_utils and market_place_listing_utils to build an auction house -/// This example shows how to build a decentralized marketplace where listing are stored under owner's account -/// Note: the buyer can buy from any listing that is stored under owners' account -/// For more detailed description, check readme -module marketplace::marketplace_instant_sale_example { - use std::string::String; - use aptos_std::table::Table; - use 
marketplace::marketplace_listing_utils::{Self as listing_utils, Listing}; - use marketplace::marketplace_bid_utils::{Self as bid_utils}; - - use aptos_framework::guid::ID; - - - struct Config has key { - market_fee_numerator: u64, - market_fee_denominator: u64, - fee_address: address, - } - - public entry fun initialize_market( - account: &signer, - market_fee_numerator: u64, - market_fee_denominator: u64, - fee_address: address, - ) { - move_to( - account, - Config { - market_fee_denominator, - market_fee_numerator, - fee_address, - } - ); - } - - struct Listings has key { - all_active_Listings: Table>, - } - - public entry fun creat_listing( - owner: &signer, - creator: address, - collection_name: String, - token_name: String, - property_version: u64, - amount: u64, - min_price: u64, - start_sec: u64, - expiration_sec: u64, - withdraw_expiration_sec: u64, - ) { - listing_utils::direct_listing( - owner, - creator, - collection_name, - token_name, - property_version, - amount, - min_price, - true, - start_sec, - expiration_sec, - withdraw_expiration_sec, - ); - } - - public entry fun buy_listing( - buyer: &signer, - lister_address: address, - listing_creation_number: u64, - ) acquires Config { - // charge fee for the aggregator - let config = borrow_global(@marketplace); - - // buy the token from owner directly - bid_utils::buy_from_owner_with_fee( - buyer, - lister_address, - listing_creation_number, - config.fee_address, - config.market_fee_numerator, - config.market_fee_denominator, - ); - } -} diff --git a/aptos-move/move-examples/marketplace/sources/marketplace_listing_utils.move b/aptos-move/move-examples/marketplace/sources/marketplace_listing_utils.move deleted file mode 100644 index 78a35bf2cd199..0000000000000 --- a/aptos-move/move-examples/marketplace/sources/marketplace_listing_utils.move +++ /dev/null @@ -1,377 +0,0 @@ -/// An marketplace library providing basic function for listing NFTs -/// To see how to use the library, please check the two example contract in the same folder -module marketplace::marketplace_listing_utils { - use std::error; - use std::signer; - use std::string::String; - use aptos_framework::account; - use aptos_framework::event::{Self, EventHandle}; - use aptos_std::table::{Self, Table}; - use aptos_std::guid::{Self, ID}; - use aptos_token::token::{Self, TokenId, WithdrawCapability}; - use aptos_token::property_map::{Self, PropertyMap}; - - friend marketplace::marketplace_bid_utils; - - - // - // Errors - // - - /// Not enough token to list - const EOWNER_NOT_HAVING_ENOUGH_TOKEN: u64 = 1; - - /// Listing doesn't exist - const ELISTING_NOT_EXIST:u64 = 2; - - /// Withdraw time should be longer than listing time - const EWITHDRAW_EXPIRE_TIME_SHORT_THAN_LISTING_TIME: u64 = 3; - - /// Start time should be less than expire time - const ESTART_TIME_LARGER_THAN_EXPIRE_TIME: u64 = 4; - - /// Listing zero token - const ELISTING_ZERO_TOKEN: u64 = 5; - - - /// immutable struct for recording listing info. 
- struct Listing has drop, store { - id: ID, - token_id: TokenId, - amount: u64, - min_price: u64, - instant_sale: bool, // true for marketplace and false for auction - start_sec: u64, // timestamp in secs for the listing starting time - expiration_sec: u64, // timestamp in secs for the listing expiration time - withdraw_cap: WithdrawCapability, - config: PropertyMap, - } - - struct ListingEvent has copy, drop, store { - id: ID, - token_id: TokenId, - amount: u64, - min_price: u64, - instant_sale: bool, - start_sec: u64, - expiration_sec: u64, - withdraw_sec: u64, - market_address: address, - config: PropertyMap, - } - - struct CancelListingEvent has copy, drop, store { - id: ID, - market_address: address, - } - - /// store listings on the owner's account - struct ListingRecords has key { - records: Table>, - listing_event: EventHandle, - cancel_listing_event: EventHandle, - } - - // - // entry functions - // - - /// creator uses this function to directly list token for sale under their own accounts - public entry fun direct_listing( - owner: &signer, - creator: address, - collection_name: String, - token_name: String, - property_version: u64, - amount: u64, - min_price: u64, - instant_sale: bool, // indicate if this listing is for sale or for auction - start_sec: u64, - expiration_sec: u64, - withdraw_expiration_sec: u64, - ) acquires ListingRecords { - let token_id = token::create_token_id_raw(creator, collection_name, token_name, property_version); - create_list_under_user_account( - owner, - token_id, - amount, - min_price, - instant_sale, - start_sec, - expiration_sec, - withdraw_expiration_sec, - ); - } - - /// remove a listing for the direct listing records - public entry fun cancel_direct_listing( - owner: &signer, - listing_id_creation_number: u64 - ) acquires ListingRecords { - let listing_id = guid::create_id(signer::address_of(owner), listing_id_creation_number); - let owner_addr = signer::address_of(owner); - let records = borrow_global_mut>(owner_addr); - assert!(table::contains(&records.records, listing_id), error::not_found(ELISTING_NOT_EXIST)); - table::remove(&mut records.records, listing_id); - - event::emit_event( - &mut records.cancel_listing_event, - CancelListingEvent { - id: listing_id, - market_address: signer::address_of(owner), - }, - ); - } - - // - // public functions - // - - /// Return a listing struct, marketplace owner can use this function to create a listing and store it in its inventory - public fun create_listing( - owner: &signer, - token_id: TokenId, - amount: u64, - min_price: u64, - instant_sale: bool, - start_sec: u64, - listing_expiration_sec: u64, - withdraw_expiration_sec: u64, // The end time when the listed token can be withdrawn. 
- keys: vector, - values: vector>, - types: vector, - ): Listing { - let owner_addr = signer::address_of(owner); - assert!(listing_expiration_sec > start_sec, error::invalid_argument(ESTART_TIME_LARGER_THAN_EXPIRE_TIME)); - assert!(token::balance_of(owner_addr, token_id) >= amount, error::invalid_argument(EOWNER_NOT_HAVING_ENOUGH_TOKEN)); - assert!(withdraw_expiration_sec > listing_expiration_sec, error::invalid_argument(EWITHDRAW_EXPIRE_TIME_SHORT_THAN_LISTING_TIME)); - assert!(amount > 0, error::invalid_argument(ELISTING_ZERO_TOKEN)); - Listing { - id: create_listing_id(owner), - token_id, - amount, - min_price, - instant_sale, - start_sec, - expiration_sec: listing_expiration_sec, - withdraw_cap: token::create_withdraw_capability(owner, token_id, amount, withdraw_expiration_sec), - config: property_map::new(keys, values, types), - } - } - - public fun initialize_listing_records(owner: &signer){ - let owner_addr = signer::address_of(owner); - - if (!exists>(owner_addr)) { - move_to( - owner, - ListingRecords { - records: table::new(), - listing_event: account::new_event_handle(owner), - cancel_listing_event: account::new_event_handle(owner), - } - ); - }; - } - - public fun create_list_under_user_account( - owner: &signer, - token_id: TokenId, - amount: u64, - min_price: u64, - instant_sale: bool, - start_sec: u64, - expiration_sec: u64, - withdraw_expiration_sec: u64, - ): ID acquires ListingRecords { - let owner_addr = signer::address_of(owner); - let record = create_listing( - owner, - token_id, - amount, - min_price, - instant_sale, - start_sec, - expiration_sec, - withdraw_expiration_sec, - vector[], - vector>[], - vector[], - ); - initialize_listing_records(owner); - let records = borrow_global_mut>(owner_addr); - - let id = create_listing_id(owner); - // add a new record to the listing - table::add(&mut records.records, id, record); - event::emit_event( - &mut records.listing_event, - ListingEvent { - id, - token_id, - amount, - min_price, - instant_sale, - start_sec, - expiration_sec, - withdraw_sec: withdraw_expiration_sec, - market_address: owner_addr, - config: property_map::empty(), - }, - ); - id - } - - public fun destroy_listing(entry: Listing): ( - ID, - TokenId, - u64, - u64, - bool, - u64, - u64, - WithdrawCapability, - PropertyMap, - ){ - let Listing { - id, - token_id, - amount, - min_price, - instant_sale, - start_sec, - expiration_sec, - withdraw_cap, - config, - } = entry; - (id, token_id, amount, min_price, instant_sale, start_sec, expiration_sec, withdraw_cap, config) - } - - /// util function for constructing the listing id from raw fields - public fun create_listing_id_raw(lister: address, listing_creation_number: u64): ID { - guid::create_id(lister, listing_creation_number) - } - - public fun get_listing_id(list: &Listing): ID { - list.id - } - - public fun get_listing_id_tuple(list: &Listing): (u64, address) { - let id = list.id; - (guid::id_creation_num(&id), guid::id_creator_address(&id)) - } - - public fun get_listing_creator(list: &Listing): address { - guid::id_creator_address(&list.id) - } - - public fun get_listing_token_id(list: &Listing): TokenId { - list.token_id - } - - public fun get_listing_expiration(list: &Listing): u64 { - list.expiration_sec - } - - public fun get_listing_start(list: &Listing): u64 { - list.start_sec - } - - public fun get_listing_min_price(list: &Listing): u64 { - list.min_price - } - - public fun get_listing_token_amount(list: &Listing): u64 { - list.amount - } - - public fun get_listing_instant_sale(list: &Listing): bool { 
- list.instant_sale - } - - public fun create_listing_event( - id: ID, - token_id: TokenId, - amount: u64, - min_price: u64, - instant_sale: bool, - start_sec: u64, - expiration_sec: u64, - withdraw_sec: u64, - market_address: address, - config: PropertyMap - ): ListingEvent { - ListingEvent { - id, token_id, amount, min_price, instant_sale, start_sec, expiration_sec, withdraw_sec, market_address, config - } - } - - /// Get the read-only listing reference from listing stored on user account. - public fun get_listing_info( - lister_address: address, - listing_creation_number: u64 - ): (TokenId, u64, u64, bool, u64, u64) acquires ListingRecords { - let listing_id = guid::create_id(lister_address, listing_creation_number); - let records = borrow_global_mut>(lister_address); - assert!(table::contains(&records.records, listing_id), error::not_found(ELISTING_NOT_EXIST)); - let listing = table::borrow(&records.records, listing_id); - ( - listing.token_id, - listing.amount, - listing.min_price, - listing.instant_sale, - listing.start_sec, - listing.expiration_sec, - ) - } - - // - // Private or friend functions - // - - /// internal function for creating a new unique id for a listing - fun create_listing_id(owner: &signer): ID { - let gid = account::create_guid(owner); - guid::id(&gid) - } - - /// Get the listing struct which contains withdraw_capability - /// This function should stay friend to prevent Listing be exposed to un-trusted module - public(friend) fun remove_listing(lister_address: address, listing_creation_number: u64): Listing acquires ListingRecords { - let listing_id = guid::create_id(lister_address, listing_creation_number); - let records = borrow_global_mut>(lister_address); - assert!(table::contains(&records.records, listing_id), error::not_found(ELISTING_NOT_EXIST)); - table::remove(&mut records.records, listing_id) - } - - - #[test(owner = @marketplace)] - public fun test_cancel_listing(owner: signer)acquires ListingRecords { - use aptos_framework::coin; - - account::create_account_for_test(signer::address_of(&owner)); - let token_id = token::create_collection_and_token( - &owner, - 1, - 2, - 1, - vector[], - vector>[], - vector[], - vector[false, false, false], - vector[false, false, false, false, false], - ); - let listing_id = create_list_under_user_account( - &owner, - token_id, - 1, - 1, - false, - 0, - 10000, - 10001, - ); - cancel_direct_listing(&owner, guid::id_creation_num(&listing_id)); - } - -} diff --git a/aptos-move/move-examples/marketplace/sources/test_utils.move b/aptos-move/move-examples/marketplace/sources/test_utils.move new file mode 100644 index 0000000000000..76ce5276bcb21 --- /dev/null +++ b/aptos-move/move-examples/marketplace/sources/test_utils.move @@ -0,0 +1,142 @@ +#[test_only] +module marketplace::test_utils { + use std::signer; + use std::string; + use std::vector; + + use aptos_framework::account; + use aptos_framework::aptos_coin::{Self, AptosCoin}; + use aptos_framework::coin; + use aptos_framework::object::{Self, Object}; + use aptos_framework::timestamp; + + use aptos_token::token as tokenv1; + use aptos_token_objects::token::Token; + use aptos_token_objects::aptos_token; + + use marketplace::fee_schedule::{Self, FeeSchedule}; + + public inline fun setup( + aptos_framework: &signer, + marketplace: &signer, + seller: &signer, + purchaser: &signer, + ): (address, address, address) { + timestamp::set_time_has_started_for_testing(aptos_framework); + let (burn_cap, mint_cap) = aptos_coin::initialize_for_test(aptos_framework); + + let 
marketplace_addr = signer::address_of(marketplace); + account::create_account_for_test(marketplace_addr); + coin::register(marketplace); + + let seller_addr = signer::address_of(seller); + account::create_account_for_test(seller_addr); + coin::register(seller); + + let purchaser_addr = signer::address_of(purchaser); + account::create_account_for_test(purchaser_addr); + coin::register(purchaser); + + let coins = coin::mint(10000, &mint_cap); + coin::deposit(seller_addr, coins); + let coins = coin::mint(10000, &mint_cap); + coin::deposit(purchaser_addr, coins); + + coin::destroy_burn_cap(burn_cap); + coin::destroy_mint_cap(mint_cap); + + (marketplace_addr, seller_addr, purchaser_addr) + } + + public fun fee_schedule(seller: &signer): Object { + fee_schedule::init_internal( + seller, + signer::address_of(seller), + 2, + 1, + 100, + 1, + ) + } + + public inline fun increment_timestamp(seconds: u64) { + timestamp::update_global_time_for_test(timestamp::now_microseconds() + (seconds * 1000000)); + } + + public fun mint_tokenv2(seller: &signer): Object { + let seller_addr = signer::address_of(seller); + let collection_name = string::utf8(b"collection_name"); + let token_creation_num = account::get_guid_next_creation_num(seller_addr); + + aptos_token::create_collection( + seller, + string::utf8(b"collection description"), + 1, + collection_name, + string::utf8(b"collection uri"), + true, + true, + true, + true, + true, + true, + true, + true, + true, + 1, + 100, + ); + + aptos_token::mint( + seller, + collection_name, + string::utf8(b"description"), + string::utf8(b"token_name"), + string::utf8(b"uri"), + vector::empty(), + vector::empty(), + vector::empty(), + ); + + let obj_addr = object::create_guid_object_address(seller_addr, token_creation_num); + object::address_to_object(obj_addr) + } + + public fun mint_tokenv1(seller: &signer): tokenv1::TokenId { + let collection_name = string::utf8(b"collection_name"); + let token_name = string::utf8(b"token_name"); + + tokenv1::create_collection( + seller, + collection_name, + string::utf8(b"Collection: Hello, World"), + string::utf8(b"https://aptos.dev"), + 1, + vector[true, true, true], + ); + + tokenv1::create_token_script( + seller, + collection_name, + token_name, + string::utf8(b"Hello, Token"), + 1, + 1, + string::utf8(b"https://aptos.dev"), + signer::address_of(seller), + 100, + 1, + vector[true, true, true, true, true], + vector::empty(), + vector::empty(), + vector::empty(), + ); + + tokenv1::create_token_id_raw( + signer::address_of(seller), + collection_name, + token_name, + 0, + ) + } +} diff --git a/aptos-move/move-examples/post_mint_reveal_nft/sources/bucket_table.move b/aptos-move/move-examples/post_mint_reveal_nft/sources/bucket_table.move index 3a0bbd3113f0a..fa1d601fb0535 100644 --- a/aptos-move/move-examples/post_mint_reveal_nft/sources/bucket_table.move +++ b/aptos-move/move-examples/post_mint_reveal_nft/sources/bucket_table.move @@ -71,13 +71,10 @@ module post_mint_reveal_nft::bucket_table { let hash = sip_hash_from_value(&key); let index = bucket_index(map.level, map.num_buckets, hash); let bucket = table_with_length::borrow_mut(&mut map.buckets, index); - let i = 0; - let len = vector::length(bucket); - while (i < len) { - let entry = vector::borrow(bucket, i); + vector::for_each_ref(bucket, |entry|{ + let entry: &Entry = entry; assert!(&entry.key != &key, error::invalid_argument(EALREADY_EXIST)); - i = i + 1; - }; + }); vector::push_back(bucket, Entry {hash, key, value}); map.len = map.len + 1; @@ -180,16 +177,10 @@ module 
post_mint_reveal_nft::bucket_table { public fun contains(map: &BucketTable, key: &K): bool { let index = bucket_index(map.level, map.num_buckets, sip_hash_from_value(key)); let bucket = table_with_length::borrow(&map.buckets, index); - let i = 0; - let len = vector::length(bucket); - while (i < len) { - let entry = vector::borrow(bucket, i); - if (&entry.key == key) { - return true - }; - i = i + 1; - }; - false + vector::any(bucket, |entry| { + let entry: &Entry = entry; + &entry.key == key + }) } /// Remove from `table` and return the value which `key` maps to. diff --git a/aptos-move/move-examples/post_mint_reveal_nft/sources/minting.move b/aptos-move/move-examples/post_mint_reveal_nft/sources/minting.move index 49e0147f5d8aa..0940e28232260 100644 --- a/aptos-move/move-examples/post_mint_reveal_nft/sources/minting.move +++ b/aptos-move/move-examples/post_mint_reveal_nft/sources/minting.move @@ -374,9 +374,7 @@ module post_mint_reveal_nft::minting { assert!(vector::length(&token_uris) + big_vector::length(&collection_config.tokens) <= collection_config.destination_collection_maximum || collection_config.destination_collection_maximum == 0, error::invalid_argument(EEXCEEDS_COLLECTION_MAXIMUM)); - let i = 0; - while (i < vector::length(&token_uris)) { - let token_uri = vector::borrow(&token_uris, i); + vector::enumerate_ref(&token_uris, |i, token_uri| { assert!(!bucket_table::contains(&collection_config.deduped_tokens, token_uri), error::invalid_argument(EDUPLICATE_TOKEN_URI)); big_vector::push_back(&mut collection_config.tokens, TokenAsset { token_uri: *token_uri, @@ -385,8 +383,7 @@ module post_mint_reveal_nft::minting { property_types: *vector::borrow(&property_types, i), }); bucket_table::add(&mut collection_config.deduped_tokens, *token_uri, true); - i = i + 1; - }; + }); } /// Mint source certificate. diff --git a/aptos-move/move-examples/post_mint_reveal_nft/sources/whitelist.move b/aptos-move/move-examples/post_mint_reveal_nft/sources/whitelist.move index 16186e841d3a4..c05c3de4b9547 100644 --- a/aptos-move/move-examples/post_mint_reveal_nft/sources/whitelist.move +++ b/aptos-move/move-examples/post_mint_reveal_nft/sources/whitelist.move @@ -134,11 +134,8 @@ module post_mint_reveal_nft::whitelist { let now = timestamp::now_seconds(); assert!(now < whitelist_stage.whitelist_minting_end_time, error::invalid_argument(EINVALID_UPDATE_AFTER_MINTING)); - let i = 0; - while (i < vector::length(&wl_addresses)) { - let wl_address = vector::borrow(&wl_addresses, i); + vector::for_each_ref(&wl_addresses, |wl_address| { bucket_table::add(&mut whitelist_stage.whitelisted_address, *wl_address, mint_limit); - i = i + 1; - }; + }); } } diff --git a/aptos-move/move-examples/shared_account/sources/shared_account.move b/aptos-move/move-examples/shared_account/sources/shared_account.move index 0479181588a4a..d71c9414eb892 100644 --- a/aptos-move/move-examples/shared_account/sources/shared_account.move +++ b/aptos-move/move-examples/shared_account/sources/shared_account.move @@ -30,22 +30,20 @@ module shared_account::SharedAccount { // Create and initialize a shared account public entry fun initialize(source: &signer, seed: vector, addresses: vector
, numerators: vector) { - let i = 0; let total = 0; let share_record = vector::empty(); - while (i < vector::length(&addresses)) { + vector::enumerate_ref(&addresses, |i, addr|{ + let addr = *addr; let num_shares = *vector::borrow(&numerators, i); - let addr = *vector::borrow(&addresses, i); // make sure that the account exists, so when we call disperse() it wouldn't fail // because one of the accounts does not exist assert!(account::exists_at(addr), error::invalid_argument(EACCOUNT_NOT_FOUND)); - vector::push_back(&mut share_record, Share { share_holder: addr, num_shares: num_shares }); + vector::push_back(&mut share_record, Share { share_holder: addr, num_shares }); total = total + num_shares; - i = i + 1; - }; + }); let (resource_signer, resource_signer_cap) = account::create_resource_account(source, seed); @@ -73,13 +71,11 @@ module shared_account::SharedAccount { let shared_account = borrow_global(resource_addr); let resource_signer = account::create_signer_with_capability(&shared_account.signer_capability); - let i = 0; - while (i < vector::length(&shared_account.share_record)) { - let share_record = vector::borrow(&shared_account.share_record, i); - let current_amount = share_record.num_shares * total_balance / shared_account.total_shares; - coin::transfer(&resource_signer, share_record.share_holder, current_amount); - i = i + 1; - }; + vector::for_each_ref(&shared_account.share_record, |shared_record|{ + let shared_record: &Share = shared_record; + let current_amount = shared_record.num_shares * total_balance / shared_account.total_shares; + coin::transfer(&resource_signer, shared_record.share_holder, current_amount); + }); } #[test_only] diff --git a/aptos-move/move-examples/tests/move_unit_tests.rs b/aptos-move/move-examples/tests/move_unit_tests.rs index bfdcb8d86ceb7..926d93a075003 100644 --- a/aptos-move/move-examples/tests/move_unit_tests.rs +++ b/aptos-move/move-examples/tests/move_unit_tests.rs @@ -113,10 +113,22 @@ fn test_message_board() { #[test] fn test_fungible_asset() { let named_address = BTreeMap::from([( - String::from("fungible_asset_extension"), + String::from("example_addr"), AccountAddress::from_hex_literal("0xcafe").unwrap(), )]); - run_tests_for_pkg("fungible_asset", named_address); + run_tests_for_pkg( + "fungible_asset/managed_fungible_asset", + named_address.clone(), + ); + run_tests_for_pkg( + "fungible_asset/managed_fungible_token", + named_address.clone(), + ); + run_tests_for_pkg( + "fungible_asset/preminted_managed_coin", + named_address.clone(), + ); + run_tests_for_pkg("fungible_asset/simple_managed_coin", named_address); } #[test] @@ -167,6 +179,7 @@ fn test_token_objects() { AccountAddress::from_hex_literal("0xcafe").unwrap(), )]); run_tests_for_pkg("token_objects/hero", named_address.clone()); + run_tests_for_pkg("token_objects/token_lockup", named_address.clone()); run_tests_for_pkg("token_objects/ambassador/move", named_address); } diff --git a/aptos-move/move-examples/token_objects/README.md b/aptos-move/move-examples/token_objects/README.md index c1c001773cfee..82c86543a4baa 100644 --- a/aptos-move/move-examples/token_objects/README.md +++ b/aptos-move/move-examples/token_objects/README.md @@ -3,3 +3,4 @@ This directory contains various token object examples, including: * hero * ambassador: a soulbound token example +* token lockup: an example of how to disable transferring a token for the first 7 days of ownership \ No newline at end of file diff --git a/aptos-move/move-examples/token_objects/ambassador/Move.toml 
b/aptos-move/move-examples/token_objects/ambassador/Move.toml new file mode 100644 index 0000000000000..7a3cded854dab --- /dev/null +++ b/aptos-move/move-examples/token_objects/ambassador/Move.toml @@ -0,0 +1,10 @@ +[package] +name = 'ambassador_token' +version = '1.0.0' + +[addresses] +token_objects = "0xCAFE" + +[dependencies] +AptosFramework = { local = "../../../framework/aptos-framework" } +AptosTokenObjects = { local = "../../../framework/aptos-token-objects" } diff --git a/aptos-move/move-examples/token_objects/ambassador/move/Move.toml b/aptos-move/move-examples/token_objects/ambassador/move/Move.toml deleted file mode 100644 index 00e7a47c45b47..0000000000000 --- a/aptos-move/move-examples/token_objects/ambassador/move/Move.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = 'ambassador_token' -version = '1.0.0' - -[addresses] -token_objects = "0xCAFE" - -[dependencies] -AptosFramework = { local = "../../../../framework/aptos-framework" } -AptosTokenObjects = { local = "../../../../framework/aptos-token-objects" } diff --git a/aptos-move/move-examples/token_objects/ambassador/move/sources/ambassador.move b/aptos-move/move-examples/token_objects/ambassador/sources/ambassador.move similarity index 87% rename from aptos-move/move-examples/token_objects/ambassador/move/sources/ambassador.move rename to aptos-move/move-examples/token_objects/ambassador/sources/ambassador.move index c379bb6d033c0..d60102db45134 100644 --- a/aptos-move/move-examples/token_objects/ambassador/move/sources/ambassador.move +++ b/aptos-move/move-examples/token_objects/ambassador/sources/ambassador.move @@ -8,6 +8,8 @@ /// The rank is determined by the level such that the rank is Bronze if the level is between 0 and 9, /// Silver if the level is between 10 and 19, and Gold if the level is 20 or greater. /// The rank is stored in the property map, thus displayed in a wallet as a trait of the token. +/// The token uri is the concatenation of the base uri and the rank, where the base uri is given +/// as an argument of the minting function. So, the token uri changes when the rank changes. module token_objects::ambassador { use std::error; use std::option; @@ -45,16 +47,22 @@ module token_objects::ambassador { const RANK_SILVER: vector = b"Silver"; const RANK_BRONZE: vector = b"Bronze"; + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] /// The ambassador token struct AmbassadorToken has key { + /// Used to mutate the token uri + mutator_ref: token::MutatorRef, /// Used to burn. burn_ref: token::BurnRef, /// Used to mutate properties property_mutator_ref: property_map::MutatorRef, /// Used to emit LevelUpdateEvent level_update_events: event::EventHandle, + /// the base URI of the token + base_uri: String, } + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] /// The ambassador level struct AmbassadorLevel has key { ambassador_level: u64, @@ -86,6 +94,20 @@ module token_objects::ambassador { property_map::read_string(&token, &string::utf8(b"Rank")) } + #[view] + /// Returns the ambassador level of the token of the address + public fun ambassador_level_from_address(addr: address): u64 acquires AmbassadorLevel { + let token = object::address_to_object(addr); + ambassador_level(token) + } + + #[view] + /// Returns the ambassador rank of the token of the address + public fun ambassador_rank_from_address(addr: address): String { + let token = object::address_to_object(addr); + ambassador_rank(token) + } + /// Creates the ambassador collection. 
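The two address-based views above make the level and rank readable without first constructing an Object handle. Below is a minimal sketch of a caller, assuming `addr` is the address of an already-minted AmbassadorToken; the module name is hypothetical and only for illustration. With the URI rule described above, a token minted with base URI "https://example.com/a1/" would report ".../Bronze" at level 0 and ".../Silver" once the level reaches 10.

#[test_only]
module token_objects::ambassador_views_example {
    use std::string::String;
    use token_objects::ambassador;

    /// Reads the on-chain level and rank of the ambassador token stored at `addr`.
    public fun level_and_rank(addr: address): (u64, String) {
        (
            ambassador::ambassador_level_from_address(addr),
            ambassador::ambassador_rank_from_address(addr),
        )
    }
}

Returning to the collection-creation helper introduced above: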
This function creates a collection with unlimited supply using /// the module constants for description, name, and URI, defined above. The collection will not have /// any royalty configuration because the tokens in this collection will not be transferred or sold. @@ -121,13 +143,15 @@ module token_objects::ambassador { creator: &signer, description: String, name: String, - uri: String, + base_uri: String, soul_bound_to: address, ) { // The collection name is used to locate the collection object and to create a new token object. let collection = string::utf8(COLLECTION_NAME); // Creates the ambassador token, and get the constructor ref of the token. The constructor ref // is used to generate the refs of the token. + let uri = base_uri; + string::append(&mut uri, string::utf8(RANK_BRONZE)); let constructor_ref = token::create_named_token( creator, collection, @@ -141,6 +165,7 @@ module token_objects::ambassador { // (e.g., AmbassadorLevel) under the token object address. The refs are used to manage the token. let object_signer = object::generate_signer(&constructor_ref); let transfer_ref = object::generate_transfer_ref(&constructor_ref); + let mutator_ref = token::generate_mutator_ref(&constructor_ref); let burn_ref = token::generate_burn_ref(&constructor_ref); let property_mutator_ref = property_map::generate_mutator_ref(&constructor_ref); @@ -165,9 +190,11 @@ module token_objects::ambassador { // Publishes the AmbassadorToken resource with the refs and the event handle for `LevelUpdateEvent`. let ambassador_token = AmbassadorToken { + mutator_ref, burn_ref, property_mutator_ref, level_update_events: object::new_event_handle(&object_signer), + base_uri }; move_to(&object_signer, ambassador_token); } @@ -178,9 +205,11 @@ module token_objects::ambassador { authorize_creator(creator, &token); let ambassador_token = move_from(object::object_address(&token)); let AmbassadorToken { + mutator_ref: _, burn_ref, property_mutator_ref, level_update_events, + base_uri: _ } = ambassador_token; event::destroy_handle(level_update_events); @@ -229,10 +258,15 @@ module token_objects::ambassador { }; let token_address = object::object_address(&token); + let ambassador_token = borrow_global(token_address); // Gets `property_mutator_ref` to update the rank in the property map. - let property_mutator_ref = &borrow_global(token_address).property_mutator_ref; + let property_mutator_ref = &ambassador_token.property_mutator_ref; // Updates the rank in the property map. property_map::update_typed(property_mutator_ref, &string::utf8(b"Rank"), string::utf8(new_rank)); + // Updates the token URI based on the new rank. + let uri = ambassador_token.base_uri; + string::append(&mut uri, string::utf8(new_rank)); + token::set_uri(&ambassador_token.mutator_ref, uri); } /// Authorizes the creator of the token. Asserts that the token exists and the creator of the token @@ -261,7 +295,7 @@ module token_objects::ambassador { // ------------------------------------------- let token_name = string::utf8(b"Ambassador Token #1"); let token_description = string::utf8(b"Ambassador Token #1 Description"); - let token_uri = string::utf8(b"Ambassador Token #1 URI"); + let token_uri = string::utf8(b"Ambassador Token #1 URI/"); let user1_addr = signer::address_of(user1); // Creates the Ambassador token for User1. mint_ambassador_token( @@ -288,12 +322,13 @@ module token_objects::ambassador { assert!(ambassador_level(token) == 0, 2); // Asserts that the initial rank of the token is "Bronze". 
assert!(ambassador_rank(token) == string::utf8(RANK_BRONZE), 3); + assert!(token::uri(token) == string::utf8(b"Ambassador Token #1 URI/Bronze"), 4); // `creator` sets the level to 15. set_ambassador_level(creator, token, 15); // Asserts that the level is updated to 15. assert!(ambassador_level(token) == 15, 4); // Asserts that the rank is updated to "Silver" which is the expected rank for level 15. - assert!(ambassador_rank(token) == string::utf8(RANK_SILVER), 5); + assert!(token::uri(token) == string::utf8(b"Ambassador Token #1 URI/Silver"), 5); // ------------------------ // Creator burns the token. diff --git a/aptos-move/move-examples/token_objects/knight/Move.toml b/aptos-move/move-examples/token_objects/knight/Move.toml new file mode 100644 index 0000000000000..04e829911b84a --- /dev/null +++ b/aptos-move/move-examples/token_objects/knight/Move.toml @@ -0,0 +1,10 @@ +[package] +name = 'knight' +version = '1.0.0' + +[addresses] +token_objects = "_" + +[dependencies] +AptosFramework = { local = "../../../framework/aptos-framework" } +AptosTokenObjects = { local = "../../../framework/aptos-token-objects" } diff --git a/aptos-move/move-examples/token_objects/knight/sources/food.move b/aptos-move/move-examples/token_objects/knight/sources/food.move new file mode 100644 index 0000000000000..94060fff516dd --- /dev/null +++ b/aptos-move/move-examples/token_objects/knight/sources/food.move @@ -0,0 +1,327 @@ +/// This module implements the the food tokens (fungible token). When the module initializes, +/// it creates the collection and two fungible tokens such as Corn and Meat. +module token_objects::food { + use std::error; + use std::option; + use std::signer; + use std::string::{Self, String}; + use aptos_framework::fungible_asset::{Self, Metadata}; + use aptos_framework::object::{Self, Object}; + use aptos_framework::primary_fungible_store; + use aptos_token_objects::collection; + use aptos_token_objects::property_map; + use aptos_token_objects::token; + + /// The token does not exist + const ETOKEN_DOES_NOT_EXIST: u64 = 1; + /// The provided signer is not the creator + const ENOT_CREATOR: u64 = 2; + /// Attempted to mutate an immutable field + const EFIELD_NOT_MUTABLE: u64 = 3; + /// Attempted to burn a non-burnable token + const ETOKEN_NOT_BURNABLE: u64 = 4; + /// Attempted to mutate a property map that is not mutable + const EPROPERTIES_NOT_MUTABLE: u64 = 5; + // The collection does not exist + const ECOLLECTION_DOES_NOT_EXIST: u64 = 6; + + /// The food collection name + const FOOD_COLLECTION_NAME: vector = b"Food Collection Name"; + /// The food collection description + const FOOD_COLLECTION_DESCRIPTION: vector = b"Food Collection Description"; + /// The food collection URI + const FOOD_COLLECTION_URI: vector = b"https://food.collection.uri"; + + /// The knight token collection name + const KNIGHT_COLLECTION_NAME: vector = b"Knight Collection Name"; + /// The knight collection description + const KNIGHT_COLLECTION_DESCRIPTION: vector = b"Knight Collection Description"; + /// The knight collection URI + const KNIGHT_COLLECTION_URI: vector = b"https://knight.collection.uri"; + + /// The corn token name + const CORN_TOKEN_NAME: vector = b"Corn Token"; + /// The meat token name + const MEAT_TOKEN_NAME: vector = b"Meat Token"; + + /// Property names + const CONDITION_PROPERTY_NAME: vector = b"Condition"; + const RESTORATION_VALUE_PROPERTY_NAME: vector = b"Restoration Value"; + const HEALTH_POINT_PROPERTY_NAME: vector = b"Health Point"; + + /// The condition of a knight + const 
CONDITION_HUNGRY: vector = b"Hungry"; + const CONDITION_GOOD: vector = b"Good"; + + friend token_objects::knight; + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + // Food Token + struct FoodToken has key { + /// Used to mutate properties + property_mutator_ref: property_map::MutatorRef, + /// Used to mint fungible assets. + fungible_asset_mint_ref: fungible_asset::MintRef, + /// Used to burn fungible assets. + fungible_asset_burn_ref: fungible_asset::BurnRef, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Restoration value of the food. An attribute of a food token. + struct RestorationValue has key { + value: u64, + } + + /// Initializes the module, creating the food collection and creating two fungible tokens such as Corn, and Meat. + fun init_module(sender: &signer) { + // Create a collection for food tokens. + create_food_collection(sender); + // Create two food token (i.e., Corn and Meat) as fungible tokens, meaning that there can be multiple units of them. + create_food_token_as_fungible_token( + sender, + string::utf8(b"Corn Token Description"), + string::utf8(CORN_TOKEN_NAME), + string::utf8(b"https://raw.githubusercontent.com/junkil-park/metadata/main/knight/Corn"), + string::utf8(b"Corn"), + string::utf8(b"CORN"), + string::utf8(b"https://raw.githubusercontent.com/junkil-park/metadata/main/knight/Corn.png"), + string::utf8(b"https://www.aptoslabs.com"), + 5, + ); + create_food_token_as_fungible_token( + sender, + string::utf8(b"Meat Token Description"), + string::utf8(MEAT_TOKEN_NAME), + string::utf8(b"https://raw.githubusercontent.com/junkil-park/metadata/main/knight/Meat"), + string::utf8(b"Meat"), + string::utf8(b"MEAT"), + string::utf8(b"https://raw.githubusercontent.com/junkil-park/metadata/main/knight/Meat.png"), + string::utf8(b"https://www.aptoslabs.com"), + 20, + ); + } + + #[view] + /// Returns the restoration value of the food token + public fun restoration_value(token: Object): u64 acquires RestorationValue { + let restoration_value_in_food = borrow_global(object::object_address(&token)); + restoration_value_in_food.value + } + + #[view] + /// Returns the balance of the food token of the owner + public fun food_balance(owner_addr: address, food: Object): u64 { + let metadata = object::convert(food); + let store = primary_fungible_store::ensure_primary_store_exists(owner_addr, metadata); + fungible_asset::balance(store) + } + + #[view] + /// Returns the corn token address + public fun corn_token_address(): address { + food_token_address(string::utf8(CORN_TOKEN_NAME)) + } + + #[view] + /// Returns the meat token address + public fun meat_token_address(): address { + food_token_address(string::utf8(MEAT_TOKEN_NAME)) + } + + #[view] + /// Returns the food token address by name + public fun food_token_address(food_token_name: String): address { + token::create_token_address(&@token_objects, &string::utf8(FOOD_COLLECTION_NAME), &food_token_name) + } + + /// Creates the food collection. + fun create_food_collection(creator: &signer) { + // Constructs the strings from the bytes. + let description = string::utf8(FOOD_COLLECTION_DESCRIPTION); + let name = string::utf8(FOOD_COLLECTION_NAME); + let uri = string::utf8(FOOD_COLLECTION_URI); + + // Creates the collection with unlimited supply and without establishing any royalty configuration. + collection::create_unlimited_collection( + creator, + description, + name, + option::none(), + uri, + ); + } + + /// Creates the food token as fungible token. 
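Before the helper that creates each food token, a quick usage sketch: because each food token doubles as fungible-asset metadata, clients interact with it through its deterministic object address and the holder's primary fungible store. The sketch below relies only on functions of this module (some defined further below); the example module name and the amounts are assumptions for illustration.

#[test_only]
module token_objects::food_usage_example {
    use std::signer;
    use aptos_framework::object;
    use token_objects::food::{Self, FoodToken};

    #[test(creator = @token_objects, user = @0x456)]
    fun mint_and_check_balance(creator: &signer, user: &signer) {
        // Publish the food collection plus the Corn and Meat tokens.
        food::init_module_for_test(creator);

        // The corn token object lives at an address derived from the creator,
        // the collection name, and the token name, so anyone can locate it.
        let corn = object::address_to_object<FoodToken>(food::corn_token_address());

        // The creator mints 25 CORN to the user; the units land in the user's
        // primary fungible store, which food_balance reads back.
        let user_addr = signer::address_of(user);
        food::mint_corn(creator, user_addr, 25);
        assert!(food::food_balance(user_addr, corn) == 25, 0);
    }
}

That helper follows: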
+ fun create_food_token_as_fungible_token( + creator: &signer, + description: String, + name: String, + uri: String, + fungible_asset_name: String, + fungible_asset_symbol: String, + icon_uri: String, + project_uri: String, + restoration_value: u64, + ) { + // The collection name is used to locate the collection object and to create a new token object. + let collection = string::utf8(FOOD_COLLECTION_NAME); + // Creates the food token, and get the constructor ref of the token. The constructor ref + // is used to generate the refs of the token. + let constructor_ref = token::create_named_token( + creator, + collection, + description, + name, + option::none(), + uri, + ); + + // Generates the object signer and the refs. The object signer is used to publish a resource + // (e.g., RestorationValue) under the token object address. The refs are used to manage the token. + let object_signer = object::generate_signer(&constructor_ref); + let property_mutator_ref = property_map::generate_mutator_ref(&constructor_ref); + + // Initializes the value with the given value in food. + move_to(&object_signer, RestorationValue { value: restoration_value }); + + // Initialize the property map. + let properties = property_map::prepare_input(vector[], vector[], vector[]); + property_map::init(&constructor_ref, properties); + property_map::add_typed( + &property_mutator_ref, + string::utf8(RESTORATION_VALUE_PROPERTY_NAME), + restoration_value + ); + + // Creates the fungible asset. + primary_fungible_store::create_primary_store_enabled_fungible_asset( + &constructor_ref, + option::none(), + fungible_asset_name, + fungible_asset_symbol, + 0, + icon_uri, + project_uri, + ); + let fungible_asset_mint_ref = fungible_asset::generate_mint_ref(&constructor_ref); + let fungible_asset_burn_ref = fungible_asset::generate_burn_ref(&constructor_ref); + + // Publishes the FoodToken resource with the refs. + let food_token = FoodToken { + property_mutator_ref, + fungible_asset_mint_ref, + fungible_asset_burn_ref, + }; + move_to(&object_signer, food_token); + } + + /// Mints the given amount of the corn token to the given receiver. + public entry fun mint_corn(creator: &signer, receiver: address, amount: u64) acquires FoodToken { + let corn_token = object::address_to_object(corn_token_address()); + mint_internal(creator, corn_token, receiver, amount); + } + + /// Mints the given amount of the meat token to the given receiver. + public entry fun mint_meat(creator: &signer, receiver: address, amount: u64) acquires FoodToken { + let meat_token = object::address_to_object(meat_token_address()); + mint_internal(creator, meat_token, receiver, amount); + } + + /// The internal mint function. + fun mint_internal(creator: &signer, token: Object, receiver: address, amount: u64) acquires FoodToken { + let food_token = authorized_borrow(creator, &token); + let fungible_asset_mint_ref = &food_token.fungible_asset_mint_ref; + let fa = fungible_asset::mint(fungible_asset_mint_ref, amount); + primary_fungible_store::deposit(receiver, fa); + } + + /// Transfers the given amount of the corn token from the given sender to the given receiver. + public entry fun transfer_corn(from: &signer, to: address, amount: u64) { + transfer_food(from, object::address_to_object(corn_token_address()), to, amount); + } + + /// Transfers the given amount of the meat token from the given sender to the given receiver. 
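Minting, unlike transferring, is creator-gated: `mint_internal` above resolves the FoodToken through `authorized_borrow` (defined further below), which aborts with ENOT_CREATOR for any signer other than the creator. A sketch of a negative test for that guard, with a hypothetical module name and signer address; 0x50002 is `error::permission_denied(ENOT_CREATOR)`.

#[test_only]
module token_objects::food_mint_auth_example {
    use token_objects::food;

    #[test(creator = @token_objects, mallory = @0xBEEF)]
    #[expected_failure(abort_code = 0x50002, location = token_objects::food)]
    fun non_creator_cannot_mint(creator: &signer, mallory: &signer) {
        food::init_module_for_test(creator);
        // mallory did not create the corn token, so this mint aborts with ENOT_CREATOR.
        food::mint_corn(mallory, @0xBEEF, 1);
    }
}

The transfer entry points for meat and arbitrary food objects follow.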
+ public entry fun transfer_meat(from: &signer, to: address, amount: u64) { + transfer_food(from, object::address_to_object(meat_token_address()), to, amount); + } + + public entry fun transfer_food(from: &signer, food: Object, to: address, amount: u64) { + let metadata = object::convert(food); + primary_fungible_store::transfer(from, metadata, to, amount); + } + + public(friend) fun burn_food(from: &signer, food: Object, amount: u64) acquires FoodToken { + let metadata = object::convert(food); + let food_addr = object::object_address(&food); + let food_token = borrow_global(food_addr); + let from_store = primary_fungible_store::ensure_primary_store_exists(signer::address_of(from), metadata); + fungible_asset::burn_from(&food_token.fungible_asset_burn_ref, from_store, amount); + } + + inline fun authorized_borrow(creator: &signer, token: &Object): &FoodToken { + let token_address = object::object_address(token); + assert!( + exists(token_address), + error::not_found(ETOKEN_DOES_NOT_EXIST), + ); + + assert!( + token::creator(*token) == signer::address_of(creator), + error::permission_denied(ENOT_CREATOR), + ); + borrow_global(token_address) + } + + #[test_only] + public fun init_module_for_test(creator: &signer) { + init_module(creator); + } + + #[test(creator = @token_objects, user1 = @0x456, user2 = @0x789)] + public fun test_food(creator: &signer, user1: &signer, user2: &signer) acquires FoodToken { + // This test assumes that the creator's address is equal to @token_objects. + assert!(signer::address_of(creator) == @token_objects, 0); + + // --------------------------------------------------------------------- + // Creator creates the collection, and mints corn and meat tokens in it. + // --------------------------------------------------------------------- + init_module(creator); + + // ------------------------------------------- + // Creator mints and sends 100 corns to User1. + // ------------------------------------------- + let user1_addr = signer::address_of(user1); + mint_corn(creator, user1_addr, 100); + + let corn_token = object::address_to_object(corn_token_address()); + // Assert that the user1 has 100 corns. + assert!(food_balance(user1_addr, corn_token) == 100, 0); + + // ------------------------------------------- + // Creator mints and sends 200 meats to User2. + // ------------------------------------------- + let user2_addr = signer::address_of(user2); + mint_meat(creator, user2_addr, 200); + let meat_token = object::address_to_object(meat_token_address()); + // Assert that the user2 has 200 meats. + assert!(food_balance(user2_addr, meat_token) == 200, 0); + + // ------------------------------ + // User1 sends 10 corns to User2. + // ------------------------------ + transfer_corn(user1, user2_addr, 10); + // Assert that the user1 has 90 corns. + assert!(food_balance(user1_addr, corn_token) == 90, 0); + // Assert that the user2 has 10 corns. + assert!(food_balance(user2_addr, corn_token) == 10, 0); + + // ------------------------------ + // User2 sends 20 meats to User1. + // ------------------------------ + transfer_meat(user2, user1_addr, 20); + // Assert that the user1 has 20 meats. + assert!(food_balance(user1_addr, meat_token) == 20, 0); + // Assert that the user2 has 180 meats. 
+ assert!(food_balance(user2_addr, meat_token) == 180, 0); + } +} diff --git a/aptos-move/move-examples/token_objects/knight/sources/knight.move b/aptos-move/move-examples/token_objects/knight/sources/knight.move new file mode 100644 index 0000000000000..9501b4572457d --- /dev/null +++ b/aptos-move/move-examples/token_objects/knight/sources/knight.move @@ -0,0 +1,278 @@ +/// This module implements the knight token (non-fungible token) including the +/// functions create the collection and the knight tokens, and the function to feed a +/// knight token with food tokens to increase the knight's health point. +module token_objects::knight { + use std::option; + use std::string::{Self, String}; + use aptos_framework::event; + use aptos_framework::object::{Self, Object}; + use aptos_token_objects::collection; + use aptos_token_objects::property_map; + use aptos_token_objects::token; + use token_objects::food::{Self, FoodToken}; + + /// The token does not exist + const ETOKEN_DOES_NOT_EXIST: u64 = 1; + /// The provided signer is not the creator + const ENOT_CREATOR: u64 = 2; + /// Attempted to mutate an immutable field + const EFIELD_NOT_MUTABLE: u64 = 3; + /// Attempted to burn a non-burnable token + const ETOKEN_NOT_BURNABLE: u64 = 4; + /// Attempted to mutate a property map that is not mutable + const EPROPERTIES_NOT_MUTABLE: u64 = 5; + // The collection does not exist + const ECOLLECTION_DOES_NOT_EXIST: u64 = 6; + + /// The knight token collection name + const KNIGHT_COLLECTION_NAME: vector = b"Knight Collection Name"; + /// The knight collection description + const KNIGHT_COLLECTION_DESCRIPTION: vector = b"Knight Collection Description"; + /// The knight collection URI + const KNIGHT_COLLECTION_URI: vector = b"https://knight.collection.uri"; + + /// Property names + const CONDITION_PROPERTY_NAME: vector = b"Condition"; + const HEALTH_POINT_PROPERTY_NAME: vector = b"Health Point"; + + /// The condition of a knight + const CONDITION_HUNGRY: vector = b"Hungry"; + const CONDITION_GOOD: vector = b"Good"; + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Knight token + struct KnightToken has key { + /// Used to mutate the token uri + mutator_ref: token::MutatorRef, + /// Used to mutate properties + property_mutator_ref: property_map::MutatorRef, + /// Used to emit HealthUpdateEvent + health_update_events: event::EventHandle, + /// the base URI of the token + base_uri: String, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// The knight's health point + struct HealthPoint has key { + value: u64, + } + + /// The health update event + struct HealthUpdateEvent has drop, store { + old_health: u64, + new_health: u64, + } + + /// Initializes the module, creating the knight token collection. + fun init_module(sender: &signer) { + // Create a collection for knight tokens. + create_knight_collection(sender); + } + + #[view] + /// Returns the knight health point of the token + public fun health_point(token: Object): u64 acquires HealthPoint { + let health = borrow_global(object::object_address(&token)); + health.value + } + + #[view] + /// Returns the knight token address by name + public fun knight_token_address(knight_token_name: String): address { + token::create_token_address(&@token_objects, &string::utf8(KNIGHT_COLLECTION_NAME), &knight_token_name) + } + + /// Creates the knight collection. This function creates a collection with unlimited supply using + /// the module constants for description, name, and URI, defined above. 
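For orientation, the rest of this module implements the knight lifecycle: `mint_knight` (below) creates the token with health point 1 and condition Hungry, and `feed_food` later adds `restoration_value * amount` to the health point, switching the condition, and therefore the URI suffix, to Good once health exceeds 20. A minimal sketch of that flow, written as an extra in-module test for illustration only; it mirrors `test_knight` further down and assumes a hypothetical base URI.

    #[test(creator = @token_objects, player = @0x456)]
    fun example_feed_flow(creator: &signer, player: &signer) {
        // Publish the food and knight collections, then give the player 2 MEAT.
        food::init_module_for_test(creator);
        init_module(creator);
        let player_addr = std::signer::address_of(player);
        food::mint_meat(creator, player_addr, 2);

        // Mint a knight for the player; it starts at health 1, condition Hungry.
        mint_knight(
            creator,
            string::utf8(b"Example knight"),
            string::utf8(b"Knight Example #1"),
            string::utf8(b"https://example.com/knight/"),
            player_addr,
        );
        let knight = object::address_to_object<KnightToken>(
            knight_token_address(string::utf8(b"Knight Example #1")),
        );

        // 2 meat * restoration value 20 = +40 health, so 1 -> 41 and the condition flips to Good.
        feed_meat(player, knight, 2);
        assert!(health_point(knight) == 41, 0);
        assert!(token::uri(knight) == string::utf8(b"https://example.com/knight/Good"), 1);
    }

The collection and minting helpers follow.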
The royalty configuration
+ is skipped in this collection for simplicity.
+ fun create_knight_collection(creator: &signer) {
+ // Constructs the strings from the bytes.
+ let description = string::utf8(KNIGHT_COLLECTION_DESCRIPTION);
+ let name = string::utf8(KNIGHT_COLLECTION_NAME);
+ let uri = string::utf8(KNIGHT_COLLECTION_URI);
+
+ // Creates the collection with unlimited supply and without establishing any royalty configuration.
+ collection::create_unlimited_collection(
+ creator,
+ description,
+ name,
+ option::none(),
+ uri,
+ );
+ }
+
+ /// Mints a knight token. This function mints a new knight token and transfers it to the
+ `receiver` address. The token is minted with health point 1 and condition Hungry.
+ public entry fun mint_knight(
+ creator: &signer,
+ description: String,
+ name: String,
+ base_uri: String,
+ receiver: address,
+ ) {
+ // The collection name is used to locate the collection object and to create a new token object.
+ let collection = string::utf8(KNIGHT_COLLECTION_NAME);
+ // Creates the knight token, and gets the constructor ref of the token. The constructor ref
+ // is used to generate the refs of the token.
+ let uri = base_uri;
+ string::append(&mut uri, string::utf8(CONDITION_HUNGRY));
+ let constructor_ref = token::create_named_token(
+ creator,
+ collection,
+ description,
+ name,
+ option::none(),
+ uri,
+ );
+
+ // Generates the object signer and the refs. The object signer is used to publish a resource
+ // (e.g., HealthPoint) under the token object address. The refs are used to manage the token.
+ let object_signer = object::generate_signer(&constructor_ref);
+ let transfer_ref = object::generate_transfer_ref(&constructor_ref);
+ let mutator_ref = token::generate_mutator_ref(&constructor_ref);
+ let property_mutator_ref = property_map::generate_mutator_ref(&constructor_ref);
+
+ // Transfers the token to the `receiver` address
+ let linear_transfer_ref = object::generate_linear_transfer_ref(&transfer_ref);
+ object::transfer_with_ref(linear_transfer_ref, receiver);
+
+ // Initializes the knight health point as 1
+ move_to(&object_signer, HealthPoint { value: 1 });
+
+ // Initialize the property map and the knight condition as Hungry
+ let properties = property_map::prepare_input(vector[], vector[], vector[]);
+ property_map::init(&constructor_ref, properties);
+ property_map::add_typed(
+ &property_mutator_ref,
+ string::utf8(CONDITION_PROPERTY_NAME),
+ string::utf8(CONDITION_HUNGRY),
+ );
+ // Although the health point is stored in the HealthPoint resource, it is also duplicated
+ // and stored in the property map to be recognized as a property by the wallet.
+ property_map::add_typed(
+ &property_mutator_ref,
+ string::utf8(HEALTH_POINT_PROPERTY_NAME),
+ 1,
+ );
+
+ // Publishes the KnightToken resource with the refs and the event handle for `HealthUpdateEvent`.
+ let knight_token = KnightToken { + mutator_ref, + property_mutator_ref, + health_update_events: object::new_event_handle(&object_signer), + base_uri + }; + move_to(&object_signer, knight_token); + } + + public entry fun feed_corn(from: &signer, to: Object, amount: u64) acquires HealthPoint, KnightToken { + let corn_token = object::address_to_object(food::corn_token_address()); + feed_food(from, corn_token, to, amount); + } + + public entry fun feed_meat(from: &signer, to: Object, amount: u64) acquires HealthPoint, KnightToken { + let meat_token = object::address_to_object(food::meat_token_address()); + feed_food(from, meat_token, to, amount); + } + + public entry fun feed_food(from: &signer, food_token: Object, to: Object, amount: u64) acquires HealthPoint, KnightToken { + let knight_token_address = object::object_address(&to); + food::burn_food(from, food_token, amount); + + let restoration_amount = food::restoration_value(food_token) * amount; + let health_point = borrow_global_mut(object::object_address(&to)); + let old_health_point = health_point.value; + let new_health_point = old_health_point + restoration_amount; + health_point.value = new_health_point; + + let knight = borrow_global_mut(knight_token_address); + // Gets `property_mutator_ref` to update the health point and condition in the property map. + let property_mutator_ref = &knight.property_mutator_ref; + // Updates the health point in the property map. + property_map::update_typed(property_mutator_ref, &string::utf8(HEALTH_POINT_PROPERTY_NAME), new_health_point); + + event::emit_event( + &mut knight.health_update_events, + HealthUpdateEvent { + old_health: old_health_point, + new_health: new_health_point, + } + ); + + // `new_condition` is determined based on the new health point. + let new_condition = if (new_health_point <= 20) { + CONDITION_HUNGRY + } else { + CONDITION_GOOD + }; + // Updates the condition in the property map. + property_map::update_typed(property_mutator_ref, &string::utf8(CONDITION_PROPERTY_NAME), string::utf8(new_condition)); + + // Updates the token URI based on the new condition. + let uri = knight.base_uri; + string::append(&mut uri, string::utf8(new_condition)); + token::set_uri(&knight.mutator_ref, uri); + } + + #[test_only] + use std::signer; + + #[test(creator = @token_objects, user1 = @0x456)] + public fun test_knight(creator: &signer, user1: &signer) acquires HealthPoint, KnightToken { + // This test assumes that the creator's address is equal to @token_objects. + assert!(signer::address_of(creator) == @token_objects, 0); + + // --------------------------------------------------------------------- + // Creator creates the collection, and mints corn and meat tokens in it. + // --------------------------------------------------------------------- + food::init_module_for_test(creator); + init_module(creator); + + // ------------------------------------------------------- + // Creator mints and sends 90 corns and 20 meats to User1. + // ------------------------------------------------------- + let user1_addr = signer::address_of(user1); + food::mint_corn(creator, user1_addr, 90); + food::mint_meat(creator, user1_addr, 20); + + // --------------------------------------- + // Creator mints a knight token for User1. 
+ // --------------------------------------- + let token_name = string::utf8(b"Knight Token #1"); + let token_description = string::utf8(b"Knight Token #1 Description"); + let token_uri = string::utf8(b"Knight Token #1 URI"); + let user1_addr = signer::address_of(user1); + // Creates the knight token for User1. + mint_knight( + creator, + token_description, + token_name, + token_uri, + user1_addr, + ); + let token_address = knight_token_address(token_name); + let knight_token = object::address_to_object(token_address); + + // Asserts that the owner of the token is User1. + assert!(object::owner(knight_token) == user1_addr, 1); + // Asserts that the health point of the token is 1. + assert!(health_point(knight_token) == 1, 2); + + let corn_token = object::address_to_object(food::corn_token_address()); + let old_corn_balance = food::food_balance(user1_addr, corn_token); + feed_food(user1, corn_token, knight_token, 3); + // Asserts that the corn balance decreases by 3. + assert!(food::food_balance(user1_addr, corn_token) == old_corn_balance - 3, 0); + // Asserts that the health point increases by 15 (= amount * restoration_value = 3 * 5). + assert!(health_point(knight_token) == 16, 2); + + let meat_token = object::address_to_object(food::meat_token_address()); + let old_meat_balance = food::food_balance(user1_addr, meat_token); + feed_food(user1, meat_token, knight_token, 2); + // Asserts that the corn balance decreases by 3. + assert!(food::food_balance(user1_addr, meat_token) == old_meat_balance - 2, 0); + // Asserts that the health point increases by 40 (= amount * restoration_value = 2 * 20). + assert!(health_point(knight_token) == 56, 3); + } +} diff --git a/aptos-move/move-examples/token_objects/token_lockup/Move.toml b/aptos-move/move-examples/token_objects/token_lockup/Move.toml new file mode 100644 index 0000000000000..3d8f62cb13e63 --- /dev/null +++ b/aptos-move/move-examples/token_objects/token_lockup/Move.toml @@ -0,0 +1,10 @@ +[package] +name = 'Token Lockup' +version = '1.0.0' + +[addresses] +token_objects = "_" + +[dependencies] +AptosFramework = { local = "../../../framework/aptos-framework" } +AptosTokenObjects = { local = "../../../framework/aptos-token-objects" } diff --git a/aptos-move/move-examples/token_objects/token_lockup/sources/token_lockup.move b/aptos-move/move-examples/token_objects/token_lockup/sources/token_lockup.move new file mode 100644 index 0000000000000..2f664d4579961 --- /dev/null +++ b/aptos-move/move-examples/token_objects/token_lockup/sources/token_lockup.move @@ -0,0 +1,105 @@ +module token_objects::token_lockup { + use std::signer; + use std::option; + use std::error; + use std::string::{Self, String}; + use std::object::{Self, Object, TransferRef, ConstructorRef}; + use std::timestamp; + use aptos_token_objects::royalty::{Royalty}; + use aptos_token_objects::token::{Self, Token}; + use aptos_token_objects::collection; + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + struct LockupConfig has key { + last_transfer: u64, + transfer_ref: TransferRef, + } + + /// The owner of the token has not owned it for long enough + const ETOKEN_IN_LOCKUP: u64 = 0; + /// The owner must own the token to transfer it + const ENOT_TOKEN_OWNER: u64 = 1; + + const COLLECTION_NAME: vector = b"Rickety Raccoons"; + const COLLECTION_DESCRIPTION: vector = b"A collection of rickety raccoons!"; + const COLLECTION_URI: vector = b"https://ricketyracoonswebsite.com/collection/rickety-raccoon.png"; + const TOKEN_URI: vector = 
b"https://ricketyracoonswebsite.com/tokens/raccoon.png"; + const MAXIMUM_SUPPLY: u64 = 1000; + // 24 hours in one day * 60 minutes in one hour * 60 seconds in one minute * 7 days + const LOCKUP_PERIOD_SECS: u64 = (24 * 60 * 60) * 7; + + public fun initialize_collection(creator: &signer) { + collection::create_fixed_collection( + creator, + string::utf8(COLLECTION_DESCRIPTION), + MAXIMUM_SUPPLY, + string::utf8(COLLECTION_NAME), + option::none(), + string::utf8(COLLECTION_URI), + ); + } + + public fun mint_to( + creator: &signer, + token_name: String, + to: address, + ): ConstructorRef { + let token_constructor_ref = token::create_named_token( + creator, + string::utf8(COLLECTION_NAME), + string::utf8(COLLECTION_DESCRIPTION), + token_name, + option::none(), + string::utf8(TOKEN_URI), + ); + + let transfer_ref = object::generate_transfer_ref(&token_constructor_ref); + let token_signer = object::generate_signer(&token_constructor_ref); + let token_object = object::object_from_constructor_ref(&token_constructor_ref); + + // transfer the token to the receiving account before we permanently disable ungated transfer + object::transfer(creator, token_object, to); + + // disable the ability to transfer the token through any means other than the `transfer` function we define + object::disable_ungated_transfer(&transfer_ref); + + move_to( + &token_signer, + LockupConfig { + last_transfer: timestamp::now_seconds(), + transfer_ref, + } + ); + + token_constructor_ref + } + + public entry fun transfer( + from: &signer, + token: Object, + to: address, + ) acquires LockupConfig { + // redundant error checking for clear error message + assert!(object::is_owner(token, signer::address_of(from)), error::permission_denied(ENOT_TOKEN_OWNER)); + let now = timestamp::now_seconds(); + let lockup_config = borrow_global_mut(object::object_address(&token)); + + let time_since_transfer = now - lockup_config.last_transfer; + let lockup_period_secs = LOCKUP_PERIOD_SECS; + assert!(time_since_transfer >= lockup_period_secs, error::permission_denied(ETOKEN_IN_LOCKUP)); + + // generate linear transfer ref and transfer the token object + let linear_transfer_ref = object::generate_linear_transfer_ref(&lockup_config.transfer_ref); + object::transfer_with_ref(linear_transfer_ref, to); + + // update the lockup config to reflect the latest transfer time + *&mut lockup_config.last_transfer = now; + } + + #[view] + public fun view_last_transfer( + token: Object, + ): u64 acquires LockupConfig { + borrow_global(object::object_address(&token)).last_transfer + } +} diff --git a/aptos-move/move-examples/token_objects/token_lockup/sources/unit_tests.move b/aptos-move/move-examples/token_objects/token_lockup/sources/unit_tests.move new file mode 100644 index 0000000000000..e99a23320453d --- /dev/null +++ b/aptos-move/move-examples/token_objects/token_lockup/sources/unit_tests.move @@ -0,0 +1,170 @@ +module token_objects::unit_tests { + #[test_only] + use std::signer; + #[test_only] + use aptos_framework::object; + #[test_only] + use aptos_framework::account; + #[test_only] + use aptos_framework::timestamp; + #[test_only] + use token_objects::token_lockup; + #[test_only] + use std::string::{Self}; + #[test_only] + use aptos_token_objects::token::{Token}; + + const TEST_START_TIME: u64 = 1000000000; + // 24 hours in one day * 60 minutes in one hour * 60 seconds in one minute * 7 days + const LOCKUP_PERIOD_SECS: u64 = (24 * 60 * 60) * 7; + + #[test_only] + fun setup_test( + creator: &signer, + owner_1: &signer, + owner_2: &signer, + 
aptos_framework: &signer, + start_time: u64, + ) { + timestamp::set_time_has_started_for_testing(aptos_framework); + timestamp::update_global_time_for_test_secs(start_time); + account::create_account_for_test(signer::address_of(creator)); + account::create_account_for_test(signer::address_of(owner_1)); + account::create_account_for_test(signer::address_of(owner_2)); + token_lockup::initialize_collection(creator); + } + + #[test_only] + fun fast_forward_secs(seconds: u64) { + timestamp::update_global_time_for_test_secs(timestamp::now_seconds() + seconds); + } + + #[test (creator = @0xFA, owner_1 = @0xA, owner_2 = @0xB, aptos_framework = @0x1)] + /// Tests transferring multiple tokens to different owners with slightly different initial lockup times + fun test_happy_path( + creator: &signer, + owner_1: &signer, + owner_2: &signer, + aptos_framework: &signer, + ) { + setup_test( + creator, + owner_1, + owner_2, + aptos_framework, + TEST_START_TIME + ); + + let owner_1_addr = signer::address_of(owner_1); + let owner_2_addr = signer::address_of(owner_2); + + // mint 1 token to each of the 2 owner accounts + let token_1_constructor_ref = token_lockup::mint_to(creator, string::utf8(b"Token #1"), owner_1_addr); + let token_2_constructor_ref = token_lockup::mint_to(creator, string::utf8(b"Token #2"), owner_2_addr); + // mint 1 more token to owner_1 one second later + fast_forward_secs(1); + let token_3_constructor_ref = token_lockup::mint_to(creator, string::utf8(b"Token #3"), owner_1_addr); + + let token_1_obj = object::object_from_constructor_ref(&token_1_constructor_ref); + let token_2_obj = object::object_from_constructor_ref(&token_2_constructor_ref); + let token_3_obj = object::object_from_constructor_ref(&token_3_constructor_ref); + + // fast forward global time by 1 week - 1 second + fast_forward_secs((LOCKUP_PERIOD_SECS) - 1); + + // ensures that the `last_transfer` for each token is correct + assert!(token_lockup::view_last_transfer(token_1_obj) == TEST_START_TIME, 0); + assert!(token_lockup::view_last_transfer(token_2_obj) == TEST_START_TIME, 1); + assert!(token_lockup::view_last_transfer(token_3_obj) == TEST_START_TIME + 1, 2); + + + // transfer the first token from owner_1 to owner_2 + token_lockup::transfer(owner_1, token_1_obj, owner_2_addr); + // transfer the second token from owner_2 to owner_1 + token_lockup::transfer(owner_2, token_2_obj, owner_1_addr); + // fast forward global time by 1 second + fast_forward_secs(1); + // transfer the third token from owner_1 to owner_2 + token_lockup::transfer(owner_1, token_3_obj, owner_2_addr); + // ensures that the `last_transfer` for each token is correct + assert!(token_lockup::view_last_transfer(token_1_obj) == TEST_START_TIME + (LOCKUP_PERIOD_SECS), 3); + assert!(token_lockup::view_last_transfer(token_2_obj) == TEST_START_TIME + (LOCKUP_PERIOD_SECS), 4); + assert!(token_lockup::view_last_transfer(token_3_obj) == TEST_START_TIME + (LOCKUP_PERIOD_SECS) + 1, 5); + + // ensures that the owners respectively are owner_2, owner_1, and owner_2 + assert!(object::is_owner(token_1_obj, owner_2_addr), 6); + assert!(object::is_owner(token_2_obj, owner_1_addr), 7); + assert!(object::is_owner(token_3_obj, owner_2_addr), 8); + } + + #[test (creator = @0xFA, owner_1 = @0xA, owner_2 = @0xB, aptos_framework = @0x1)] + #[expected_failure(abort_code = 0x50003, location = aptos_framework::object)] + fun transfer_raw_fail( + creator: &signer, + owner_1: &signer, + owner_2: &signer, + aptos_framework: &signer, + ) { + setup_test( + creator, + owner_1, + owner_2, 
+ aptos_framework, + TEST_START_TIME + ); + + let token_1_constructor_ref = token_lockup::mint_to(creator, string::utf8(b"Token #1"), signer::address_of(owner_1)); + object::transfer_raw( + owner_1, + object::address_from_constructor_ref(&token_1_constructor_ref), + signer::address_of(owner_2) + ); + } + + #[test (creator = @0xFA, owner_1 = @0xA, owner_2 = @0xB, aptos_framework = @0x1)] + #[expected_failure(abort_code = 0x50000, location = token_objects::token_lockup)] + fun transfer_too_early( + creator: &signer, + owner_1: &signer, + owner_2: &signer, + aptos_framework: &signer, + ) { + setup_test( + creator, + owner_1, + owner_2, + aptos_framework, + TEST_START_TIME + ); + + let token_1_constructor_ref = token_lockup::mint_to(creator, string::utf8(b"Token #1"), signer::address_of(owner_1)); + let token_1_obj = object::object_from_constructor_ref(&token_1_constructor_ref); + + // one second too early + fast_forward_secs((LOCKUP_PERIOD_SECS) - 1); + token_lockup::transfer(owner_1, token_1_obj, signer::address_of(owner_2)); + } + + #[test (creator = @0xFA, owner_1 = @0xA, owner_2 = @0xB, aptos_framework = @0x1)] + #[expected_failure(abort_code = 0x50001, location = token_objects::token_lockup)] + fun transfer_wrong_owner( + creator: &signer, + owner_1: &signer, + owner_2: &signer, + aptos_framework: &signer, + ) { + setup_test( + creator, + owner_1, + owner_2, + aptos_framework, + TEST_START_TIME + ); + + let token_1_constructor_ref = token_lockup::mint_to(creator, string::utf8(b"Token #1"), signer::address_of(owner_1)); + let token_1_obj = object::object_from_constructor_ref(&token_1_constructor_ref); + + fast_forward_secs(LOCKUP_PERIOD_SECS); + token_lockup::transfer(owner_2, token_1_obj, signer::address_of(owner_1)); + } +} diff --git a/aptos-move/mvhashmap/src/lib.rs b/aptos-move/mvhashmap/src/lib.rs index fb6a4efb15285..7fe101e4bcf20 100644 --- a/aptos-move/mvhashmap/src/lib.rs +++ b/aptos-move/mvhashmap/src/lib.rs @@ -3,20 +3,23 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - types::{MVCodeError, MVCodeOutput, MVDataError, MVDataOutput, TxnIndex, Version}, - versioned_code::VersionedCode, + types::{MVDataError, MVDataOutput, MVModulesError, MVModulesOutput, TxnIndex, Version}, versioned_data::VersionedData, + versioned_modules::VersionedModules, }; use aptos_aggregator::delta_change_set::DeltaOp; +use aptos_crypto::hash::HashValue; use aptos_types::{ - executable::{Executable, ExecutableDescriptor, ModulePath}, + executable::{Executable, ModulePath}, write_set::TransactionWrite, }; use std::{fmt::Debug, hash::Hash}; pub mod types; -pub mod versioned_code; +pub mod unsync_map; +mod utils; pub mod versioned_data; +pub mod versioned_modules; #[cfg(test)] mod unit_tests; @@ -28,37 +31,35 @@ mod unit_tests; /// given key, it holds exclusive access and doesn't need to explicitly synchronize /// with other reader/writers. /// -/// TODO: separate V into different generic types for data and modules / code (currently -/// both WriteOp for executor, and use extract_raw_bytes. data for aggregators, and -/// code for computing the module hash. +/// TODO: separate V into different generic types for data and code modules with specialized +/// traits (currently both WriteOp for executor). pub struct MVHashMap { data: VersionedData, - code: VersionedCode, + modules: VersionedModules, } impl MVHashMap { // ----------------------------------- - // Functions shared for data and code. + // Functions shared for data and modules. - // Option is passed to allow re-using code cache between blocks. 
- pub fn new(code_cache: Option>) -> MVHashMap { + pub fn new() -> MVHashMap { MVHashMap { data: VersionedData::new(), - code: code_cache.unwrap_or_default(), + modules: VersionedModules::new(), } } - pub fn take(self) -> (VersionedData, VersionedCode) { - (self.data, self.code) + pub fn take(self) -> (VersionedData, VersionedModules) { + (self.data, self.modules) } /// Mark an entry from transaction 'txn_idx' at access path 'key' as an estimated write /// (for future incarnation). Will panic if the entry is not in the data-structure. pub fn mark_estimate(&self, key: &K, txn_idx: TxnIndex) { match key.module_path() { - Some(_) => self.code.mark_estimate(key, txn_idx), + Some(_) => self.modules.mark_estimate(key, txn_idx), None => self.data.mark_estimate(key, txn_idx), } } @@ -68,15 +69,15 @@ impl self.code.delete(key, txn_idx), + Some(_) => self.modules.delete(key, txn_idx), None => self.data.delete(key, txn_idx), }; } - /// Add a versioned write at a specified key, in code or data map according to the key. - pub fn write(&self, key: &K, version: Version, value: V) { + /// Add a versioned write at a specified key, in data or modules map according to the key. + pub fn write(&self, key: K, version: Version, value: V) { match key.module_path() { - Some(_) => self.code.write(key, version.0, value), + Some(_) => self.modules.write(key, version.0, value), None => self.data.write(key, version, value), } } @@ -85,7 +86,7 @@ impl anyhow::Result, MVCodeError> { - self.code.fetch_code(key, txn_idx) + ) -> anyhow::Result, MVModulesError> { + self.modules.fetch_module(key, txn_idx) } } @@ -144,6 +149,6 @@ impl { fn default() -> Self { - Self::new(None) + Self::new() } } diff --git a/aptos-move/mvhashmap/src/types.rs b/aptos-move/mvhashmap/src/types.rs index aa2fadb61deb7..628fe31bab22b 100644 --- a/aptos-move/mvhashmap/src/types.rs +++ b/aptos-move/mvhashmap/src/types.rs @@ -30,7 +30,7 @@ pub enum MVDataError { } #[derive(Debug, PartialEq, Eq)] -pub enum MVCodeError { +pub enum MVModulesError { /// No prior entry is found. NotFound, /// A dependency on other transaction has been found during the read. @@ -51,12 +51,12 @@ pub enum MVDataOutput { /// Returned as Ok(..) when read successfully from the multi-version data-structure. #[derive(Debug, PartialEq, Eq)] -pub enum MVCodeOutput { +pub enum MVModulesOutput { /// Arc to the executable corresponding to the latest module, and a descriptor /// with either the module hash or indicator that the module is from storage. Executable((Arc, ExecutableDescriptor)), /// Arc to the latest module, together with its (cryptographic) hash. Note that - /// this can't be a storage-level module, as it's from multi-versioned code map. + /// this can't be a storage-level module, as it's from multi-versioned modules map. /// The Option can be None if HashValue can't be computed, currently may happen /// if the latest entry corresponded to the module deletion. 
Module((Arc, HashValue)), diff --git a/aptos-move/mvhashmap/src/unit_tests/mod.rs b/aptos-move/mvhashmap/src/unit_tests/mod.rs index f999ad9fd9fa9..20e132b9a04a3 100644 --- a/aptos-move/mvhashmap/src/unit_tests/mod.rs +++ b/aptos-move/mvhashmap/src/unit_tests/mod.rs @@ -4,6 +4,7 @@ use super::{ types::{Incarnation, MVDataError, MVDataOutput, TxnIndex}, + unsync_map::UnsyncMap, *, }; use aptos_aggregator::{ @@ -15,7 +16,7 @@ use aptos_types::{ executable::{ExecutableTestType, ModulePath}, state_store::state_value::StateValue, }; -use claims::{assert_err_eq, assert_ok_eq}; +use claims::{assert_err_eq, assert_none, assert_ok_eq, assert_some_eq}; use std::sync::Arc; mod proptest_types; @@ -80,6 +81,22 @@ impl ModulePath for KeyType { } } +#[test] +fn unsync_map_data_basic() { + let map: UnsyncMap>, Value, ExecutableTestType> = UnsyncMap::new(); + + let ap = KeyType(b"/foo/b".to_vec()); + + // Reads that should go the DB return None + assert_none!(map.fetch_data(&ap)); + // Ensure write registers the new value. + map.write(ap.clone(), value_for(10, 1)); + assert_some_eq!(map.fetch_data(&ap), arc_value_for(10, 1)); + // Ensure the next write overwrites the value. + map.write(ap.clone(), value_for(14, 1)); + assert_some_eq!(map.fetch_data(&ap), arc_value_for(14, 1)); +} + #[test] fn create_write_read_placeholder_struct() { use MVDataError::*; @@ -89,14 +106,14 @@ fn create_write_read_placeholder_struct() { let ap2 = KeyType(b"/foo/c".to_vec()); let ap3 = KeyType(b"/foo/d".to_vec()); - let mvtbl: MVHashMap>, Value, ExecutableTestType> = MVHashMap::new(None); + let mvtbl: MVHashMap>, Value, ExecutableTestType> = MVHashMap::new(); // Reads that should go the DB return Err(NotFound) let r_db = mvtbl.fetch_data(&ap1, 5); assert_eq!(Err(NotFound), r_db); // Write by txn 10. - mvtbl.write(&ap1, (10, 1), value_for(10, 1)); + mvtbl.write(ap1.clone(), (10, 1), value_for(10, 1)); // Reads that should go the DB return Err(NotFound) let r_db = mvtbl.fetch_data(&ap1, 9); @@ -110,17 +127,17 @@ fn create_write_read_placeholder_struct() { assert_eq!(Ok(Versioned((10, 1), arc_value_for(10, 1))), r_10); // More deltas. - mvtbl.add_delta(&ap1, 11, delta_add(11, u128::MAX)); - mvtbl.add_delta(&ap1, 12, delta_add(12, u128::MAX)); - mvtbl.add_delta(&ap1, 13, delta_sub(74, u128::MAX)); + mvtbl.add_delta(ap1.clone(), 11, delta_add(11, u128::MAX)); + mvtbl.add_delta(ap1.clone(), 12, delta_add(12, u128::MAX)); + mvtbl.add_delta(ap1.clone(), 13, delta_sub(74, u128::MAX)); // Reads have to go traverse deltas until a write is found. let r_sum = mvtbl.fetch_data(&ap1, 14); assert_eq!(Ok(Resolved(u128_for(10, 1) + 11 + 12 - (61 + 13))), r_sum); // More writes. - mvtbl.write(&ap1, (12, 0), value_for(12, 0)); - mvtbl.write(&ap1, (8, 3), value_for(8, 3)); + mvtbl.write(ap1.clone(), (12, 0), value_for(12, 0)); + mvtbl.write(ap1.clone(), (8, 3), value_for(8, 3)); // Verify reads. let r_12 = mvtbl.fetch_data(&ap1, 15); @@ -143,15 +160,15 @@ fn create_write_read_placeholder_struct() { // Delete the entry written by 10, write to a different ap. mvtbl.delete(&ap1, 10); - mvtbl.write(&ap2, (10, 2), value_for(10, 2)); + mvtbl.write(ap2.clone(), (10, 2), value_for(10, 2)); // Read by txn 11 no longer observes entry from txn 10. let r_8 = mvtbl.fetch_data(&ap1, 11); assert_eq!(Ok(Versioned((8, 3), arc_value_for(8, 3))), r_8); // Reads, writes for ap2 and ap3. 
- mvtbl.write(&ap2, (5, 0), value_for(5, 0)); - mvtbl.write(&ap3, (20, 4), value_for(20, 4)); + mvtbl.write(ap2.clone(), (5, 0), value_for(5, 0)); + mvtbl.write(ap3.clone(), (20, 4), value_for(20, 4)); let r_5 = mvtbl.fetch_data(&ap2, 10); assert_eq!(Ok(Versioned((5, 0), arc_value_for(5, 0))), r_5); let r_20 = mvtbl.fetch_data(&ap3, 21); @@ -175,16 +192,16 @@ fn create_write_read_placeholder_struct() { assert_eq!(Ok(Versioned((10, 2), arc_value_for(10, 2))), r_10); // Both delta-write and delta-delta application failures are detected. - mvtbl.add_delta(&ap1, 30, delta_add(30, 32)); - mvtbl.add_delta(&ap1, 31, delta_add(31, 32)); + mvtbl.add_delta(ap1.clone(), 30, delta_add(30, 32)); + mvtbl.add_delta(ap1.clone(), 31, delta_add(31, 32)); let r_33 = mvtbl.fetch_data(&ap1, 33); assert_eq!(Err(DeltaApplicationFailure), r_33); let val = value_for(10, 3); // sub base sub_for for which should underflow. let sub_base = AggregatorValue::from_write(&val).unwrap().into(); - mvtbl.write(&ap2, (10, 3), val); - mvtbl.add_delta(&ap2, 30, delta_sub(30 + sub_base, u128::MAX)); + mvtbl.write(ap2.clone(), (10, 3), val); + mvtbl.add_delta(ap2.clone(), 30, delta_sub(30 + sub_base, u128::MAX)); let r_31 = mvtbl.fetch_data(&ap2, 31); assert_eq!(Err(DeltaApplicationFailure), r_31); } @@ -197,9 +214,9 @@ fn materialize_delta_shortcut() { let ap = KeyType(b"/foo/b".to_vec()); let limit = 10000; - vd.add_delta(&ap, 5, delta_add(10, limit)); - vd.add_delta(&ap, 8, delta_add(20, limit)); - vd.add_delta(&ap, 11, delta_add(30, limit)); + vd.add_delta(ap.clone(), 5, delta_add(10, limit)); + vd.add_delta(ap.clone(), 8, delta_add(20, limit)); + vd.add_delta(ap.clone(), 11, delta_add(30, limit)); match_unresolved(vd.fetch_data(&ap, 10), DeltaUpdate::Plus(30)); assert_err_eq!( @@ -216,11 +233,11 @@ fn materialize_delta_shortcut() { // Make sure shortcut is committed by adding a delta at a lower txn idx // and ensuring tha fetch_data output no longer changes. - vd.add_delta(&ap, 6, delta_add(15, limit)); + vd.add_delta(ap.clone(), 6, delta_add(15, limit)); assert_eq!(vd.fetch_data(&ap, 10), Ok(Resolved(35))); // However, if we add a delta at txn_idx = 9, it should have an effect. - vd.add_delta(&ap, 9, delta_add(15, limit)); + vd.add_delta(ap.clone(), 9, delta_add(15, limit)); assert_eq!(vd.fetch_data(&ap, 10), Ok(Resolved(50))); } @@ -261,7 +278,7 @@ fn commit_without_entry() { let vd: VersionedData>, Value> = VersionedData::new(); let ap = KeyType(b"/foo/b".to_vec()); - vd.add_delta(&ap, 8, delta_add(20, 1000)); + vd.add_delta(ap.clone(), 8, delta_add(20, 1000)); vd.set_aggregator_base_value(&ap, 10); // Must panic as there is no delta at provided index. diff --git a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs index 2214a0246dec7..5a30f2f73cbf2 100644 --- a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs +++ b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs @@ -190,7 +190,7 @@ where let baseline = Baseline::new(transactions.as_slice()); // Only testing data, provide executable type (). - let map = MVHashMap::, Value, ExecutableTestType>::new(None); + let map = MVHashMap::, Value, ExecutableTestType>::new(); // make ESTIMATE placeholders for all versions to be updated. // allows to test that correct values appear at the end of concurrent execution. 
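The updated multi-version map tests above reflect the API change in this patch: `MVHashMap::new()` no longer takes a code cache, and `write`/`add_delta` now take the key by value rather than by reference. A minimal usage sketch of that pattern, assuming the crate's existing test helpers (`KeyType`, `Value`, `ExecutableTestType`, `value_for`, `arc_value_for`) are in scope:

```rust
// Sketch only: mirrors the updated unit tests above and is not part of the patch.
// `KeyType`, `Value`, `ExecutableTestType`, `value_for` and `arc_value_for` are
// the existing test helpers from this crate, assumed to be in scope.
use crate::{types::MVDataOutput::Versioned, MVHashMap};

fn write_then_read_sketch() {
    let map: MVHashMap<KeyType<Vec<u8>>, Value, ExecutableTestType> = MVHashMap::new();
    let key = KeyType(b"/foo/b".to_vec());

    // Keys are now moved into `write` / `add_delta`, so clone when the key is reused.
    map.write(key.clone(), (10, 1), value_for(10, 1));

    // Reads still borrow the key; a read at a later transaction index observes the write.
    assert_eq!(
        map.fetch_data(&key, 11),
        Ok(Versioned((10, 1), arc_value_for(10, 1)))
    );
}
```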
@@ -205,7 +205,7 @@ where }) .collect::>(); for (key, idx) in versions_to_write { - map.write(&KeyType(key.clone()), (idx as TxnIndex, 0), Value(None)); + map.write(KeyType(key.clone()), (idx as TxnIndex, 0), Value(None)); map.mark_estimate(&KeyType(key), idx as TxnIndex); } @@ -283,17 +283,17 @@ where } }, Operator::Remove => { - map.write(&KeyType(key.clone()), (idx as TxnIndex, 1), Value(None)); + map.write(KeyType(key.clone()), (idx as TxnIndex, 1), Value(None)); }, Operator::Insert(v) => { map.write( - &KeyType(key.clone()), + KeyType(key.clone()), (idx as TxnIndex, 1), Value(Some(v.clone())), ); }, Operator::Update(delta) => { - map.add_delta(&KeyType(key.clone()), idx as TxnIndex, *delta) + map.add_delta(KeyType(key.clone()), idx as TxnIndex, *delta) }, } }) @@ -302,6 +302,8 @@ where Ok(()) } +// TODO: proptest MVHashMap delete and dependency handling! + proptest! { #[test] fn single_key_proptest( diff --git a/aptos-move/mvhashmap/src/unsync_map.rs b/aptos-move/mvhashmap/src/unsync_map.rs new file mode 100644 index 0000000000000..9c9d32e0c9436 --- /dev/null +++ b/aptos-move/mvhashmap/src/unsync_map.rs @@ -0,0 +1,90 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{types::MVModulesOutput, utils::module_hash}; +use aptos_crypto::hash::HashValue; +use aptos_types::{ + executable::{Executable, ExecutableDescriptor, ModulePath}, + write_set::TransactionWrite, +}; +use std::{cell::RefCell, collections::HashMap, hash::Hash, sync::Arc}; + +/// UnsyncMap is designed to mimic the functionality of MVHashMap for sequential execution. +/// In this case only the latest recorded version is relevant, simplifying the implementation. +/// The functionality also includes Executable caching based on the hash of ExecutableDescriptor +/// (i.e. module hash for modules published during the latest block - not at storage version). +pub struct UnsyncMap { + // Only use Arc to provide unified interfaces with the MVHashMap / concurrent setting. This + // simplifies the trait-based integration for executable caching. TODO: better representation. + // Optional hash can store the hash of the module to avoid re-computations. + map: RefCell, Option)>>, + executable_cache: RefCell>>, + executable_bytes: RefCell, +} + +impl Default + for UnsyncMap +{ + fn default() -> Self { + Self { + map: RefCell::new(HashMap::new()), + executable_cache: RefCell::new(HashMap::new()), + executable_bytes: RefCell::new(0), + } + } +} + +impl UnsyncMap { + pub fn new() -> Self { + Self { + map: RefCell::new(HashMap::new()), + executable_cache: RefCell::new(HashMap::new()), + executable_bytes: RefCell::new(0), + } + } + + pub fn fetch_data(&self, key: &K) -> Option> { + self.map.borrow().get(key).map(|entry| entry.0.clone()) + } + + pub fn fetch_module(&self, key: &K) -> Option> { + use MVModulesOutput::*; + debug_assert!(key.module_path().is_some()); + + self.map.borrow_mut().get_mut(key).map(|entry| { + let hash = entry.1.get_or_insert(module_hash(entry.0.as_ref())); + + self.executable_cache.borrow().get(hash).map_or_else( + || Module((entry.0.clone(), *hash)), + |x| Executable((x.clone(), ExecutableDescriptor::Published(*hash))), + ) + }) + } + + pub fn write(&self, key: K, value: V) { + self.map.borrow_mut().insert(key, (Arc::new(value), None)); + } + + /// We return false if the executable was already stored, as this isn't supposed to happen + /// during sequential execution (and the caller may choose to e.g. log a message). 
+ /// Versioned modules storage does not cache executables at storage version, hence directly + /// the descriptor hash in ExecutableDescriptor::Published is provided. + pub fn store_executable(&self, descriptor_hash: HashValue, executable: X) -> bool { + let size = executable.size_bytes(); + if self + .executable_cache + .borrow_mut() + .insert(descriptor_hash, Arc::new(executable)) + .is_some() + { + *self.executable_bytes.borrow_mut() += size; + true + } else { + false + } + } + + pub fn executable_size(&self) -> usize { + *self.executable_bytes.borrow() + } +} diff --git a/aptos-move/mvhashmap/src/utils.rs b/aptos-move/mvhashmap/src/utils.rs new file mode 100644 index 0000000000000..7b65ba6be9afa --- /dev/null +++ b/aptos-move/mvhashmap/src/utils.rs @@ -0,0 +1,16 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_crypto::hash::{DefaultHasher, HashValue}; +use aptos_types::write_set::TransactionWrite; + +pub(crate) fn module_hash(module: &V) -> HashValue { + module + .extract_raw_bytes() + .map(|bytes| { + let mut hasher = DefaultHasher::new(b"Module"); + hasher.update(&bytes); + hasher.finish() + }) + .expect("Module can't be deleted") +} diff --git a/aptos-move/mvhashmap/src/versioned_data.rs b/aptos-move/mvhashmap/src/versioned_data.rs index 3c4ba6586de07..440005dabc751 100644 --- a/aptos-move/mvhashmap/src/versioned_data.rs +++ b/aptos-move/mvhashmap/src/versioned_data.rs @@ -217,8 +217,8 @@ impl VersionedData { assert_eq!(*v.aggregator_base_value.get_or_insert(value), value); } - pub(crate) fn add_delta(&self, key: &K, txn_idx: TxnIndex, delta: DeltaOp) { - let mut v = self.values.entry(key.clone()).or_default(); + pub(crate) fn add_delta(&self, key: K, txn_idx: TxnIndex, delta: DeltaOp) { + let mut v = self.values.entry(key).or_default(); v.versioned_map .insert(txn_idx, CachePadded::new(Entry::new_delta_from(delta))); } @@ -251,10 +251,10 @@ impl VersionedData { .unwrap_or(Err(MVDataError::NotFound)) } - pub(crate) fn write(&self, key: &K, version: Version, data: V) { + pub(crate) fn write(&self, key: K, version: Version, data: V) { let (txn_idx, incarnation) = version; - let mut v = self.values.entry(key.clone()).or_default(); + let mut v = self.values.entry(key).or_default(); let prev_entry = v.versioned_map.insert( txn_idx, CachePadded::new(Entry::new_write_from(incarnation, data)), diff --git a/aptos-move/mvhashmap/src/versioned_code.rs b/aptos-move/mvhashmap/src/versioned_modules.rs similarity index 64% rename from aptos-move/mvhashmap/src/versioned_code.rs rename to aptos-move/mvhashmap/src/versioned_modules.rs index cc799d571bca8..8e88889363215 100644 --- a/aptos-move/mvhashmap/src/versioned_code.rs +++ b/aptos-move/mvhashmap/src/versioned_modules.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::types::{Flag, MVCodeError, MVCodeOutput, TxnIndex}; +use crate::types::{Flag, MVModulesError, MVModulesOutput, TxnIndex}; use aptos_crypto::hash::{DefaultHasher, HashValue}; use aptos_types::{ executable::{Executable, ExecutableDescriptor}, @@ -35,14 +35,12 @@ struct Entry { struct VersionedValue { versioned_map: BTreeMap>>, - /// Executable based on the storage version of the module. - base_executable: Option>, /// Executables corresponding to published versions of the module, based on hash. executables: HashMap>, } /// Maps each key (access path) to an internal VersionedValue. 
-pub struct VersionedCode { +pub struct VersionedModules { values: DashMap>, } @@ -77,23 +75,21 @@ impl VersionedValue { pub fn new() -> Self { Self { versioned_map: BTreeMap::new(), - base_executable: None, executables: HashMap::new(), } } - fn read(&self, txn_idx: TxnIndex) -> anyhow::Result<(Arc, HashValue), MVCodeError> { - use MVCodeError::*; + fn read(&self, txn_idx: TxnIndex) -> anyhow::Result<(Arc, HashValue), MVModulesError> { + match self.versioned_map.range(0..txn_idx).next_back() { + Some((idx, entry)) => { + if entry.flag() == Flag::Estimate { + // Found a dependency. + return Err(MVModulesError::Dependency(*idx)); + } - if let Some((idx, entry)) = self.versioned_map.range(0..txn_idx).next_back() { - if entry.flag() == Flag::Estimate { - // Found a dependency. - return Err(Dependency(*idx)); - } - - Ok((entry.module.clone(), entry.hash)) - } else { - Err(NotFound) + Ok((entry.module.clone(), entry.hash)) + }, + None => Err(MVModulesError::NotFound), } } } @@ -104,7 +100,7 @@ impl Default for VersionedValue { } } -impl VersionedCode { +impl VersionedModules { pub(crate) fn new() -> Self { Self { values: DashMap::new(), @@ -119,52 +115,34 @@ impl VersionedCode { - let mut v = self.values.get_mut(key).expect("Path must exist"); - v.executables.entry(hash).or_insert(x); - }, - ExecutableDescriptor::Storage => { - let mut v = self.values.entry(key.clone()).or_default(); - v.base_executable.get_or_insert(x); - }, - }; + pub(crate) fn store_executable(&self, key: &K, descriptor_hash: HashValue, executable: X) { + let mut v = self.values.get_mut(key).expect("Path must exist"); + v.executables + .entry(descriptor_hash) + .or_insert_with(|| Arc::new(executable)); } - pub(crate) fn fetch_code( + pub(crate) fn fetch_module( &self, key: &K, txn_idx: TxnIndex, - ) -> anyhow::Result, MVCodeError> { - use MVCodeError::*; - use MVCodeOutput::*; + ) -> anyhow::Result, MVModulesError> { + use MVModulesError::*; + use MVModulesOutput::*; match self.values.get(key) { - Some(v) => match v.read(txn_idx) { - Ok((module, hash)) => Ok(match v.executables.get(&hash) { + Some(v) => v + .read(txn_idx) + .map(|(module, hash)| match v.executables.get(&hash) { Some(x) => Executable((x.clone(), ExecutableDescriptor::Published(hash))), None => Module((module, hash)), }), - Err(NotFound) => v - .base_executable - .as_ref() - .map(|x| Executable((x.clone(), ExecutableDescriptor::Storage))) - .ok_or(NotFound), - Err(Dependency(idx)) => Err(Dependency(idx)), - }, None => Err(NotFound), } } @@ -178,9 +156,3 @@ impl VersionedCode Default for VersionedCode { - fn default() -> Self { - VersionedCode::new() - } -} diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index 0d26a9ddf0417..95516deea6444 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -409,7 +409,6 @@ pub fn default_features() -> Vec { FeatureFlag::STRUCT_CONSTRUCTORS, FeatureFlag::CRYPTOGRAPHY_ALGEBRA_NATIVES, FeatureFlag::BLS12_381_STRUCTURES, - FeatureFlag::STORAGE_SLOT_METADATA, FeatureFlag::CHARGE_INVARIANT_VIOLATION, ] } diff --git a/aptos-node/Cargo.toml b/aptos-node/Cargo.toml index 5602a99daf5b0..d951d471de065 100644 --- a/aptos-node/Cargo.toml +++ b/aptos-node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "aptos-node" description = "Aptos node" -version = "1.4.0" +version = "1.5.0" # Workspace inherited keys authors = { workspace = true } @@ -68,7 +68,6 @@ rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } 
-serde_merge = { workspace = true } serde_yaml = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } diff --git a/aptos-node/src/lib.rs b/aptos-node/src/lib.rs index 6f0be3b864787..c74bc4dfda6ab 100644 --- a/aptos-node/src/lib.rs +++ b/aptos-node/src/lib.rs @@ -18,7 +18,7 @@ mod tests; use anyhow::anyhow; use aptos_api::bootstrap as bootstrap_api; use aptos_build_info::build_information; -use aptos_config::config::{NodeConfig, PersistableConfig}; +use aptos_config::config::{merge_node_config, NodeConfig, PersistableConfig}; use aptos_framework::ReleaseBundle; use aptos_logger::{prelude::*, telemetry_log_writer::TelemetryLog, Level, LoggerFilterUpdater}; use aptos_state_sync_driver::driver_factory::StateSyncRuntimes; @@ -328,19 +328,6 @@ where start(config, Some(log_file), false) } -/// Merges node_config with the config override file -fn merge_test_config_override( - node_config: NodeConfig, - test_config_override: serde_yaml::Value, -) -> NodeConfig { - serde_merge::tmerge::( - node_config, - test_config_override, - ) - .map_err(|e| anyhow::anyhow!("Unable to merge default config with override. Error: {}", e)) - .unwrap() -} - /// Creates a single node test config, with a few config tweaks to reduce /// the overhead of running the node on a local machine. fn create_single_node_test_config( @@ -368,7 +355,7 @@ fn create_single_node_test_config( e ) })?; - merge_test_config_override(NodeConfig::get_default_validator_config(), values) + merge_node_config(NodeConfig::get_default_validator_config(), values)? }, None => NodeConfig::get_default_validator_config(), }; diff --git a/aptos-node/src/state_sync.rs b/aptos-node/src/state_sync.rs index e08c9b37ee7f4..d800e0c872e8f 100644 --- a/aptos-node/src/state_sync.rs +++ b/aptos-node/src/state_sync.rs @@ -21,7 +21,7 @@ use aptos_state_sync_driver::{ driver_factory::{DriverFactory, StateSyncRuntimes}, metadata_storage::PersistentMetadataStorage, }; -use aptos_storage_interface::DbReaderWriter; +use aptos_storage_interface::{DbReader, DbReaderWriter}; use aptos_storage_service_client::StorageServiceClient; use aptos_storage_service_server::{ network::StorageServiceNetworkEvents, storage::StorageReader, StorageServiceServer, @@ -99,7 +99,7 @@ pub fn start_state_sync_and_get_notification_handles( // Start the data client let (aptos_data_client, aptos_data_client_runtime) = - setup_aptos_data_client(node_config, network_client)?; + setup_aptos_data_client(node_config, network_client, db_rw.reader.clone())?; // Start the data streaming service let (streaming_service_client, streaming_service_runtime) = @@ -173,6 +173,7 @@ fn setup_data_streaming_service( fn setup_aptos_data_client( node_config: &NodeConfig, network_client: NetworkClient, + storage: Arc, ) -> anyhow::Result<(AptosDataClient, Runtime)> { // Create the storage service client let storage_service_client = StorageServiceClient::new(network_client); @@ -185,6 +186,7 @@ fn setup_aptos_data_client( node_config.state_sync.aptos_data_client, node_config.base.clone(), TimeService::real(), + storage, storage_service_client, Some(aptos_data_client_runtime.handle().clone()), ); diff --git a/aptos-node/src/storage.rs b/aptos-node/src/storage.rs index 5a71240ff61e7..84b44a7b3d82a 100644 --- a/aptos-node/src/storage.rs +++ b/aptos-node/src/storage.rs @@ -62,6 +62,7 @@ fn create_rocksdb_checkpoint_and_change_working_dir( AptosDB::create_checkpoint( &source_dir, &checkpoint_dir, + node_config.storage.rocksdb_configs.split_ledger_db, node_config .storage .rocksdb_configs 
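The single-node test path above now delegates its YAML override handling to a shared `merge_node_config` helper, which the config crate changes below add (along with `serde_merge` as a dependency). A small sketch of the intended call pattern, mirroring the `verify_merge_node_config` test added later in this patch; the YAML override contents here are purely illustrative:

```rust
// Illustrative sketch, not part of the patch. `merge_node_config` and `NodeConfig`
// are provided by the aptos-config crate (see the config changes below); the YAML
// override used here is an arbitrary example.
use aptos_config::config::{merge_node_config, NodeConfig};

fn merged_default_validator_config_sketch() -> NodeConfig {
    let override_values: serde_yaml::Value = serde_yaml::from_str(
        r#"
        api:
          enabled: false
        "#,
    )
    .unwrap();

    // Unknown top-level keys in the override cause an error instead of being
    // silently dropped, as exercised by `verify_bad_merge_node_config` below.
    merge_node_config(NodeConfig::get_default_validator_config(), override_values)
        .expect("override should merge cleanly over the default config")
}
```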
diff --git a/config/Cargo.toml b/config/Cargo.toml index d6767b7a19b07..7d22af1d2aa20 100644 --- a/config/Cargo.toml +++ b/config/Cargo.toml @@ -31,6 +31,7 @@ mirai-annotations = { workspace = true } poem-openapi = { workspace = true } rand = { workspace = true } serde = { workspace = true } +serde_merge = { workspace = true } serde_yaml = { workspace = true } thiserror = { workspace = true } url = { workspace = true } diff --git a/config/src/config/api_config.rs b/config/src/config/api_config.rs index 2497ef00cc6e8..d2a77471fead2 100644 --- a/config/src/config/api_config.rs +++ b/config/src/config/api_config.rs @@ -74,14 +74,14 @@ pub struct ApiConfig { pub gas_estimation: GasEstimationConfig, } -pub const DEFAULT_ADDRESS: &str = "127.0.0.1"; -pub const DEFAULT_PORT: u16 = 8080; -pub const DEFAULT_REQUEST_CONTENT_LENGTH_LIMIT: u64 = 8 * 1024 * 1024; // 8 MB +const DEFAULT_ADDRESS: &str = "127.0.0.1"; +const DEFAULT_PORT: u16 = 8080; +const DEFAULT_REQUEST_CONTENT_LENGTH_LIMIT: u64 = 8 * 1024 * 1024; // 8 MB pub const DEFAULT_MAX_SUBMIT_TRANSACTION_BATCH_SIZE: usize = 10; pub const DEFAULT_MAX_PAGE_SIZE: u16 = 100; -pub const DEFAULT_MAX_ACCOUNT_RESOURCES_PAGE_SIZE: u16 = 9999; -pub const DEFAULT_MAX_ACCOUNT_MODULES_PAGE_SIZE: u16 = 9999; -pub const DEFAULT_MAX_VIEW_GAS: u64 = 2_000_000; // We keep this value the same as the max number of gas allowed for one single transaction defined in aptos-gas. +const DEFAULT_MAX_ACCOUNT_RESOURCES_PAGE_SIZE: u16 = 9999; +const DEFAULT_MAX_ACCOUNT_MODULES_PAGE_SIZE: u16 = 9999; +const DEFAULT_MAX_VIEW_GAS: u64 = 2_000_000; // We keep this value the same as the max number of gas allowed for one single transaction defined in aptos-gas. fn default_enabled() -> bool { true diff --git a/config/src/config/consensus_config.rs b/config/src/config/consensus_config.rs index bd72be00347fc..d240ab9f8d5bc 100644 --- a/config/src/config/consensus_config.rs +++ b/config/src/config/consensus_config.rs @@ -102,8 +102,8 @@ impl Default for ConsensusConfig { max_sending_block_bytes: 600 * 1024, // 600 KB max_sending_block_bytes_quorum_store_override: 5 * 1024 * 1024, // 5MB max_receiving_block_txns: 10000, - max_receiving_block_txns_quorum_store_override: 2 - * MAX_SENDING_BLOCK_TXNS_QUORUM_STORE_OVERRIDE, + max_receiving_block_txns_quorum_store_override: 10000 + .max(2 * MAX_SENDING_BLOCK_TXNS_QUORUM_STORE_OVERRIDE), max_receiving_block_bytes: 3 * 1024 * 1024, // 3MB max_receiving_block_bytes_quorum_store_override: 6 * 1024 * 1024, // 6MB max_pruned_blocks_in_mem: 100, diff --git a/config/src/config/indexer_grpc_config.rs b/config/src/config/indexer_grpc_config.rs index 019a5002ff0e0..79645d31c65c8 100644 --- a/config/src/config/indexer_grpc_config.rs +++ b/config/src/config/indexer_grpc_config.rs @@ -8,10 +8,10 @@ use aptos_types::chain_id::ChainId; use serde::{Deserialize, Serialize}; // Useful indexer defaults -pub const DEFAULT_ADDRESS: &str = "0.0.0.0:50051"; -pub const DEFAULT_OUTPUT_BATCH_SIZE: u16 = 100; -pub const DEFAULT_PROCESSOR_BATCH_SIZE: u16 = 1000; -pub const DEFAULT_PROCESSOR_TASK_COUNT: u16 = 20; +const DEFAULT_ADDRESS: &str = "0.0.0.0:50051"; +const DEFAULT_OUTPUT_BATCH_SIZE: u16 = 100; +const DEFAULT_PROCESSOR_BATCH_SIZE: u16 = 1000; +const DEFAULT_PROCESSOR_TASK_COUNT: u16 = 20; #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] diff --git a/config/src/config/node_config.rs b/config/src/config/node_config.rs index c9affc2d8d820..ebf1aba8e2ad5 100644 --- a/config/src/config/node_config.rs +++ 
b/config/src/config/node_config.rs @@ -228,9 +228,25 @@ fn parse_serialized_node_config(serialized_config: &str, caller: &'static str) - }) } +/// Merges node_config with a config config override +pub fn merge_node_config( + node_config: NodeConfig, + override_node_config: serde_yaml::Value, +) -> Result { + serde_merge::tmerge::( + node_config, + override_node_config, + ) + .map_err(|e| { + Error::Unexpected(format!( + "Unable to merge default config with override. Error: {}", + e + )) + }) +} #[cfg(test)] mod test { - use crate::config::{NodeConfig, SafetyRulesConfig}; + use crate::config::{merge_node_config, Error, NodeConfig, SafetyRulesConfig}; #[test] fn verify_config_defaults() { @@ -242,4 +258,32 @@ mod test { // Verify the safety rules config default SafetyRulesConfig::get_default_config(); } + + #[test] + fn verify_merge_node_config() { + let node_config = NodeConfig::get_default_pfn_config(); + let override_node_config = serde_yaml::from_str( + r#" + api: + enabled: false + "#, + ) + .unwrap(); + let merged_node_config = merge_node_config(node_config, override_node_config).unwrap(); + assert!(!merged_node_config.api.enabled); + } + + #[test] + fn verify_bad_merge_node_config() { + let node_config = NodeConfig::get_default_pfn_config(); + let override_node_config = serde_yaml::from_str( + r#" + blablafakenodeconfigkeyblabla: + enabled: false + "#, + ) + .unwrap(); + let merged_node_config = merge_node_config(node_config, override_node_config); + assert!(matches!(merged_node_config, Err(Error::Unexpected(_)))); + } } diff --git a/config/src/config/state_sync_config.rs b/config/src/config/state_sync_config.rs index 5264991f8eae3..d456ab975b17b 100644 --- a/config/src/config/state_sync_config.rs +++ b/config/src/config/state_sync_config.rs @@ -154,10 +154,10 @@ pub struct StorageServiceConfig { pub max_network_channel_size: u64, /// Maximum number of bytes to send per network message pub max_network_chunk_bytes: u64, + /// Maximum period (ms) of pending optimistic fetch requests + pub max_optimistic_fetch_period: u64, /// Maximum number of state keys and values per chunk pub max_state_chunk_size: u64, - /// Maximum period (ms) of pending subscription requests - pub max_subscription_period_ms: u64, /// Maximum number of transactions per chunk pub max_transaction_chunk_size: u64, /// Maximum number of transaction outputs per chunk @@ -179,8 +179,8 @@ impl Default for StorageServiceConfig { max_lru_cache_size: 500, // At ~0.6MiB per chunk, this should take no more than 0.5GiB max_network_channel_size: 4000, max_network_chunk_bytes: MAX_MESSAGE_SIZE as u64, + max_optimistic_fetch_period: 5000, // 5 seconds max_state_chunk_size: MAX_STATE_CHUNK_SIZE, - max_subscription_period_ms: 5000, // 5 seconds max_transaction_chunk_size: MAX_TRANSACTION_CHUNK_SIZE, max_transaction_output_chunk_size: MAX_TRANSACTION_OUTPUT_CHUNK_SIZE, min_time_to_ignore_peers_secs: 300, // 5 minutes @@ -236,6 +236,8 @@ impl Default for DataStreamingServiceConfig { #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] pub struct AptosDataClientConfig { + /// The interval (milliseconds) at which to refresh the latency monitor + pub latency_monitor_loop_interval_ms: u64, /// Maximum number of epoch ending ledger infos per chunk pub max_epoch_chunk_size: u64, /// Maximum number of in-flight polls for priority peers @@ -265,6 +267,7 @@ pub struct AptosDataClientConfig { impl Default for AptosDataClientConfig { fn default() -> Self { Self { + 
latency_monitor_loop_interval_ms: 50, // 50 milliseconds max_epoch_chunk_size: MAX_EPOCH_CHUNK_SIZE, max_num_in_flight_priority_polls: 10, max_num_in_flight_regular_polls: 10, @@ -322,11 +325,13 @@ impl ConfigOptimizer for StateSyncDriverConfig { let state_sync_driver_config = &mut node_config.state_sync.state_sync_driver; let local_driver_config_yaml = &local_config_yaml["state_sync"]["state_sync_driver"]; - // Default to fast sync for all testnet nodes because testnet is old - // enough that pruning has kicked in, and nodes will struggle to - // locate all the data since genesis. + // Default to fast sync for all testnet and mainnet nodes + // because pruning has kicked in, and nodes will struggle + // to locate all the data since genesis. let mut modified_config = false; - if chain_id.is_testnet() && local_driver_config_yaml["bootstrapping_mode"].is_null() { + if (chain_id.is_testnet() || chain_id.is_mainnet()) + && local_driver_config_yaml["bootstrapping_mode"].is_null() + { state_sync_driver_config.bootstrapping_mode = BootstrappingMode::DownloadLatestStates; modified_config = true; } @@ -371,7 +376,7 @@ mod tests { use super::*; #[test] - fn test_optimize_bootstrapping_mode_testnet_vfn() { + fn test_optimize_bootstrapping_mode_devnet_vfn() { // Create a node config with execution mode enabled let mut node_config = create_execution_mode_config(); @@ -380,15 +385,15 @@ mod tests { &mut node_config, &serde_yaml::from_str("{}").unwrap(), // An empty local config, NodeType::ValidatorFullnode, - ChainId::testnet(), + ChainId::new(40), // Not mainnet or testnet ) .unwrap(); assert!(modified_config); - // Verify that the bootstrapping mode is now set to fast sync + // Verify that the bootstrapping mode is not changed assert_eq!( node_config.state_sync.state_sync_driver.bootstrapping_mode, - BootstrappingMode::DownloadLatestStates + BootstrappingMode::ExecuteTransactionsFromGenesis ); } @@ -429,10 +434,10 @@ mod tests { .unwrap(); assert!(modified_config); - // Verify that the bootstrapping mode is still set to execution mode + // Verify that the bootstrapping mode is now set to fast sync assert_eq!( node_config.state_sync.state_sync_driver.bootstrapping_mode, - BootstrappingMode::ExecuteTransactionsFromGenesis + BootstrappingMode::DownloadLatestStates ); } diff --git a/config/src/config/storage_config.rs b/config/src/config/storage_config.rs index c9f29cd240ade..933fd1d940db4 100644 --- a/config/src/config/storage_config.rs +++ b/config/src/config/storage_config.rs @@ -65,9 +65,6 @@ pub struct RocksdbConfigs { pub ledger_db_config: RocksdbConfig, pub state_merkle_db_config: RocksdbConfig, // Note: Not ready for production use yet. - // TODO(grao): Deprecate this flag and use the split_ledger_db_to_individual_dbs below. - pub use_state_kv_db: bool, - // Note: Not ready for production use yet. pub use_sharded_state_merkle_db: bool, // Note: Not ready for production use yet. // TODO(grao): Add RocksdbConfig for individual DBs when necessary. 
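Stepping back to the state-sync optimizer change above: fast sync is now the default bootstrapping mode on both testnet and mainnet, but only when the operator has not set `bootstrapping_mode` in the local config. A standalone sketch of that decision rule (the enum and function names here are illustrative; the real logic lives in the `ConfigOptimizer` implementation for `StateSyncDriverConfig`):

```rust
/// Standalone sketch (not part of the patch) of the bootstrapping-mode default
/// introduced above. Names are illustrative.
#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum BootstrappingMode {
    DownloadLatestStates,
    ExecuteTransactionsFromGenesis,
}

/// Returns the mode to apply, or `None` when the existing default should be kept.
fn default_bootstrapping_mode(
    is_testnet: bool,
    is_mainnet: bool,
    local_override_present: bool,
) -> Option<BootstrappingMode> {
    if (is_testnet || is_mainnet) && !local_override_present {
        // Pruned networks cannot serve all data since genesis, so fast sync.
        Some(BootstrappingMode::DownloadLatestStates)
    } else {
        None
    }
}

fn main() {
    // Testnet node without a local override: fast sync is applied.
    assert_eq!(
        default_bootstrapping_mode(true, false, false),
        Some(BootstrappingMode::DownloadLatestStates)
    );
    // A custom chain (neither testnet nor mainnet) keeps the existing default,
    // matching the reworked `test_optimize_bootstrapping_mode_devnet_vfn` test.
    assert_eq!(default_bootstrapping_mode(false, false, false), None);
    // An explicit local override always wins.
    assert_eq!(default_bootstrapping_mode(false, true, true), None);
}
```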
@@ -81,7 +78,6 @@ impl Default for RocksdbConfigs { Self { ledger_db_config: RocksdbConfig::default(), state_merkle_db_config: RocksdbConfig::default(), - use_state_kv_db: false, use_sharded_state_merkle_db: false, split_ledger_db: false, state_kv_db_config: RocksdbConfig::default(), diff --git a/consensus/consensus-types/src/block_test.rs b/consensus/consensus-types/src/block_test.rs index df195c588036b..29544025f5e98 100644 --- a/consensus/consensus-types/src/block_test.rs +++ b/consensus/consensus-types/src/block_test.rs @@ -108,9 +108,6 @@ fn test_block_relation() { genesis_block.id() ); assert_eq!(next_block.payload(), Some(&payload)); - - let cloned_block = next_block.clone(); - assert_eq!(cloned_block.round(), next_block.round()); } // Ensure that blocks that extend from the same QuorumCertificate but with different signatures diff --git a/consensus/consensus-types/src/safety_data.rs b/consensus/consensus-types/src/safety_data.rs index ebae3e67c58c2..9e90f2870f68b 100644 --- a/consensus/consensus-types/src/safety_data.rs +++ b/consensus/consensus-types/src/safety_data.rs @@ -62,6 +62,6 @@ fn test_safety_data_upgrade() { preferred_round: 100, last_vote: None, }; - let value = serde_json::to_value(&old_data).unwrap(); + let value = serde_json::to_value(old_data).unwrap(); let _: SafetyData = serde_json::from_value(value).unwrap(); } diff --git a/consensus/src/dag/dag_store.rs b/consensus/src/dag/dag_store.rs new file mode 100644 index 0000000000000..e9a5cf0de067a --- /dev/null +++ b/consensus/src/dag/dag_store.rs @@ -0,0 +1,271 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::{anyhow, ensure}; +use aptos_consensus_types::common::{Author, Payload, Round}; +use aptos_crypto::{ + hash::{CryptoHash, CryptoHasher}, + HashValue, +}; +use aptos_crypto_derive::CryptoHasher; +use aptos_types::{aggregate_signature::AggregateSignature, validator_verifier::ValidatorVerifier}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + ops::Deref, + sync::Arc, +}; + +/// Represents the metadata about the node, without payload and parents from Node +#[derive(Clone, Serialize, Deserialize)] +pub struct NodeMetadata { + epoch: u64, + round: Round, + author: Author, + timestamp: u64, + digest: HashValue, +} + +/// Node representation in the DAG, parents contain 2f+1 strong links (links to previous round) +/// plus weak links (links to lower round) +#[derive(Clone, Serialize, Deserialize, CryptoHasher)] +pub struct Node { + metadata: NodeMetadata, + payload: Payload, + parents: Vec, +} + +impl Node { + pub fn new( + epoch: u64, + round: Round, + author: Author, + timestamp: u64, + payload: Payload, + parents: Vec, + ) -> Self { + let digest = Self::calculate_digest(epoch, round, author, timestamp, &payload, &parents); + + Self { + metadata: NodeMetadata { + epoch, + round, + author, + timestamp, + digest, + }, + payload, + parents, + } + } + + /// Calculate the node digest based on all fields in the node + fn calculate_digest( + epoch: u64, + round: Round, + author: Author, + timestamp: u64, + payload: &Payload, + parents: &Vec, + ) -> HashValue { + #[derive(Serialize)] + struct NodeWithoutDigest<'a> { + epoch: u64, + round: Round, + author: Author, + timestamp: u64, + payload: &'a Payload, + parents: &'a Vec, + } + + impl<'a> CryptoHash for NodeWithoutDigest<'a> { + type Hasher = NodeHasher; + + fn hash(&self) -> HashValue { + let mut state = Self::Hasher::new(); + let bytes = bcs::to_bytes(&self).expect("Unable to 
serialize node"); + state.update(&bytes); + state.finish() + } + } + + let node_with_out_digest = NodeWithoutDigest { + epoch, + round, + author, + timestamp, + payload, + parents, + }; + node_with_out_digest.hash() + } + + pub fn digest(&self) -> HashValue { + self.metadata.digest + } + + pub fn metadata(&self) -> NodeMetadata { + self.metadata.clone() + } +} + +/// Quorum signatures over the node digest +#[derive(Clone)] +pub struct NodeCertificate { + digest: HashValue, + signatures: AggregateSignature, +} + +impl NodeCertificate { + pub fn new(digest: HashValue, signatures: AggregateSignature) -> Self { + Self { digest, signatures } + } +} + +#[derive(Clone)] +pub struct CertifiedNode { + node: Node, + certificate: NodeCertificate, +} + +impl CertifiedNode { + pub fn new(node: Node, certificate: NodeCertificate) -> Self { + Self { node, certificate } + } +} + +impl Deref for CertifiedNode { + type Target = Node; + + fn deref(&self) -> &Self::Target { + &self.node + } +} + +/// Data structure that stores the DAG representation, it maintains both hash based index and +/// round based index. +pub struct Dag { + nodes_by_digest: HashMap>, + nodes_by_round: BTreeMap>>>, + /// Map between peer id to vector index + author_to_index: HashMap, + /// Highest head nodes that are not linked by other nodes + highest_unlinked_nodes_by_author: Vec>>, +} + +impl Dag { + pub fn new(author_to_index: HashMap, initial_round: Round) -> Self { + let mut nodes_by_round = BTreeMap::new(); + let num_nodes = author_to_index.len(); + nodes_by_round.insert(initial_round, vec![None; num_nodes]); + Self { + nodes_by_digest: HashMap::new(), + nodes_by_round, + author_to_index, + highest_unlinked_nodes_by_author: vec![None; num_nodes], + } + } + + fn lowest_round(&self) -> Round { + *self + .nodes_by_round + .first_key_value() + .map(|(round, _)| round) + .unwrap_or(&0) + } + + fn highest_round(&self) -> Round { + *self + .nodes_by_round + .last_key_value() + .map(|(round, _)| round) + .unwrap_or(&0) + } + + pub fn add_node(&mut self, node: CertifiedNode) -> anyhow::Result<()> { + let node = Arc::new(node); + let index = *self + .author_to_index + .get(&node.metadata.author) + .ok_or_else(|| anyhow!("unknown author"))?; + let round = node.metadata.round; + ensure!(round >= self.lowest_round(), "round too low"); + ensure!(round <= self.highest_round() + 1, "round too high"); + for parent in &node.parents { + ensure!(self.exists(&parent.digest), "parent not exist"); + } + ensure!( + self.nodes_by_digest + .insert(node.metadata.digest, node.clone()) + .is_none(), + "duplicate node" + ); + ensure!( + self.nodes_by_round + .entry(round) + .or_insert_with(|| vec![None; self.author_to_index.len()])[index] + .replace(node.clone()) + .is_none(), + "equivocate node" + ); + if round + > self.highest_unlinked_nodes_by_author[index] + .as_ref() + .map_or(0, |node| node.metadata.round) + { + self.highest_unlinked_nodes_by_author[index].replace(node); + } + Ok(()) + } + + pub fn exists(&self, digest: &HashValue) -> bool { + self.nodes_by_digest.contains_key(digest) + } + + pub fn get_node(&self, digest: &HashValue) -> Option> { + self.nodes_by_digest.get(digest).cloned() + } + + pub fn get_unlinked_nodes_for_new_round( + &self, + validator_verifier: &ValidatorVerifier, + ) -> Option> { + let current_round = self.highest_round(); + let strong_link_authors = + self.highest_unlinked_nodes_by_author + .iter() + .filter_map(|maybe_node| { + maybe_node.as_ref().and_then(|node| { + if node.metadata.round == current_round { + 
Some(&node.metadata.author) + } else { + None + } + }) + }); + if validator_verifier + .check_voting_power(strong_link_authors) + .is_ok() + { + Some( + self.highest_unlinked_nodes_by_author + .iter() + .filter_map(|maybe_node| maybe_node.as_ref().map(|node| node.metadata.clone())) + .collect(), + ) + } else { + None + } + } + + pub fn mark_nodes_linked(&mut self, node_metadata: &[NodeMetadata]) { + let digests: HashSet<_> = node_metadata.iter().map(|node| node.digest).collect(); + for maybe_node in &mut self.highest_unlinked_nodes_by_author { + if let Some(node) = maybe_node { + if digests.contains(&node.metadata.digest) { + *maybe_node = None; + } + } + } + } +} diff --git a/consensus/src/dag/mod.rs b/consensus/src/dag/mod.rs new file mode 100644 index 0000000000000..3108eeeaec6fc --- /dev/null +++ b/consensus/src/dag/mod.rs @@ -0,0 +1,9 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 +#![allow(dead_code)] + +mod dag_store; +mod reliable_broadcast; +#[cfg(test)] +mod tests; diff --git a/consensus/src/dag/reliable_broadcast.rs b/consensus/src/dag/reliable_broadcast.rs new file mode 100644 index 0000000000000..91f56d4cc4de4 --- /dev/null +++ b/consensus/src/dag/reliable_broadcast.rs @@ -0,0 +1,98 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::network_interface::ConsensusMsg; +use aptos_consensus_types::common::Author; +use async_trait::async_trait; +use futures::{stream::FuturesUnordered, StreamExt}; +use std::{future::Future, sync::Arc, time::Duration}; +use tokio::sync::oneshot; + +pub trait DAGMessage: Sized + Clone { + fn from_network_message(msg: ConsensusMsg) -> anyhow::Result; + + fn into_network_message(self) -> ConsensusMsg; +} + +pub trait BroadcastStatus { + type Message: DAGMessage; + type Ack: DAGMessage; + type Aggregated; + + fn empty(validators: Vec) -> Self; + + fn add(&mut self, peer: Author, ack: Self::Ack) -> anyhow::Result>; +} + +#[async_trait] +pub trait DAGNetworkSender: Send + Sync { + async fn send_rpc( + &self, + receiver: Author, + message: ConsensusMsg, + timeout: Duration, + ) -> anyhow::Result; +} + +pub struct ReliableBroadcast { + validators: Vec, + network_sender: Arc, +} + +impl ReliableBroadcast { + pub fn new(validators: Vec, network_sender: Arc) -> Self { + Self { + validators, + network_sender, + } + } + + pub fn broadcast( + &self, + message: S::Message, + return_tx: oneshot::Sender, + mut cancel_rx: oneshot::Receiver<()>, + ) -> impl Future { + let receivers: Vec<_> = self.validators.clone(); + let network_message = message.into_network_message(); + let network_sender = self.network_sender.clone(); + async move { + let mut aggregating = S::empty(receivers.clone()); + let mut fut = FuturesUnordered::new(); + let send_message = |receiver, message| { + let network_sender = network_sender.clone(); + async move { + ( + receiver, + network_sender + .send_rpc(receiver, message, Duration::from_millis(500)) + .await, + ) + } + }; + for receiver in receivers { + fut.push(send_message(receiver, network_message.clone())); + } + loop { + tokio::select! 
{ + Some((receiver, result)) = fut.next() => { + match result { + Ok(msg) => { + if let Ok(ack) = S::Ack::from_network_message(msg) { + if let Ok(Some(aggregated)) = aggregating.add(receiver, ack) { + let _ = return_tx.send(aggregated); + return; + } + } + }, + Err(_) => fut.push(send_message(receiver, network_message.clone())), + } + } + _ = &mut cancel_rx => { + return; + } + } + } + } + } +} diff --git a/consensus/src/dag/tests/dag_test.rs b/consensus/src/dag/tests/dag_test.rs new file mode 100644 index 0000000000000..e03b45f9449c4 --- /dev/null +++ b/consensus/src/dag/tests/dag_test.rs @@ -0,0 +1,97 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::dag::dag_store::{CertifiedNode, Dag, Node, NodeCertificate, NodeMetadata}; +use aptos_consensus_types::common::{Author, Payload, Round}; +use aptos_types::{ + aggregate_signature::AggregateSignature, validator_verifier::random_validator_verifier, +}; + +#[test] +fn test_dag_insertion_succeed() { + let (signers, validator_verifier) = random_validator_verifier(4, None, false); + let author_to_index = validator_verifier.address_to_validator_index().clone(); + let mut dag = Dag::new(author_to_index, 0); + + // Round 1 - nodes 0, 1, 2 links to vec![] + for signer in &signers[0..3] { + let node = new_node(1, signer.author(), vec![]); + assert!(dag.add_node(node).is_ok()); + } + let parents = dag + .get_unlinked_nodes_for_new_round(&validator_verifier) + .unwrap(); + + // Round 2 nodes 0, 1, 2 links to 0, 1, 2 + for signer in &signers[0..3] { + let node = new_node(2, signer.author(), parents.clone()); + assert!(dag.add_node(node).is_ok()); + } + + let slow_node = new_node(1, signers[3].author(), vec![]); + assert!(dag.add_node(slow_node).is_ok()); + + // Round 3 nodes 1, 2 links to 0, 1, 2, 3 (weak) + let parents = dag + .get_unlinked_nodes_for_new_round(&validator_verifier) + .unwrap(); + assert_eq!(parents.len(), 4); + + dag.mark_nodes_linked(&parents); + assert!(dag + .get_unlinked_nodes_for_new_round(&validator_verifier) + .is_none()); + + for signer in &signers[1..3] { + let node = new_node(3, signer.author(), parents.clone()); + assert!(dag.add_node(node).is_ok()); + } + + // not enough strong links + assert!(dag + .get_unlinked_nodes_for_new_round(&validator_verifier) + .is_none()); +} + +#[test] +fn test_dag_insertion_failure() { + let (signers, validator_verifier) = random_validator_verifier(4, None, false); + let author_to_index = validator_verifier.address_to_validator_index().clone(); + let mut dag = Dag::new(author_to_index, 0); + + // Round 1 - nodes 0, 1, 2 links to vec![] + for signer in &signers[0..3] { + let node = new_node(1, signer.author(), vec![]); + assert!(dag.add_node(node.clone()).is_ok()); + // duplicate node + assert!(dag.add_node(node).is_err()); + } + + let missing_node = new_node(1, signers[3].author(), vec![]); + let mut parents = dag + .get_unlinked_nodes_for_new_round(&validator_verifier) + .unwrap(); + parents.push(missing_node.metadata()); + + let node = new_node(2, signers[0].author(), parents.clone()); + // parents not exist + assert!(dag.add_node(node).is_err()); + + let node = new_node(3, signers[0].author(), vec![]); + // round too high + assert!(dag.add_node(node).is_err()); + + let node = new_node(2, signers[0].author(), parents[0..3].to_vec()); + assert!(dag.add_node(node).is_ok()); + let node = new_node(2, signers[0].author(), vec![]); + assert!(dag.add_node(node).is_err()); +} + +fn new_node(round: Round, author: Author, parents: Vec) -> CertifiedNode { + let node 
= Node::new(1, round, author, 0, Payload::empty(false), parents); + let digest = node.digest(); + CertifiedNode::new( + node, + NodeCertificate::new(digest, AggregateSignature::empty()), + ) +} diff --git a/consensus/src/dag/tests/mod.rs b/consensus/src/dag/tests/mod.rs new file mode 100644 index 0000000000000..c5254238605a4 --- /dev/null +++ b/consensus/src/dag/tests/mod.rs @@ -0,0 +1,5 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +mod dag_test; +mod reliable_broadcast_tests; diff --git a/consensus/src/dag/tests/reliable_broadcast_tests.rs b/consensus/src/dag/tests/reliable_broadcast_tests.rs new file mode 100644 index 0000000000000..a606a86579bd1 --- /dev/null +++ b/consensus/src/dag/tests/reliable_broadcast_tests.rs @@ -0,0 +1,153 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + dag::reliable_broadcast::{BroadcastStatus, DAGMessage, DAGNetworkSender, ReliableBroadcast}, + network_interface::ConsensusMsg, +}; +use anyhow::bail; +use aptos_consensus_types::common::Author; +use aptos_infallible::Mutex; +use aptos_types::validator_verifier::random_validator_verifier; +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::Arc, + time::Duration, +}; +use tokio::sync::oneshot; + +#[derive(Serialize, Deserialize, Clone)] +struct TestMessage(Vec); + +impl DAGMessage for TestMessage { + fn from_network_message(msg: ConsensusMsg) -> anyhow::Result { + match msg { + ConsensusMsg::DAGTestMessage(payload) => Ok(Self(payload)), + _ => bail!("wrong message"), + } + } + + fn into_network_message(self) -> ConsensusMsg { + ConsensusMsg::DAGTestMessage(self.0) + } +} + +#[derive(Serialize, Deserialize, Clone)] +struct TestAck; + +impl DAGMessage for TestAck { + fn from_network_message(_: ConsensusMsg) -> anyhow::Result { + Ok(TestAck) + } + + fn into_network_message(self) -> ConsensusMsg { + ConsensusMsg::DAGTestMessage(vec![]) + } +} + +struct TestBroadcastStatus { + threshold: usize, + received: HashSet, +} + +impl BroadcastStatus for TestBroadcastStatus { + type Ack = TestAck; + type Aggregated = HashSet; + type Message = TestMessage; + + fn empty(receivers: Vec) -> Self { + Self { + threshold: receivers.len(), + received: HashSet::new(), + } + } + + fn add(&mut self, peer: Author, _ack: Self::Ack) -> anyhow::Result> { + self.received.insert(peer); + if self.received.len() == self.threshold { + Ok(Some(self.received.clone())) + } else { + Ok(None) + } + } +} + +struct TestDAGSender { + failures: Mutex>, + received: Mutex>, +} + +impl TestDAGSender { + fn new(failures: HashMap) -> Self { + Self { + failures: Mutex::new(failures), + received: Mutex::new(HashMap::new()), + } + } +} + +#[async_trait] +impl DAGNetworkSender for TestDAGSender { + async fn send_rpc( + &self, + receiver: Author, + message: ConsensusMsg, + _timeout: Duration, + ) -> anyhow::Result { + match self.failures.lock().entry(receiver) { + Entry::Occupied(mut entry) => { + let count = entry.get_mut(); + *count -= 1; + if *count == 0 { + entry.remove(); + } + bail!("simulated failure"); + }, + Entry::Vacant(_) => (), + }; + self.received + .lock() + .insert(receiver, TestMessage::from_network_message(message)?); + Ok(ConsensusMsg::DAGTestMessage(vec![])) + } +} + +#[tokio::test] +async fn test_reliable_broadcast() { + let (_, validator_verifier) = random_validator_verifier(5, None, false); + let validators = validator_verifier.get_ordered_account_addresses(); + let failures = 
HashMap::from([(validators[0], 1), (validators[2], 3)]); + let sender = Arc::new(TestDAGSender::new(failures)); + let rb = ReliableBroadcast::new(validators.clone(), sender); + let message = TestMessage(vec![1, 2, 3]); + let (tx, rx) = oneshot::channel(); + let (_cancel_tx, cancel_rx) = oneshot::channel(); + tokio::spawn(rb.broadcast::(message, tx, cancel_rx)); + assert_eq!(rx.await.unwrap(), validators.into_iter().collect()); +} + +#[tokio::test] +async fn test_reliable_broadcast_cancel() { + let (_, validator_verifier) = random_validator_verifier(5, None, false); + let validators = validator_verifier.get_ordered_account_addresses(); + let failures = HashMap::from([(validators[0], 1), (validators[2], 3)]); + let sender = Arc::new(TestDAGSender::new(failures)); + let rb = ReliableBroadcast::new(validators.clone(), sender); + let message = TestMessage(vec![1, 2, 3]); + + // explicit send cancel + let (tx, rx) = oneshot::channel(); + let (cancel_tx, cancel_rx) = oneshot::channel(); + cancel_tx.send(()).unwrap(); + tokio::spawn(rb.broadcast::(message.clone(), tx, cancel_rx)); + assert!(rx.await.is_err()); + + // implicit drop cancel + let (tx, rx) = oneshot::channel(); + let (cancel_tx, cancel_rx) = oneshot::channel(); + drop(cancel_tx); + tokio::spawn(rb.broadcast::(message, tx, cancel_rx)); + assert!(rx.await.is_err()); +} diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 88e735374cb09..51c0ccc45610d 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -17,6 +17,7 @@ extern crate core; mod block_storage; mod consensusdb; +mod dag; mod epoch_manager; mod error; mod experimental; diff --git a/consensus/src/liveness/leader_reputation_test.rs b/consensus/src/liveness/leader_reputation_test.rs index 42858e12f59d4..ad1f58ad3eacf 100644 --- a/consensus/src/liveness/leader_reputation_test.rs +++ b/consensus/src/liveness/leader_reputation_test.rs @@ -36,7 +36,7 @@ use std::{collections::HashMap, sync::Arc}; #[test] fn test_aggregation_bitmap_to_voters() { - let validators: Vec<_> = (0..4).into_iter().map(|_| Author::random()).collect(); + let validators: Vec<_> = (0..4).map(|_| Author::random()).collect(); let bitmap = vec![true, true, false, true]; if let Ok(voters) = NewBlockEventAggregation::bitvec_to_voters(&validators, &bitmap.into()) { @@ -51,7 +51,6 @@ fn test_aggregation_bitmap_to_voters() { #[test] fn test_aggregation_bitmap_to_voters_mismatched_lengths() { let validators: Vec<_> = (0..8) // size of 8 with one u8 in bitvec - .into_iter() .map(|_| Author::random()) .collect(); let bitmap_too_long = vec![true; 9]; // 2 bytes in bitvec @@ -66,7 +65,7 @@ fn test_aggregation_bitmap_to_voters_mismatched_lengths() { #[test] fn test_aggregation_indices_to_authors() { - let validators: Vec<_> = (0..4).into_iter().map(|_| Author::random()).collect(); + let validators: Vec<_> = (0..4).map(|_| Author::random()).collect(); let indices = vec![2u64, 2, 0, 3]; if let Ok(authors) = NewBlockEventAggregation::indices_to_validators(&validators, &indices) { @@ -81,7 +80,7 @@ fn test_aggregation_indices_to_authors() { #[test] fn test_aggregation_indices_to_authors_out_of_index() { - let validators: Vec<_> = (0..4).into_iter().map(|_| Author::random()).collect(); + let validators: Vec<_> = (0..4).map(|_| Author::random()).collect(); let indices = vec![0, 0, 4, 0]; assert!(NewBlockEventAggregation::indices_to_validators(&validators, &indices).is_err()); } @@ -95,8 +94,7 @@ struct Example1 { impl Example1 { fn new(window_size: usize) -> Self { - let mut sorted_validators: Vec = - 
(0..5).into_iter().map(|_| Author::random()).collect(); + let mut sorted_validators: Vec = (0..5).map(|_| Author::random()).collect(); sorted_validators.sort(); // same first 3 validators, different 4th validator (index 3). let mut validators0: Vec = sorted_validators[..3].to_vec(); diff --git a/consensus/src/network_interface.rs b/consensus/src/network_interface.rs index 68b38adb28db1..ec1b26695b27f 100644 --- a/consensus/src/network_interface.rs +++ b/consensus/src/network_interface.rs @@ -61,6 +61,8 @@ pub enum ConsensusMsg { SignedBatchInfo(Box), /// Quorum Store: Broadcast a certified proof of store (a digest that received 2f+1 votes). ProofOfStoreMsg(Box), + #[cfg(test)] + DAGTestMessage(Vec), } /// Network type for consensus @@ -83,6 +85,8 @@ impl ConsensusMsg { ConsensusMsg::BatchResponse(_) => "BatchResponse", ConsensusMsg::SignedBatchInfo(_) => "SignedBatchInfo", ConsensusMsg::ProofOfStoreMsg(_) => "ProofOfStoreMsg", + #[cfg(test)] + ConsensusMsg::DAGTestMessage(_) => "DAGTestMessage", } } } diff --git a/consensus/src/quorum_store/proof_coordinator.rs b/consensus/src/quorum_store/proof_coordinator.rs index a179cfb617c56..a601542be8579 100644 --- a/consensus/src/quorum_store/proof_coordinator.rs +++ b/consensus/src/quorum_store/proof_coordinator.rs @@ -116,13 +116,12 @@ impl IncrementalProofState { } self.completed = true; - let proof = match validator_verifier + match validator_verifier .aggregate_signatures(&PartialSignatures::new(self.aggregated_signature.clone())) { Ok(sig) => ProofOfStore::new(self.info.clone(), sig), Err(e) => unreachable!("Cannot aggregate signatures on digest err = {:?}", e), - }; - proof + } } } diff --git a/consensus/src/round_manager_test.rs b/consensus/src/round_manager_test.rs index 3f23d6506351b..7fccdface5e66 100644 --- a/consensus/src/round_manager_test.rs +++ b/consensus/src/round_manager_test.rs @@ -1305,6 +1305,7 @@ fn vote_resent_on_timeout() { } #[test] +#[ignore] // TODO: this test needs to be fixed! 
fn sync_on_partial_newer_sync_info() { let runtime = consensus_runtime(); let mut playground = NetworkPlayground::new(runtime.handle().clone()); @@ -1331,7 +1332,7 @@ fn sync_on_partial_newer_sync_info() { .unwrap(); // commit genesis and block 1 for i in 0..2 { - let _ = node.commit_next_ordered(&[i]); + node.commit_next_ordered(&[i]).await; } let vote_msg = node.next_vote().await; let vote_data = vote_msg.vote().vote_data(); diff --git a/consensus/src/state_computer.rs b/consensus/src/state_computer.rs index 5a17885d5138f..2f6bbdcdb1703 100644 --- a/consensus/src/state_computer.rs +++ b/consensus/src/state_computer.rs @@ -58,6 +58,7 @@ pub struct ExecutionProxy { write_mutex: AsyncMutex, payload_manager: Mutex>>, transaction_shuffler: Mutex>>, + maybe_block_gas_limit: Mutex>, transaction_deduper: Mutex>>, } @@ -92,6 +93,7 @@ impl ExecutionProxy { write_mutex: AsyncMutex::new(LogicalTime::new(0, 0)), payload_manager: Mutex::new(None), transaction_shuffler: Mutex::new(None), + maybe_block_gas_limit: Mutex::new(None), transaction_deduper: Mutex::new(None), } } @@ -127,7 +129,7 @@ impl StateComputer for ExecutionProxy { let deduped_txns = txn_deduper.dedup(txns); let shuffled_txns = txn_shuffler.shuffle(deduped_txns); - let block_gas_limit = self.executor.get_block_gas_limit(); + let block_gas_limit = *self.maybe_block_gas_limit.lock(); // TODO: figure out error handling for the prologue txn let executor = self.executor.clone(); @@ -141,7 +143,11 @@ impl StateComputer for ExecutionProxy { let compute_result = monitor!( "execute_block", tokio::task::spawn_blocking(move || { - executor.execute_block((block_id, transactions_to_execute), parent_block_id) + executor.execute_block( + (block_id, transactions_to_execute).into(), + parent_block_id, + block_gas_limit, + ) }) .await ) @@ -185,7 +191,7 @@ impl StateComputer for ExecutionProxy { let txn_deduper = self.transaction_deduper.lock().as_ref().unwrap().clone(); let txn_shuffler = self.transaction_shuffler.lock().as_ref().unwrap().clone(); - let block_gas_limit = self.executor.get_block_gas_limit(); + let block_gas_limit = *self.maybe_block_gas_limit.lock(); for block in blocks { block_ids.push(block.id()); @@ -295,7 +301,7 @@ impl StateComputer for ExecutionProxy { epoch_state: &EpochState, payload_manager: Arc, transaction_shuffler: Arc, - _block_gas_limit: Option, + block_gas_limit: Option, transaction_deduper: Arc, ) { *self.validators.lock() = epoch_state @@ -306,9 +312,7 @@ impl StateComputer for ExecutionProxy { self.transaction_shuffler .lock() .replace(transaction_shuffler); - // TODO: Temporarily disable initializing block gas limit and leave it as default None, - // until there is a better way to handle the possible panic when executor is initialized. 
- // self.executor.update_block_gas_limit(block_gas_limit); + *self.maybe_block_gas_limit.lock() = block_gas_limit; self.transaction_deduper.lock().replace(transaction_deduper); } @@ -329,6 +333,7 @@ async fn test_commit_sync_race() { use aptos_consensus_notifications::Error; use aptos_types::{ aggregate_signature::AggregateSignature, + block_executor::partitioner::ExecutableBlock, block_info::BlockInfo, ledger_info::LedgerInfo, on_chain_config::{TransactionDeduperType, TransactionShufflerType}, @@ -350,8 +355,9 @@ async fn test_commit_sync_race() { fn execute_block( &self, - _block: (HashValue, Vec), + _block: ExecutableBlock, _parent_block_id: HashValue, + _maybe_block_gas_limit: Option, ) -> Result { Ok(StateComputeResult::new_dummy()) } @@ -370,12 +376,6 @@ async fn test_commit_sync_race() { } fn finish(&self) {} - - fn get_block_gas_limit(&self) -> Option { - None - } - - fn update_block_gas_limit(&self, _block_gas_limit: Option) {} } #[async_trait::async_trait] diff --git a/consensus/src/txn_hash_and_authenticator_deduper.rs b/consensus/src/txn_hash_and_authenticator_deduper.rs index 38ec5aeef421b..1f3331065b081 100644 --- a/consensus/src/txn_hash_and_authenticator_deduper.rs +++ b/consensus/src/txn_hash_and_authenticator_deduper.rs @@ -282,7 +282,6 @@ mod tests { let sender = Account::new(); let txns: Vec<_> = (0..PERF_TXN_PER_BLOCK) - .into_iter() .map(|i| { empty_txn(sender.addr, i as u64, 100) .sign(&sender.privkey, sender.pubkey.clone()) @@ -324,7 +323,6 @@ mod tests { let sender = Account::new(); let receiver = Account::new(); let txns: Vec<_> = (0..PERF_TXN_PER_BLOCK) - .into_iter() .map(|i| { peer_to_peer_txn(sender.addr, receiver.addr, i as u64, 100) .sign(&sender.privkey, sender.pubkey.clone()) diff --git a/crates/aptos-crypto/src/multi_ed25519.rs b/crates/aptos-crypto/src/multi_ed25519.rs index 53bab7eec39cc..0a543dd1c5d07 100644 --- a/crates/aptos-crypto/src/multi_ed25519.rs +++ b/crates/aptos-crypto/src/multi_ed25519.rs @@ -284,7 +284,7 @@ impl PublicKey for MultiEd25519PublicKey { type PrivateKeyMaterial = MultiEd25519PrivateKey; } -#[allow(clippy::derive_hash_xor_eq)] +#[allow(clippy::derived_hash_with_manual_eq)] impl std::hash::Hash for MultiEd25519PublicKey { fn hash(&self, state: &mut H) { let encoded_pubkey = self.to_bytes(); @@ -457,7 +457,7 @@ impl Length for MultiEd25519Signature { } } -#[allow(clippy::derive_hash_xor_eq)] +#[allow(clippy::derived_hash_with_manual_eq)] impl std::hash::Hash for MultiEd25519Signature { fn hash(&self, state: &mut H) { let encoded_signature = self.to_bytes(); diff --git a/crates/aptos-crypto/src/unit_tests/ed25519_test.rs b/crates/aptos-crypto/src/unit_tests/ed25519_test.rs index 801501c928f07..335f2de2ed248 100644 --- a/crates/aptos-crypto/src/unit_tests/ed25519_test.rs +++ b/crates/aptos-crypto/src/unit_tests/ed25519_test.rs @@ -1,6 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::redundant_clone)] // Required to work around prop_assert_eq! 
limitations + use crate as aptos_crypto; use crate::{ ed25519::{ diff --git a/crates/aptos-faucet/cli/src/main.rs b/crates/aptos-faucet/cli/src/main.rs index f31fa05fbdbb2..b1baed5038dc0 100644 --- a/crates/aptos-faucet/cli/src/main.rs +++ b/crates/aptos-faucet/cli/src/main.rs @@ -74,7 +74,8 @@ impl FaucetCliArgs { Duration::from_secs(30), None, self.max_gas_amount, - 30, + 25, // transaction_expiration_secs + 30, // wait_for_outstanding_txns_secs true, ); diff --git a/crates/aptos-faucet/core/Cargo.toml b/crates/aptos-faucet/core/Cargo.toml index 54d663090e7e2..0653a1eacc56e 100644 --- a/crates/aptos-faucet/core/Cargo.toml +++ b/crates/aptos-faucet/core/Cargo.toml @@ -33,7 +33,7 @@ once_cell = { workspace = true } poem = { workspace = true } poem-openapi = { workspace = true } rand = { workspace = true } -redis = { workspace = true, features = ["aio", "tokio-comp", "connection-manager"], default-features = false } +redis = { workspace = true, features = ["aio", "tokio-comp", "connection-manager"] } reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/crates/aptos-faucet/core/src/funder/common.rs b/crates/aptos-faucet/core/src/funder/common.rs index 728ff1a246461..e54d83f0f1b25 100644 --- a/crates/aptos-faucet/core/src/funder/common.rs +++ b/crates/aptos-faucet/core/src/funder/common.rs @@ -140,6 +140,10 @@ pub struct TransactionSubmissionConfig { #[serde(default = "TransactionSubmissionConfig::default_transaction_expiration_secs")] pub transaction_expiration_secs: u64, + /// Amount of time we'll wait for the seqnum to catch up before resetting it. + #[serde(default = "TransactionSubmissionConfig::default_wait_for_outstanding_txns_secs")] + pub wait_for_outstanding_txns_secs: u64, + /// Whether to wait for the transaction before returning. #[serde(default)] pub wait_for_transactions: bool, @@ -152,6 +156,7 @@ impl TransactionSubmissionConfig { gas_unit_price_override: Option, max_gas_amount: u64, transaction_expiration_secs: u64, + wait_for_outstanding_txns_secs: u64, wait_for_transactions: bool, ) -> Self { Self { @@ -160,6 +165,7 @@ impl TransactionSubmissionConfig { gas_unit_price_override, max_gas_amount, transaction_expiration_secs, + wait_for_outstanding_txns_secs, wait_for_transactions, } } @@ -173,6 +179,10 @@ impl TransactionSubmissionConfig { } fn default_transaction_expiration_secs() -> u64 { + 25 + } + + fn default_wait_for_outstanding_txns_secs() -> u64 { 30 } @@ -198,6 +208,7 @@ pub async fn update_sequence_numbers( outstanding_requests: &RwLock>, receiver_address: AccountAddress, amount: u64, + wait_for_outstanding_txns_secs: u64, ) -> Result<(u64, Option), AptosTapError> { let (mut funder_seq, mut receiver_seq) = get_sequence_numbers(client, funder_account, receiver_address).await?; @@ -216,7 +227,7 @@ pub async fn update_sequence_numbers( let mut set_outstanding = false; // We shouldn't have too many outstanding txns - for _ in 0..60 { + for _ in 0..(wait_for_outstanding_txns_secs * 2) { if our_funder_seq < funder_seq + MAX_NUM_OUTSTANDING_TRANSACTIONS { // Enforce a stronger ordering of priorities based upon the MintParams that arrived // first. Then put the other folks to sleep to try again until the queue fills up. 
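Note on the retry bound above: with the new `wait_for_outstanding_txns_secs` knob the loop runs `wait_for_outstanding_txns_secs * 2` times, which reproduces the previous hard-coded 60 iterations for the 30-second default and implies roughly a half-second pause per attempt. The sketch below only illustrates that relationship; the helper name, the outstanding-transaction limit, and the exact sleep interval are assumptions, not the faucet's actual implementation.

```
use std::time::Duration;

/// Hypothetical sketch of the seqnum catch-up wait: two attempts per second,
/// so the loop spans at most `wait_for_outstanding_txns_secs` seconds overall.
async fn wait_for_catchup(
    wait_for_outstanding_txns_secs: u64,
    mut outstanding_txns: impl FnMut() -> u64, // stand-in for re-reading the funder seqnum
) -> bool {
    const MAX_NUM_OUTSTANDING_TRANSACTIONS: u64 = 10; // assumed limit
    for _ in 0..(wait_for_outstanding_txns_secs * 2) {
        if outstanding_txns() < MAX_NUM_OUTSTANDING_TRANSACTIONS {
            return true; // caught up within the configured window
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    false // caller falls back to resetting the sequence number
}
```

With the default of 30 seconds this keeps the old 60-attempt behaviour, while deployments can now widen or tighten the window purely through `TransactionSubmissionConfig`.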
@@ -353,7 +364,7 @@ pub async fn submit_transaction( Ok(_) => { info!( hash = signed_transaction.clone().committed_hash().to_hex_literal(), - receiver_address = receiver_address, + address = receiver_address, event = event_on_success, ); Ok(signed_transaction) diff --git a/crates/aptos-faucet/core/src/funder/mint.rs b/crates/aptos-faucet/core/src/funder/mint.rs index 75ec630ac7f9c..1cfcaeb930487 100644 --- a/crates/aptos-faucet/core/src/funder/mint.rs +++ b/crates/aptos-faucet/core/src/funder/mint.rs @@ -76,6 +76,8 @@ impl MintFunderConfig { self.transaction_submission_config.max_gas_amount, self.transaction_submission_config .transaction_expiration_secs, + self.transaction_submission_config + .wait_for_outstanding_txns_secs, self.transaction_submission_config.wait_for_transactions, ); @@ -112,6 +114,9 @@ pub struct MintFunder { /// requests in the order they came in. outstanding_requests: RwLock>, + /// Amount of time we'll wait for the seqnum to catch up before resetting it. + wait_for_outstanding_txns_secs: u64, + /// If set, we won't return responses until the transaction is processed. wait_for_transactions: bool, } @@ -126,6 +131,7 @@ impl MintFunder { gas_unit_price_override: Option, max_gas_amount: u64, transaction_expiration_secs: u64, + wait_for_outstanding_txns_secs: u64, wait_for_transactions: bool, ) -> Self { let gas_unit_price_manager = @@ -140,6 +146,7 @@ impl MintFunder { gas_unit_price_manager, gas_unit_price_override, outstanding_requests: RwLock::new(vec![]), + wait_for_outstanding_txns_secs, wait_for_transactions, } } @@ -253,6 +260,7 @@ impl MintFunder { &self.outstanding_requests, receiver_address, amount, + self.wait_for_outstanding_txns_secs, ) .await?; diff --git a/crates/aptos-faucet/core/src/funder/transfer.rs b/crates/aptos-faucet/core/src/funder/transfer.rs index 54212b5fba430..d771c8c0e55fb 100644 --- a/crates/aptos-faucet/core/src/funder/transfer.rs +++ b/crates/aptos-faucet/core/src/funder/transfer.rs @@ -71,6 +71,8 @@ impl TransferFunderConfig { self.transaction_submission_config.max_gas_amount, self.transaction_submission_config .transaction_expiration_secs, + self.transaction_submission_config + .wait_for_outstanding_txns_secs, self.transaction_submission_config.wait_for_transactions, ); @@ -103,6 +105,9 @@ pub struct TransferFunder { /// requests in the order they came in. outstanding_requests: RwLock>, + /// Amount of time we'll wait for the seqnum to catch up before resetting it. + wait_for_outstanding_txns_secs: u64, + /// If set, we won't return responses until the transaction is processed. 
wait_for_transactions: bool, } @@ -118,6 +123,7 @@ impl TransferFunder { gas_unit_price_override: Option, max_gas_amount: u64, transaction_expiration_secs: u64, + wait_for_outstanding_txns_secs: u64, wait_for_transactions: bool, ) -> Self { let gas_unit_price_manager = @@ -134,6 +140,7 @@ impl TransferFunder { gas_unit_price_manager, gas_unit_price_override, outstanding_requests: RwLock::new(vec![]), + wait_for_outstanding_txns_secs, wait_for_transactions, } } @@ -246,6 +253,7 @@ impl FunderTrait for TransferFunder { &self.outstanding_requests, receiver_address, amount, + self.wait_for_outstanding_txns_secs, ) .await?; diff --git a/crates/aptos-faucet/core/src/server/run.rs b/crates/aptos-faucet/core/src/server/run.rs index ec29bee9043d9..87dcd4382b660 100644 --- a/crates/aptos-faucet/core/src/server/run.rs +++ b/crates/aptos-faucet/core/src/server/run.rs @@ -271,6 +271,7 @@ impl RunConfig { None, // gas_unit_price_override 500_000, // max_gas_amount 30, // transaction_expiration_secs + 35, // wait_for_outstanding_txns_secs false, // wait_for_transactions ), mint_account_address: Some(aptos_test_root_address()), diff --git a/crates/aptos-faucet/integration-tests/README.md b/crates/aptos-faucet/integration-tests/README.md index a289a75d6897a..9518c2627ae2a 100644 --- a/crates/aptos-faucet/integration-tests/README.md +++ b/crates/aptos-faucet/integration-tests/README.md @@ -27,11 +27,24 @@ poetry run python main.py -h For example: ``` -poetry run python main.py --base-network mainnet +poetry run python main.py --base-network testnet ``` +## Debugging +If you are get an error message similar to this: +``` +docker: no matching manifest for linux/arm64/v8 in the manifest list entries. +``` + +Try running the poetry command with this env var: +``` +DOCKER_DEFAULT_PLATFORM=linux/amd64 poetry run python main.py --base-network testnet +``` +This makes the docker commands use the x86_64 images since we don't publish images for ARM. + ## Formatting: ``` poetry run isort . poetry run black . ``` + diff --git a/crates/aptos-faucet/integration-tests/local_testnet.py b/crates/aptos-faucet/integration-tests/local_testnet.py index 9c45dfe5aa262..d030444970b1b 100644 --- a/crates/aptos-faucet/integration-tests/local_testnet.py +++ b/crates/aptos-faucet/integration-tests/local_testnet.py @@ -16,7 +16,7 @@ # stop running it later using the container name. For an explanation of these # arguments, see the argument parser in main.py. def run_node(network: Network, image_repo_with_project: str, external_test_dir: str): - image_name = build_image_name(image_repo_with_project, network) + image_name = build_image_name(image_repo_with_project, str(network)) container_name = f"local-testnet-{network}" internal_mount_path = "/mymount" LOG.info(f"Trying to run local testnet from image: {image_name}") @@ -45,6 +45,8 @@ def run_node(network: Network, image_repo_with_project: str, external_test_dir: [ "docker", "run", + "--pull", + "always", "--name", container_name, "--detach", diff --git a/crates/aptos-faucet/integration-tests/poetry.lock b/crates/aptos-faucet/integration-tests/poetry.lock index ca4af36423239..4587847cd3c62 100644 --- a/crates/aptos-faucet/integration-tests/poetry.lock +++ b/crates/aptos-faucet/integration-tests/poetry.lock @@ -1,3 +1,5 @@ +# This file is automatically @generated by Poetry and should not be changed by hand. + [[package]] name = "black" version = "22.12.0" @@ -5,6 +7,20 @@ description = "The uncompromising code formatter." 
category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, +] [package.dependencies] click = ">=8.0.0" @@ -28,6 +44,10 @@ description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, +] [[package]] name = "charset-normalizer" @@ -36,6 +56,83 @@ description = "The Real First Universal Charset Detector. 
Open, modern and activ category = "main" optional = false python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = 
"charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, +] [[package]] name = "click" @@ -44,6 +141,10 @@ description = "Composable command line interface toolkit" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -56,6 +157,10 @@ description = "Cross-platform colored terminal text." 
category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] [[package]] name = "idna" @@ -64,6 +169,10 @@ description = "Internationalized Domain Names in Applications (IDNA)" category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] [[package]] name = "importlib-metadata" @@ -72,6 +181,10 @@ description = "Read metadata from Python packages" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, + {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, +] [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} @@ -89,6 +202,10 @@ description = "A Python utility / library to sort Python imports." category = "dev" optional = false python-versions = ">=3.7.0" +files = [ + {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, + {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, +] [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] @@ -103,6 +220,10 @@ description = "Type system extensions for programs checked with the mypy type ch category = "dev" optional = false python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] [[package]] name = "pathspec" @@ -111,6 +232,10 @@ description = "Utility library for gitignore style pattern matching of file path category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, +] [[package]] name = "platformdirs" @@ -119,6 +244,10 @@ description = "A small Python package for determining appropriate platform-speci category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, + {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, +] [package.dependencies] typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""} @@ -129,21 +258,25 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes [[package]] name = "requests" -version = "2.28.2" +version = "2.31.0" description = "Python HTTP for Humans." 
category = "main" optional = false -python-versions = ">=3.7, <4" +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" +urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "tomli" @@ -152,6 +285,10 @@ description = "A lil' TOML parser" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] [[package]] name = "typed-ast" @@ -160,6 +297,32 @@ description = "a fork of Python 2 and 3 ast modules with type comment support" category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, + {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, + {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, + {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, + {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, + {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, + {file = 
"typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, + {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, + {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, + {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, +] [[package]] name = "typing-extensions" @@ -168,6 +331,10 @@ description = "Backported and Experimental Type Hints for Python 3.7+" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, + {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, +] [[package]] name = "urllib3" @@ -176,6 +343,10 @@ description = "HTTP library with thread-safe connection pooling, file post, and category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, + {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, +] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] @@ -189,187 +360,16 @@ description = "Backport of pathlib-compatible object wrapper for zip files" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, +] [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-o", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest 
(>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [metadata] -lock-version = "1.1" +lock-version = "2.0" python-versions = ">=3.7 <4" -content-hash = "d59a1e6ca96b6ac970e24ab5bdcf04f606ef709a25b5ddeed17817a6e7288654" - -[metadata.files] -black = [ - {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, - {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, - {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, - {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, - {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, - {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, - {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, - {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, - {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, - {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, - {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, - {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, -] -certifi = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, -] -charset-normalizer = [ - {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, - {file = 
"charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, - {file = 
"charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, - {file = 
"charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, - {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, -] -click = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] -colorama = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] -idna = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] -importlib-metadata = [ - {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, - {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, -] -isort = [ - {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, - {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, -] -mypy-extensions = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] -pathspec = [ - {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, - {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, -] -platformdirs = [ - {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, - {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, -] -requests = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, -] -tomli = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] -typed-ast = [ - {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, - {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, - {file = 
"typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, - {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, - {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, - {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, - {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, - {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, - {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, - {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, - {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, - {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, - {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, - {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, - {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, - {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, - {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, - {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, - {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, - {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, - {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, - {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, - {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, - {file = "typed_ast-1.5.4.tar.gz", hash = 
"sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, -] -typing-extensions = [ - {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, - {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, -] -urllib3 = [ - {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, - {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, -] -zipp = [ - {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, - {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, -] +content-hash = "d8d532b40f150de92019a33b8abfa8290b7bdc07f544278c1216ec5fdf099941" diff --git a/crates/aptos-faucet/integration-tests/pyproject.toml b/crates/aptos-faucet/integration-tests/pyproject.toml index fb4e2575e9295..c45c7fc1a744f 100644 --- a/crates/aptos-faucet/integration-tests/pyproject.toml +++ b/crates/aptos-faucet/integration-tests/pyproject.toml @@ -7,7 +7,7 @@ license = "Apache-2.0" [tool.poetry.dependencies] python = ">=3.7 <4" -requests = "^2.28.2" +requests = "^2.31.0" [tool.poetry.dev-dependencies] black = "^22.6.0" diff --git a/crates/aptos-ledger/Cargo.toml b/crates/aptos-ledger/Cargo.toml index c5400c2a104ed..50032bcb9cc42 100644 --- a/crates/aptos-ledger/Cargo.toml +++ b/crates/aptos-ledger/Cargo.toml @@ -13,8 +13,10 @@ repository = "https://github.com/aptos-labs/aptos-core/tree/main/crates/aptos-le rust-version = { workspace = true } [dependencies] -hex = "0.4.3" +aptos-crypto = { workspace = true } +aptos-types = { workspace = true } +hex = { workspace = true } ledger-apdu = "0.10.0" ledger-transport-hid = "0.10.0" -once_cell = "1.10.0" -thiserror = "1.0.37" +once_cell = { workspace = true } +thiserror = { workspace = true } diff --git a/crates/aptos-ledger/src/lib.rs b/crates/aptos-ledger/src/lib.rs index 461cd6b489e4d..a2442044877d4 100644 --- a/crates/aptos-ledger/src/lib.rs +++ b/crates/aptos-ledger/src/lib.rs @@ -7,21 +7,26 @@ #![deny(missing_docs)] +pub use aptos_crypto::{ed25519::Ed25519PublicKey, ValidCryptoMaterialStringExt}; +pub use aptos_types::{ + account_address::AccountAddress, transaction::authenticator::AuthenticationKey, +}; use hex::encode; use ledger_apdu::APDUCommand; use ledger_transport_hid::{hidapi::HidApi, LedgerHIDError, TransportNativeHID}; -use once_cell::sync::Lazy; use std::{ + collections::HashMap, fmt, fmt::{Debug, Display}, + ops::Range, str, + string::ToString, }; use thiserror::Error; // A piece of data which tells a wallet how to derive a specific key within a tree of keys // 637 is the key for Aptos -// TODO: Add support for multiple index -const DERIVATIVE_PATH: &str = "m/44'/637'/0'/0'/0'"; +const DERIVATIVE_PATH: &str = "m/44'/637'/{index}'/0'/0'"; const CLA_APTOS: u8 = 0x5B; // Aptos CLA Instruction class const INS_GET_VERSION: u8 = 0x03; // Get version instruction code @@ -37,8 +42,6 @@ const P1_START: u8 = 0x00; const P2_MORE: u8 = 0x80; const P2_LAST: u8 = 0x00; -static SERIALIZED_BIP32: Lazy> = Lazy::new(|| serialize_bip32(DERIVATIVE_PATH)); - #[derive(Debug, Error)] /// Aptos Ledger Error pub enum AptosLedgerError { @@ -143,17 +146,96 @@ pub fn get_app_name() -> Result { } } -/// Returns the public key of your Aptos account in Ledger device +/// 
Returns the the batch/HashMap of the accounts for the account index in index_range +/// Note: We only allow a range of 10 for performance purpose +/// +/// # Arguments +/// +/// * `index_range` - start(inclusive) - end(exclusive) acounts, that you want to fetch, if None default to 0-10 +pub fn fetch_batch_accounts( + index_range: Option>, +) -> Result, AptosLedgerError> { + let range = if let Some(range) = index_range { + range + } else { + 0..10 + }; + + // Make sure the range is within 10 counts + if range.end - range.start > 10 { + return Err(AptosLedgerError::UnexpectedError( + "Unexpected Error: Make sure the range is less than or equal to 10".to_string(), + None, + )); + } + + // Open connection to ledger + let transport = open_ledger_transport()?; + + let mut accounts = HashMap::new(); + for i in range { + let path = DERIVATIVE_PATH.replace("{index}", &i.to_string()); + let cdata = serialize_bip32(&path); + + match transport.exchange(&APDUCommand { + cla: CLA_APTOS, + ins: INS_GET_PUB_KEY, + p1: P1_NON_CONFIRM, + p2: P2_LAST, + data: cdata, + }) { + Ok(response) => { + // Got the response from ledger after user has confirmed on the ledger wallet + if response.retcode() == APDU_CODE_SUCCESS { + // Extract the Public key from the response data + let mut offset = 0; + let response_buffer = response.data(); + let pub_key_len: usize = (response_buffer[offset] - 1).into(); + offset += 1; + + // Skipping weird 0x04 - because of how the Aptos Ledger parse works when return pub key + offset += 1; + + let pub_key_buffer = response_buffer[offset..offset + pub_key_len].to_vec(); + let hex_string = encode(pub_key_buffer); + let public_key = match Ed25519PublicKey::from_encoded_string(&hex_string) { + Ok(pk) => Ok(pk), + Err(err) => Err(AptosLedgerError::UnexpectedError( + err.to_string(), + Some(response.retcode()), + )), + }; + let account = account_address_from_public_key(&public_key?); + accounts.insert(path, account); + } else { + let error_string = response + .error_code() + .map(|error_code| error_code.to_string()) + .unwrap_or_else(|retcode| format!("Error with retcode: {:x}", retcode)); + return Err(AptosLedgerError::UnexpectedError( + error_string, + Option::from(response.retcode()), + )); + } + }, + Err(err) => return Err(AptosLedgerError::from(err)), + } + } + + Ok(accounts) +} + +/// Returns the public key of your Aptos account in Ledger device at index 0 /// /// # Arguments /// /// * `display` - If true, the public key will be displayed on the Ledger device, and confirmation is needed -pub fn get_public_key(display: bool) -> Result { +pub fn get_public_key(path: &str, display: bool) -> Result { // Open connection to ledger let transport = open_ledger_transport()?; // Serialize the derivative path - let cdata = SERIALIZED_BIP32.clone(); + let cdata = serialize_bip32(path); // APDU command's instruction parameter 1 or p1 let p1: u8 = match display { @@ -182,7 +264,10 @@ pub fn get_public_key(display: bool) -> Result { let pub_key_buffer = response_buffer[offset..offset + pub_key_len].to_vec(); let hex_string = encode(pub_key_buffer); - Ok(hex_string) + match Ed25519PublicKey::from_encoded_string(&hex_string) { + Ok(pk) => Ok(pk), + Err(err) => Err(AptosLedgerError::UnexpectedError(err.to_string(), None)), + } } else { let error_string = response .error_code() @@ -203,12 +288,12 @@ pub fn get_public_key(display: bool) -> Result { /// # Arguments /// /// * `raw_txn` - the serialized raw transaction that need to be signed -pub fn sign_txn(raw_txn: Vec) -> Result, AptosLedgerError> { +pub 
fn sign_txn(path: &str, raw_txn: Vec) -> Result, AptosLedgerError> { // open connection to ledger let transport = open_ledger_transport()?; // Serialize the derivative path - let derivative_path_bytes = SERIALIZED_BIP32.clone(); + let derivative_path_bytes = serialize_bip32(path); // Send the derivative path over as first message let sign_start = transport.exchange(&APDUCommand { @@ -307,3 +392,8 @@ fn open_ledger_transport() -> Result { Ok(transport) } + +fn account_address_from_public_key(public_key: &Ed25519PublicKey) -> AccountAddress { + let auth_key = AuthenticationKey::ed25519(public_key); + AccountAddress::new(*auth_key.derived_address()) +} diff --git a/crates/aptos-rest-client/src/client_builder.rs b/crates/aptos-rest-client/src/client_builder.rs new file mode 100644 index 0000000000000..e04228566e584 --- /dev/null +++ b/crates/aptos-rest-client/src/client_builder.rs @@ -0,0 +1,101 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + get_version_path_with_base, Client, DEFAULT_VERSION_PATH_BASE, X_APTOS_SDK_HEADER_VALUE, +}; +use anyhow::Result; +use aptos_api_types::X_APTOS_CLIENT; +use reqwest::{ + header::{HeaderMap, HeaderName, HeaderValue}, + Client as ReqwestClient, ClientBuilder as ReqwestClientBuilder, +}; +use std::{str::FromStr, time::Duration}; +use url::Url; + +pub enum AptosBaseUrl { + Mainnet, + Devnet, + Testnet, + Custom(Url), +} + +impl AptosBaseUrl { + pub fn to_url(&self) -> Url { + match self { + AptosBaseUrl::Mainnet => { + Url::from_str("https://fullnode.mainnet.aptoslabs.com").unwrap() + }, + AptosBaseUrl::Devnet => Url::from_str("https://fullnode.devnet.aptoslabs.com").unwrap(), + AptosBaseUrl::Testnet => { + Url::from_str("https://fullnode.testnet.aptoslabs.com").unwrap() + }, + AptosBaseUrl::Custom(url) => url.to_owned(), + } + } +} + +pub struct ClientBuilder { + reqwest_builder: ReqwestClientBuilder, + version_path_base: String, + base_url: Url, + timeout: Duration, + headers: HeaderMap, +} + +impl ClientBuilder { + pub fn new(aptos_base_url: AptosBaseUrl) -> Self { + let mut headers = HeaderMap::new(); + headers.insert( + X_APTOS_CLIENT, + HeaderValue::from_static(X_APTOS_SDK_HEADER_VALUE), + ); + + Self { + reqwest_builder: ReqwestClient::builder(), + base_url: aptos_base_url.to_url(), + version_path_base: DEFAULT_VERSION_PATH_BASE.to_string(), + timeout: Duration::from_secs(10), // Default to 10 seconds + headers, + } + } + + pub fn base_url(mut self, base_url: Url) -> Self { + self.base_url = base_url; + self + } + + pub fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + pub fn header(mut self, header_key: &str, header_val: &str) -> Result { + self.headers.insert( + HeaderName::from_str(header_key)?, + HeaderValue::from_str(header_val)?, + ); + Ok(self) + } + + pub fn version_path_base(mut self, version_path_base: String) -> Self { + self.version_path_base = version_path_base; + self + } + + pub fn build(self) -> Client { + let version_path_base = get_version_path_with_base(self.base_url.clone()); + + Client { + inner: self + .reqwest_builder + .default_headers(self.headers) + .timeout(self.timeout) + .cookie_store(true) + .build() + .unwrap(), + base_url: self.base_url, + version_path_base, + } + } +} diff --git a/crates/aptos-rest-client/src/lib.rs b/crates/aptos-rest-client/src/lib.rs index 790fad31c18ee..1a6f653636f47 100644 --- a/crates/aptos-rest-client/src/lib.rs +++ b/crates/aptos-rest-client/src/lib.rs @@ -10,9 +10,11 @@ pub mod faucet; pub use 
faucet::FaucetClient; pub mod response; pub use response::Response; +pub mod client_builder; pub mod state; pub mod types; +pub use crate::client_builder::{AptosBaseUrl, ClientBuilder}; use crate::{ aptos::{AptosVersion, Balance}, error::RestError, @@ -50,7 +52,6 @@ use tokio::time::Instant; pub use types::{deserialize_from_prefixed_hex_string, Account, Resource}; use url::Url; -pub const USER_AGENT: &str = concat!("aptos-client-sdk-rust / ", env!("CARGO_PKG_VERSION")); pub const DEFAULT_VERSION_PATH_BASE: &str = "v1/"; const DEFAULT_MAX_WAIT_MS: u64 = 60000; const DEFAULT_INTERVAL_MS: u64 = 1000; @@ -59,6 +60,7 @@ static DEFAULT_INTERVAL_DURATION: Duration = Duration::from_millis(DEFAULT_INTER const DEFAULT_MAX_SERVER_LAG_WAIT_DURATION: Duration = Duration::from_secs(60); const RESOURCES_PER_CALL_PAGINATION: u64 = 9999; const MODULES_PER_CALL_PAGINATION: u64 = 1000; +const X_APTOS_SDK_HEADER_VALUE: &str = concat!("aptos-rust-sdk/", env!("CARGO_PKG_VERSION")); type AptosResult = Result; @@ -70,45 +72,12 @@ pub struct Client { } impl Client { - pub fn new_with_timeout(base_url: Url, timeout: Duration) -> Self { - Client::new_with_timeout_and_user_agent(base_url, timeout, USER_AGENT) - } - - pub fn new_with_timeout_and_user_agent( - base_url: Url, - timeout: Duration, - user_agent: &str, - ) -> Self { - let inner = ReqwestClient::builder() - .timeout(timeout) - .user_agent(user_agent) - .cookie_store(true) - .build() - .unwrap(); - - // If the user provided no version in the path, use the default. If the - // provided version has no trailing slash, add it, otherwise url.join - // will ignore the version path base. - let version_path_base = match base_url.path() { - "/" => DEFAULT_VERSION_PATH_BASE.to_string(), - path => { - if !path.ends_with('/') { - format!("{}/", path) - } else { - path.to_string() - } - }, - }; - - Self { - inner, - base_url, - version_path_base, - } + pub fn builder(aptos_base_url: AptosBaseUrl) -> ClientBuilder { + ClientBuilder::new(aptos_base_url) } pub fn new(base_url: Url) -> Self { - Self::new_with_timeout(base_url, Duration::from_secs(10)) + Self::builder(AptosBaseUrl::Custom(base_url)).build() } pub fn path_prefix_string(&self) -> String { @@ -1583,6 +1552,22 @@ impl Client { } } +// If the user provided no version in the path, use the default. If the +// provided version has no trailing slash, add it, otherwise url.join +// will ignore the version path base. 
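
The `ClientBuilder` introduced above replaces the removed `new_with_timeout`-style constructors on `Client`. A minimal usage sketch, assuming only the builder API exactly as added in this diff (the header name below is purely illustrative, not a real Aptos header):

```rust
use aptos_rest_client::{AptosBaseUrl, Client};
use std::time::Duration;

fn build_client() -> anyhow::Result<Client> {
    // Pick a well-known network, or use AptosBaseUrl::Custom(url) for a local node.
    let client = Client::builder(AptosBaseUrl::Testnet)
        // Override the builder's 10-second default timeout.
        .timeout(Duration::from_secs(30))
        // `header` returns a Result because the name/value are validated;
        // "x-example-header" is an illustrative name only.
        .header("x-example-header", "example-value")?
        .build();
    Ok(client)
}
```

Because `ClientBuilder::new` already injects the `x-aptos-client` header and a default timeout, most callers only need `Client::builder(...).build()`; the extra calls above just show the knobs the builder exposes.
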
+pub fn get_version_path_with_base(base_url: Url) -> String { + match base_url.path() { + "/" => DEFAULT_VERSION_PATH_BASE.to_string(), + path => { + if !path.ends_with('/') { + format!("{}/", path) + } else { + path.to_string() + } + }, + } +} + pub fn retriable_with_404(status_code: StatusCode, aptos_error: Option) -> bool { retriable(status_code, aptos_error) | matches!(status_code, StatusCode::NOT_FOUND) } diff --git a/crates/aptos-rosetta/src/account.rs b/crates/aptos-rosetta/src/account.rs index 182a3062d7bc3..12d6d205994c5 100644 --- a/crates/aptos-rosetta/src/account.rs +++ b/crates/aptos-rosetta/src/account.rs @@ -94,6 +94,46 @@ async fn get_balances( maybe_filter_currencies: Option>, ) -> ApiResult<(u64, Option>, Vec, u64)> { let owner_address = account.account_address()?; + let pool_address = account.pool_address()?; + + let mut balances = vec![]; + let mut lockup_expiration: u64 = 0; + let mut total_requested_balance: Option = None; + + if pool_address.is_some() { + match get_delegation_stake_balances( + rest_client, + &account, + owner_address, + pool_address.unwrap(), + version, + ) + .await + { + Ok(Some(balance_result)) => { + if let Some(balance) = balance_result.balance { + total_requested_balance = Some( + total_requested_balance.unwrap_or_default() + + u64::from_str(&balance.value).unwrap_or_default(), + ); + } + lockup_expiration = balance_result.lockup_expiration; + if let Some(balance) = total_requested_balance { + balances.push(Amount { + value: balance.to_string(), + currency: native_coin(), + }) + } + }, + result => { + warn!( + "Failed to retrieve requested balance for delegator_address: {}, pool_address: {}: {:?}", + owner_address, pool_address.unwrap(), result + ) + }, + } + } + // Retrieve all account resources if let Ok(response) = rest_client .get_account_resources_at_version_bcs(owner_address, version) @@ -102,8 +142,6 @@ async fn get_balances( let resources = response.into_inner(); let mut maybe_sequence_number = None; let mut maybe_operators = None; - let mut balances = vec![]; - let mut lockup_expiration: u64 = 0; // Iterate through resources, converting balances for (struct_tag, bytes) in resources { @@ -132,12 +170,11 @@ async fn get_balances( } }, (AccountAddress::ONE, STAKING_CONTRACT_MODULE, STORE_RESOURCE) => { - if account.is_base_account() { + if account.is_base_account() || pool_address.is_some() { continue; } let store: Store = bcs::from_bytes(&bytes)?; - let mut total_requested_balance: Option = None; maybe_operators = Some(vec![]); for (operator, contract) in store.staking_contracts { // Keep track of operators diff --git a/crates/aptos-rosetta/src/client.rs b/crates/aptos-rosetta/src/client.rs index 2d325756712c8..276347a004f2f 100644 --- a/crates/aptos-rosetta/src/client.rs +++ b/crates/aptos-rosetta/src/client.rs @@ -550,6 +550,45 @@ impl RosettaClient { .await } + pub async fn withdraw_undelegated_stake( + &self, + network_identifier: &NetworkIdentifier, + private_key: &Ed25519PrivateKey, + pool_address: AccountAddress, + amount: Option, + expiry_time_secs: u64, + sequence_number: Option, + max_gas: Option, + gas_unit_price: Option, + ) -> anyhow::Result { + let sender = self + .get_account_address(network_identifier.clone(), private_key) + .await?; + let mut keys = HashMap::new(); + keys.insert(sender, private_key); + + let operations = vec![Operation::withdraw_undelegated_stake( + 0, + None, + sender, + AccountIdentifier::base_account(pool_address), + amount, + )]; + + self.submit_operations( + sender, + network_identifier.clone(), + 
&keys, + operations, + expiry_time_secs, + sequence_number, + max_gas, + gas_unit_price, + false, + ) + .await + } + /// Retrieves the account address from the derivation path if there isn't an overriding account specified async fn get_account_address( &self, diff --git a/crates/aptos-rosetta/src/construction.rs b/crates/aptos-rosetta/src/construction.rs index 706936a9ee615..25fb607310d0b 100644 --- a/crates/aptos-rosetta/src/construction.rs +++ b/crates/aptos-rosetta/src/construction.rs @@ -566,7 +566,12 @@ async fn construction_parse( DELEGATION_POOL_MODULE, DELEGATION_POOL_ADD_STAKE_FUNCTION, ) => parse_delegation_pool_add_stake_operation(sender, &type_args, &args)?, - (AccountAddress::ONE, DELEGATION_POOL_MODULE, DELEGATION_POOL_UNLOCK_FUNCTTON) => { + ( + AccountAddress::ONE, + DELEGATION_POOL_MODULE, + DELEGATION_POOL_WITHDRAW_FUNCTION, + ) => parse_delegation_pool_withdraw_operation(sender, &type_args, &args)?, + (AccountAddress::ONE, DELEGATION_POOL_MODULE, DELEGATION_POOL_UNLOCK_FUNCTION) => { parse_delegation_pool_unlock_operation(sender, &type_args, &args)? }, _ => { @@ -941,6 +946,30 @@ pub fn parse_delegation_pool_unlock_operation( )]) } +pub fn parse_delegation_pool_withdraw_operation( + delegator: AccountAddress, + type_args: &[TypeTag], + args: &[Vec], +) -> ApiResult> { + if !type_args.is_empty() { + return Err(ApiError::TransactionParseError(Some(format!( + "add_delegated_stake should not have type arguments: {:?}", + type_args + )))); + } + + let pool_address: AccountAddress = parse_function_arg("withdraw_undelegated", args, 0)?; + let amount: u64 = parse_function_arg("withdraw_undelegated", args, 1)?; + + Ok(vec![Operation::withdraw_undelegated_stake( + 0, + None, + delegator, + AccountIdentifier::base_account(pool_address), + Some(amount), + )]) +} + /// Construction payloads command (OFFLINE) /// /// Constructs payloads for given known operations @@ -1112,6 +1141,25 @@ async fn construction_payloads( )))); } }, + InternalOperation::WithdrawUndelegated(inner) => { + if let InternalOperation::WithdrawUndelegated(ref metadata_op) = + metadata.internal_operation + { + if inner.delegator != metadata_op.delegator + || inner.pool_address != metadata_op.pool_address + { + return Err(ApiError::InvalidInput(Some(format!( + "Withdraw undelegated operation doesn't match metadata {:?} vs {:?}", + inner, metadata.internal_operation + )))); + } + } else { + return Err(ApiError::InvalidInput(Some(format!( + "Withdraw undelegated operation doesn't match metadata {:?} vs {:?}", + inner, metadata.internal_operation + )))); + } + }, } // Encode operation diff --git a/crates/aptos-rosetta/src/types/identifiers.rs b/crates/aptos-rosetta/src/types/identifiers.rs index d5ccc38819779..131eca159de9a 100644 --- a/crates/aptos-rosetta/src/types/identifiers.rs +++ b/crates/aptos-rosetta/src/types/identifiers.rs @@ -36,6 +36,16 @@ impl AccountIdentifier { str_to_account_address(self.address.as_str()) } + pub fn pool_address(&self) -> ApiResult> { + if let Some(sub_account) = &self.sub_account { + if let Some(metadata) = &sub_account.metadata { + return str_to_account_address(metadata.pool_address.as_str()).map(Some); + } + } + + Ok(None) + } + pub fn base_account(address: AccountAddress) -> Self { AccountIdentifier { address: to_hex_lower(&address), @@ -132,6 +142,30 @@ impl AccountIdentifier { } } + pub fn is_delegator_active_stake(&self) -> bool { + if let Some(ref inner) = self.sub_account { + inner.is_delegator_active_stake() + } else { + false + } + } + + pub fn 
is_delegator_inactive_stake(&self) -> bool { + if let Some(ref inner) = self.sub_account { + inner.is_delegator_inactive_stake() + } else { + false + } + } + + pub fn is_delegator_pending_inactive_stake(&self) -> bool { + if let Some(ref inner) = self.sub_account { + inner.is_delegator_pending_inactive_stake() + } else { + false + } + } + pub fn is_operator_stake(&self) -> bool { if let Some(ref inner) = self.sub_account { !(inner.is_total_stake() @@ -167,6 +201,9 @@ fn str_to_account_address(address: &str) -> Result { pub struct SubAccountIdentifier { /// Hex encoded AccountAddress beginning with 0x pub address: String, + /// Metadata only used for delegated staking + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, } const STAKE: &str = "stake"; @@ -180,36 +217,42 @@ impl SubAccountIdentifier { pub fn new_total_stake() -> SubAccountIdentifier { SubAccountIdentifier { address: STAKE.to_string(), + metadata: None, } } pub fn new_pending_active_stake() -> SubAccountIdentifier { SubAccountIdentifier { address: PENDING_ACTIVE_STAKE.to_string(), + metadata: None, } } pub fn new_active_stake() -> SubAccountIdentifier { SubAccountIdentifier { address: ACTIVE_STAKE.to_string(), + metadata: None, } } pub fn new_pending_inactive_stake() -> SubAccountIdentifier { SubAccountIdentifier { address: PENDING_INACTIVE_STAKE.to_string(), + metadata: None, } } pub fn new_inactive_stake() -> SubAccountIdentifier { SubAccountIdentifier { address: INACTIVE_STAKE.to_string(), + metadata: None, } } pub fn new_operator_stake(operator: AccountAddress) -> SubAccountIdentifier { SubAccountIdentifier { address: format!("{}-{}", STAKE, to_hex_lower(&operator)), + metadata: None, } } @@ -222,15 +265,27 @@ impl SubAccountIdentifier { } pub fn is_active_stake(&self) -> bool { - self.address.as_str() == ACTIVE_STAKE + self.address.as_str() == ACTIVE_STAKE && self.metadata.is_none() } pub fn is_pending_inactive_stake(&self) -> bool { - self.address.as_str() == PENDING_INACTIVE_STAKE + self.address.as_str() == PENDING_INACTIVE_STAKE && self.metadata.is_none() } pub fn is_inactive_stake(&self) -> bool { - self.address.as_str() == INACTIVE_STAKE + self.address.as_str() == INACTIVE_STAKE && self.metadata.is_none() + } + + pub fn is_delegator_active_stake(&self) -> bool { + self.address.as_str() == ACTIVE_STAKE && self.metadata.is_some() + } + + pub fn is_delegator_inactive_stake(&self) -> bool { + self.address.as_str() == INACTIVE_STAKE && self.metadata.is_some() + } + + pub fn is_delegator_pending_inactive_stake(&self) -> bool { + self.address.as_str() == PENDING_INACTIVE_STAKE && self.metadata.is_some() } pub fn operator_address(&self) -> ApiResult { @@ -251,6 +306,20 @@ impl SubAccountIdentifier { } } +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct SubAccountIdentifierMetadata { + /// Hex encoded Pool beginning with 0x + pub pool_address: String, +} + +impl SubAccountIdentifierMetadata { + pub fn new_pool_address(pool_address: AccountAddress) -> Self { + SubAccountIdentifierMetadata { + pool_address: to_hex_lower(&pool_address), + } + } +} + /// Identifier for a "block". 
In aptos, we use a transaction model, so the index /// represents multiple transactions in a "block" grouping of transactions /// diff --git a/crates/aptos-rosetta/src/types/misc.rs b/crates/aptos-rosetta/src/types/misc.rs index 800a876afc2a0..06e1ddb462c33 100644 --- a/crates/aptos-rosetta/src/types/misc.rs +++ b/crates/aptos-rosetta/src/types/misc.rs @@ -7,7 +7,9 @@ use crate::{ types::{AccountIdentifier, Amount}, AccountAddress, ApiResult, }; +use aptos_rest_client::aptos_api_types::{EntryFunctionId, ViewRequest}; use aptos_types::stake_pool::StakePool; +use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use std::{ convert::TryFrom, @@ -15,6 +17,11 @@ use std::{ str::FromStr, }; +static DELEGATION_POOL_GET_STAKE_FUNCTION: Lazy = + Lazy::new(|| "0x1::delegation_pool::get_stake".parse().unwrap()); +static STAKE_GET_LOCKUP_SECS_FUNCTION: Lazy = + Lazy::new(|| "0x1::stake::get_lockup_secs".parse().unwrap()); + /// Errors that can be returned by the API /// /// [API Spec](https://www.rosetta-api.org/docs/models/Error.html) @@ -102,6 +109,7 @@ pub enum OperationType { InitializeStakePool, ResetLockup, UnlockStake, + WithdrawUndelegatedFunds, DistributeStakingRewards, AddDelegatedStake, UnlockDelegatedStake, @@ -123,6 +131,7 @@ impl OperationType { const UNLOCK_DELEGATED_STAKE: &'static str = "unlock_delegated_stake"; const UNLOCK_STAKE: &'static str = "unlock_stake"; const WITHDRAW: &'static str = "withdraw"; + const WITHDRAW_UNDELEGATED_FUNDS: &'static str = "withdraw_undelegated_funds"; pub fn all() -> Vec { use OperationType::*; @@ -137,6 +146,7 @@ impl OperationType { InitializeStakePool, ResetLockup, UnlockStake, + WithdrawUndelegatedFunds, DistributeStakingRewards, AddDelegatedStake, UnlockDelegatedStake, @@ -162,6 +172,7 @@ impl FromStr for OperationType { Self::DISTRIBUTE_STAKING_REWARDS => Ok(OperationType::DistributeStakingRewards), Self::ADD_DELEGATED_STAKE => Ok(OperationType::AddDelegatedStake), Self::UNLOCK_DELEGATED_STAKE => Ok(OperationType::UnlockDelegatedStake), + Self::WITHDRAW_UNDELEGATED_FUNDS => Ok(OperationType::WithdrawUndelegatedFunds), _ => Err(ApiError::DeserializationFailed(Some(format!( "Invalid OperationType: {}", s @@ -186,6 +197,7 @@ impl Display for OperationType { DistributeStakingRewards => Self::DISTRIBUTE_STAKING_REWARDS, AddDelegatedStake => Self::ADD_DELEGATED_STAKE, UnlockDelegatedStake => Self::UNLOCK_DELEGATED_STAKE, + WithdrawUndelegatedFunds => Self::WITHDRAW_UNDELEGATED_FUNDS, Fee => Self::FEE, }) } @@ -315,3 +327,74 @@ pub async fn get_stake_balances( Ok(None) } } + +pub async fn get_delegation_stake_balances( + rest_client: &aptos_rest_client::Client, + account_identifier: &AccountIdentifier, + owner_address: AccountAddress, + pool_address: AccountAddress, + version: u64, +) -> ApiResult> { + let mut requested_balance: Option = None; + + // get requested_balance + let balances_response = rest_client + .view( + &ViewRequest { + function: DELEGATION_POOL_GET_STAKE_FUNCTION.clone(), + type_arguments: vec![], + arguments: vec![ + serde_json::Value::String(pool_address.to_string()), + serde_json::Value::String(owner_address.to_string()), + ], + }, + Some(version), + ) + .await?; + + let balances_result = balances_response.into_inner(); + if account_identifier.is_delegator_active_stake() { + requested_balance = balances_result + .get(0) + .and_then(|v| v.as_str().map(|s| s.to_owned())); + } else if account_identifier.is_delegator_inactive_stake() { + requested_balance = balances_result + .get(1) + .and_then(|v| v.as_str().map(|s| 
s.to_owned())); + } else if account_identifier.is_delegator_pending_inactive_stake() { + requested_balance = balances_result + .get(2) + .and_then(|v| v.as_str().map(|s| s.to_owned())); + } + + // get lockup_secs + let lockup_secs_response = rest_client + .view( + &ViewRequest { + function: STAKE_GET_LOCKUP_SECS_FUNCTION.clone(), + type_arguments: vec![], + arguments: vec![serde_json::Value::String(pool_address.to_string())], + }, + Some(version), + ) + .await?; + let lockup_secs_result = lockup_secs_response.into_inner(); + let lockup_expiration = lockup_secs_result + .get(0) + .and_then(|v| v.as_str().and_then(|s| s.parse::().ok())) + .unwrap_or(0); + + if let Some(balance) = requested_balance { + Ok(Some(BalanceResult { + balance: Some(Amount { + value: balance, + currency: native_coin(), + }), + lockup_expiration, + })) + } else { + Err(ApiError::InternalError(Some( + "Unable to construct BalanceResult instance".to_string(), + ))) + } +} diff --git a/crates/aptos-rosetta/src/types/move_types.rs b/crates/aptos-rosetta/src/types/move_types.rs index 58eb18a2e4fdf..3315b7bee335b 100644 --- a/crates/aptos-rosetta/src/types/move_types.rs +++ b/crates/aptos-rosetta/src/types/move_types.rs @@ -41,7 +41,7 @@ pub const DISTRIBUTE_STAKING_REWARDS_FUNCTION: &str = "distribute"; // Delegation Pool Contract pub const DELEGATION_POOL_ADD_STAKE_FUNCTION: &str = "add_stake"; -pub const DELEGATION_POOL_UNLOCK_FUNCTTON: &str = "unlock"; +pub const DELEGATION_POOL_UNLOCK_FUNCTION: &str = "unlock"; pub const DELEGATION_POOL_WITHDRAW_FUNCTION: &str = "withdraw"; pub const DECIMALS_FIELD: &str = "decimal"; diff --git a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs index e8070b56994be..3b3e3c0447f3a 100644 --- a/crates/aptos-rosetta/src/types/objects.rs +++ b/crates/aptos-rosetta/src/types/objects.rs @@ -9,9 +9,9 @@ use crate::{ common::{is_native_coin, native_coin, native_coin_tag}, construction::{ parse_create_stake_pool_operation, parse_delegation_pool_add_stake_operation, - parse_delegation_pool_unlock_operation, parse_distribute_staking_rewards_operation, - parse_reset_lockup_operation, parse_set_operator_operation, parse_set_voter_operation, - parse_unlock_stake_operation, + parse_delegation_pool_unlock_operation, parse_delegation_pool_withdraw_operation, + parse_distribute_staking_rewards_operation, parse_reset_lockup_operation, + parse_set_operator_operation, parse_set_voter_operation, parse_unlock_stake_operation, }, error::ApiResult, types::{ @@ -545,6 +545,26 @@ impl Operation { )), ) } + + pub fn withdraw_undelegated_stake( + operation_index: u64, + status: Option, + owner: AccountAddress, + pool_address: AccountIdentifier, + amount: Option, + ) -> Operation { + Operation::new( + OperationType::WithdrawUndelegatedFunds, + operation_index, + status, + AccountIdentifier::base_account(owner), + None, + Some(OperationMetadata::withdraw_undelegated_stake( + pool_address, + amount, + )), + ) + } } impl std::cmp::PartialOrd for Operation { @@ -686,6 +706,17 @@ impl OperationMetadata { ..Default::default() } } + + pub fn withdraw_undelegated_stake( + pool_address: AccountIdentifier, + amount: Option, + ) -> Self { + OperationMetadata { + pool_address: Some(pool_address), + amount: amount.map(U64::from), + ..Default::default() + } + } } /// Public key used for the rosetta implementation. 
All private keys will never be handled @@ -1020,7 +1051,18 @@ fn parse_failed_operations_from_txn_payload( warn!("Failed to parse delegation_pool::add_stake {:?}", inner); } }, - (AccountAddress::ONE, DELEGATION_POOL_MODULE, DELEGATION_POOL_UNLOCK_FUNCTTON) => { + (AccountAddress::ONE, DELEGATION_POOL_MODULE, DELEGATION_POOL_WITHDRAW_FUNCTION) => { + if let Ok(mut ops) = + parse_delegation_pool_withdraw_operation(sender, inner.ty_args(), inner.args()) + { + if let Some(operation) = ops.get_mut(0) { + operation.status = Some(OperationStatusType::Failure.to_string()); + } + } else { + warn!("Failed to parse delegation_pool::withdraw {:?}", inner); + } + }, + (AccountAddress::ONE, DELEGATION_POOL_MODULE, DELEGATION_POOL_UNLOCK_FUNCTION) => { if let Ok(mut ops) = parse_delegation_pool_unlock_operation(sender, inner.ty_args(), inner.args()) { @@ -1671,6 +1713,7 @@ pub enum InternalOperation { InitializeStakePool(InitializeStakePool), ResetLockup(ResetLockup), UnlockStake(UnlockStake), + WithdrawUndelegated(WithdrawUndelegated), DistributeStakingRewards(DistributeStakingRewards), AddDelegatedStake(AddDelegatedStake), UnlockDelegatedStake(UnlockDelegatedStake), @@ -1871,6 +1914,23 @@ impl InternalOperation { })); } }, + Ok(OperationType::WithdrawUndelegatedFunds) => { + if let ( + Some(OperationMetadata { + pool_address: Some(pool_address), + amount, + .. + }), + Some(account), + ) = (&operation.metadata, &operation.account) + { + return Ok(Self::WithdrawUndelegated(WithdrawUndelegated { + delegator: account.account_address()?, + amount_withdrawn: amount.map(u64::from).unwrap_or_default(), + pool_address: pool_address.account_address()?, + })); + } + }, _ => {}, } } @@ -1899,6 +1959,7 @@ impl InternalOperation { Self::InitializeStakePool(inner) => inner.owner, Self::ResetLockup(inner) => inner.owner, Self::UnlockStake(inner) => inner.owner, + Self::WithdrawUndelegated(inner) => inner.delegator, Self::DistributeStakingRewards(inner) => inner.sender, Self::AddDelegatedStake(inner) => inner.delegator, Self::UnlockDelegatedStake(inner) => inner.delegator, @@ -1990,6 +2051,13 @@ impl InternalOperation { ), unlock_delegated_stake.delegator, ), + InternalOperation::WithdrawUndelegated(withdraw_undelegated) => ( + aptos_stdlib::delegation_pool_withdraw( + withdraw_undelegated.pool_address, + withdraw_undelegated.amount_withdrawn, + ), + withdraw_undelegated.delegator, + ), }) } } @@ -2156,6 +2224,13 @@ pub struct UnlockStake { pub amount: u64, } +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct WithdrawUndelegated { + pub delegator: AccountAddress, + pub pool_address: AccountAddress, + pub amount_withdrawn: u64, +} + #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct DistributeStakingRewards { pub sender: AccountAddress, diff --git a/crates/aptos-rosetta/src/types/requests.rs b/crates/aptos-rosetta/src/types/requests.rs index 624312ec58a8d..32d74addcff11 100644 --- a/crates/aptos-rosetta/src/types/requests.rs +++ b/crates/aptos-rosetta/src/types/requests.rs @@ -375,9 +375,10 @@ pub struct PreprocessMetadata { } /// A gas price priority for what gas price to use -#[derive(Debug, Copy, Clone, Eq, PartialEq)] +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] pub enum GasPricePriority { Low, + #[default] Normal, High, } @@ -392,12 +393,6 @@ impl GasPricePriority { } } -impl Default for GasPricePriority { - fn default() -> Self { - GasPricePriority::Normal - } -} - impl Display for GasPricePriority { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result 
{ f.write_str(self.as_str()) diff --git a/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs b/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs index 7cc4e92367517..50b1bc0966c1c 100644 --- a/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs +++ b/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs @@ -11,9 +11,12 @@ use crate::{ types::{auth::Claims, common::NodeType}, }; use reqwest::{header::CONTENT_ENCODING, StatusCode}; +use std::time::Duration; use tokio::time::Instant; use warp::{filters::BoxedFilter, hyper::body::Bytes, reject, reply, Filter, Rejection, Reply}; +const MAX_METRICS_POST_WAIT_DURATION_SECS: u64 = 5; + pub fn metrics_ingest(context: Context) -> BoxedFilter<(impl Reply,)> { warp::path!("ingest" / "metrics") .and(warp::post()) @@ -58,16 +61,18 @@ pub async fn handle_metrics_ingest( let start_timer = Instant::now(); let post_futures = client.iter().map(|(name, client)| async { - let result = client - .post_prometheus_metrics( + let result = tokio::time::timeout( + Duration::from_secs(MAX_METRICS_POST_WAIT_DURATION_SECS), + client.post_prometheus_metrics( metrics_body.clone(), extra_labels.clone(), encoding.clone().unwrap_or_default(), - ) - .await; + ), + ) + .await; match result { - Ok(res) => { + Ok(Ok(res)) => { METRICS_INGEST_BACKEND_REQUEST_DURATION .with_label_values(&[&claims.peer_id.to_string(), name, res.status().as_str()]) .observe(start_timer.elapsed().as_secs_f64()); @@ -82,7 +87,7 @@ pub async fn handle_metrics_ingest( return Err(()); } }, - Err(err) => { + Ok(Err(err)) => { METRICS_INGEST_BACKEND_REQUEST_DURATION .with_label_values(&[name, "Unknown"]) .observe(start_timer.elapsed().as_secs_f64()); @@ -93,6 +98,7 @@ pub async fn handle_metrics_ingest( ); return Err(()); }, + Err(_) => return Err(()), } Ok(()) }); @@ -267,7 +273,7 @@ mod test { handle_metrics_ingest(test_context.inner, claims, Some("gzip".into()), body).await; mock1.assert(); - mock2.assert_hits(4); + assert!(mock2.hits_async().await >= 1); assert!(result.is_ok()); } @@ -302,7 +308,7 @@ mod test { let result = handle_metrics_ingest(test_context.inner, claims, Some("gzip".into()), body).await; - mock1.assert_hits(4); + assert!(mock1.hits_async().await >= 1); mock2.assert(); assert!(result.is_err()); } diff --git a/crates/aptos-telemetry/src/sender.rs b/crates/aptos-telemetry/src/sender.rs index 2c28206a1db8a..a9cb87bc93f36 100644 --- a/crates/aptos-telemetry/src/sender.rs +++ b/crates/aptos-telemetry/src/sender.rs @@ -21,11 +21,14 @@ use prometheus::{default_registry, Registry}; use reqwest::{header::CONTENT_ENCODING, Response, StatusCode, Url}; use reqwest_middleware::{ClientBuilder, ClientWithMiddleware, RequestBuilder}; use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; -use std::{io::Write, sync::Arc}; +use std::{io::Write, sync::Arc, time::Duration}; use uuid::Uuid; pub const DEFAULT_VERSION_PATH_BASE: &str = "api/v1/"; +pub const PROMETHEUS_PUSH_METRICS_TIMEOUT_SECS: u64 = 8; +pub const TELEMETRY_SERVICE_TOTAL_RETRY_DURATION_SECS: u64 = 10; + struct AuthContext { noise_config: Option, token: RwLock>, @@ -56,7 +59,9 @@ pub(crate) struct TelemetrySender { impl TelemetrySender { pub fn new(base_url: Url, chain_id: ChainId, node_config: &NodeConfig) -> Self { - let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3); + let retry_policy = ExponentialBackoff::builder().build_with_total_retry_duration( + Duration::from_secs(TELEMETRY_SERVICE_TOTAL_RETRY_DURATION_SECS), + ); let reqwest_client = 
reqwest::Client::new(); let client = ClientBuilder::new(reqwest_client) @@ -135,7 +140,8 @@ impl TelemetrySender { self.client .post(self.build_path("ingest/metrics")?) .header(CONTENT_ENCODING, "gzip") - .body(compressed_bytes), + .body(compressed_bytes) + .timeout(Duration::from_secs(PROMETHEUS_PUSH_METRICS_TIMEOUT_SECS)), ) .await; diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index c1f5b5db04ae5..ba6df3898eadc 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -1,17 +1,27 @@ # Aptos CLI Changelog All notable changes to the Aptos CLI will be captured in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). -## [1.0.14] - 2023/05/26 + +## In Progress ### Added -- Add nested vector arg support -- Updated DB bootstrap command with new DB restore features +- Added account lookup by authentication key + - Example: `account lookup-address --auth-key {your_auth_key}` -## [1.0.14] - 2023/05/25 +## [2.0.1] - 2023/06/05 +### Fixed +- Updated txn expiration configuration for the faucet built into the CLI to make local testnet startup more reliable. +## [2.0.0] - 2023/06/01 ### Added -- Recursive nested vector parsing - Multisig v2 governance support -- JSON support for both input files and CLI argument input +- JSON input file support +- Builder Pattern support for RestClient + - NOTE: Methods **new_with_timeout** and **new_with_timeout_and_user_agent** are no longer available. +- Added custom header *x-aptos-client* for analytic purpose + +## [1.0.14] - 2023/05/26 +- Updated DB bootstrap command with new DB restore features +- Nested vector arg support - **Breaking change**: You can no longer pass in a vector like this: `--arg vector
:0x1,0x2`, you must do it like this: `--arg 'address:["0x1", "0x2"]'` ## [1.0.13] - 2023/04/27 diff --git a/crates/aptos/Cargo.toml b/crates/aptos/Cargo.toml index f601f45b0a484..03b7174280024 100644 --- a/crates/aptos/Cargo.toml +++ b/crates/aptos/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "aptos" description = "Aptos tool for management of nodes and interacting with the blockchain" -version = "1.0.14" +version = "2.0.1" # Workspace inherited keys authors = { workspace = true } @@ -14,6 +14,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } +aptos-api-types = { workspace = true } aptos-backup-cli = { workspace = true } aptos-bitvec = { workspace = true } aptos-build-info = { workspace = true } diff --git a/crates/aptos/e2e/README.md b/crates/aptos/e2e/README.md index 4d9c12343b101..740d6ff7facbe 100644 --- a/crates/aptos/e2e/README.md +++ b/crates/aptos/e2e/README.md @@ -10,6 +10,7 @@ curl -sSL https://install.python-poetry.org | python3 - Once you have Poetry, you can install the dependencies for the testing framework like this: ``` +poetry config virtualenvs.in-project true # This helps with IDE integration poetry install ``` @@ -18,9 +19,14 @@ To learn how to use the CLI testing framework, run this: poetry run python main.py -h ``` -For example: +For example, using the CLI from an image: ``` -poetry run python main.py --base-network mainnet --test-cli-tag mainnet +poetry run python main.py --base-network mainnet --test-cli-tag nightly +``` + +Using the CLI from a local path: +``` +poetry run python main.py -d --base-network mainnet --test-cli-path ~/aptos-core/target/debug/aptos ``` ## Debugging diff --git a/crates/aptos/e2e/cases/account.py b/crates/aptos/e2e/cases/account.py index 5359c83ee8edd..1a4c91ec05d7e 100644 --- a/crates/aptos/e2e/cases/account.py +++ b/crates/aptos/e2e/cases/account.py @@ -60,3 +60,23 @@ def test_account_create(run_helper: RunHelper, test_name=None): raise TestError( f"Account {OTHER_ACCOUNT_ONE.account_address} has balance {balance}, expected 0" ) + + +@test_case +def test_account_lookup_address(run_helper: RunHelper, test_name=None): + # Create the new account. + result_addr = run_helper.run_command( + test_name, + [ + "aptos", + "account", + "lookup-address", + "--auth-key", + run_helper.get_account_info().account_address, # initially the account address is the auth key + ], + ) + + if run_helper.get_account_info().account_address not in result_addr.stdout: + raise TestError( + f"lookup-address result does not match {run_helper.get_account_info().account_address}" + ) diff --git a/crates/aptos/e2e/cases/init.py b/crates/aptos/e2e/cases/init.py index 12b8df0865cbe..096e628ada6ad 100644 --- a/crates/aptos/e2e/cases/init.py +++ b/crates/aptos/e2e/cases/init.py @@ -3,6 +3,7 @@ import os +import requests from common import TestError from test_helpers import RunHelper from test_results import test_case @@ -41,3 +42,21 @@ def test_init(run_helper: RunHelper, test_name=None): raise TestError( f"Failed to query local testnet for account {account_info.account_address}" ) from e + + +@test_case +def test_metrics_accessible(run_helper: RunHelper, test_name=None): + # Assert that the metrics endpoint is accessible and returns valid json if + # requested. If the endpoint is not accessible or does not return valid + # JSON this will throw an exception which will be caught as a test failure. 
+ metrics_url = run_helper.get_metrics_url(json=True) + requests.get(metrics_url).json() + + +@test_case +def test_aptos_header_included(run_helper: RunHelper, test_name=None): + # Make sure the aptos-cli header is included on the original request + response = requests.get(run_helper.get_metrics_url()) + + if 'request_source_client="aptos-cli' not in response.text: + raise TestError("Request should contain the correct aptos header: aptos-cli") diff --git a/crates/aptos/e2e/cases/move.py b/crates/aptos/e2e/cases/move.py new file mode 100644 index 0000000000000..a2b146cbb5ae1 --- /dev/null +++ b/crates/aptos/e2e/cases/move.py @@ -0,0 +1,61 @@ +# Copyright © Aptos Foundation +# SPDX-License-Identifier: Apache-2.0 + +import json + +from common import TestError +from test_helpers import RunHelper +from test_results import test_case + + +@test_case +def test_move_publish(run_helper: RunHelper, test_name=None): + # Prior to this function running the move/ directory was moved into the working + # directory in the host, which is then mounted into the container. The CLI is + # then run in this directory, meaning the move/ directory is in the same directory + # as the CLI is run from. This is why we can just refer to the package dir starting + # with move/ here. + package_dir = f"move/{run_helper.base_network}" + + # Publish the module. + run_helper.run_command( + test_name, + [ + "aptos", + "move", + "publish", + "--assume-yes", + "--package-dir", + package_dir, + "--named-addresses", + f"addr={run_helper.get_account_info().account_address}", + ], + ) + + # Get what modules exist on chain. + response = run_helper.run_command( + test_name, + [ + "aptos", + "account", + "list", + "--account", + run_helper.get_account_info().account_address, + "--query", + "modules", + ], + ) + + # Confirm that the module exists on chain. 
+ response = json.loads(response.stdout) + for module in response["Result"]: + if ( + module["abi"]["address"] + == f"0x{run_helper.get_account_info().account_address}" + and module["abi"]["name"] == "cli_e2e_tests" + ): + return + + raise TestError( + "Module apparently published successfully but it could not be found on chain" + ) diff --git a/crates/aptos/e2e/common.py b/crates/aptos/e2e/common.py index 0240163cf91d7..88fd2c1dcde92 100644 --- a/crates/aptos/e2e/common.py +++ b/crates/aptos/e2e/common.py @@ -6,6 +6,7 @@ from enum import Enum NODE_PORT = 8080 +METRICS_PORT = 9101 FAUCET_PORT = 8081 diff --git a/crates/aptos/e2e/local_testnet.py b/crates/aptos/e2e/local_testnet.py index 0f8ced174be12..688d8210e2af0 100644 --- a/crates/aptos/e2e/local_testnet.py +++ b/crates/aptos/e2e/local_testnet.py @@ -8,7 +8,7 @@ import time import requests -from common import FAUCET_PORT, NODE_PORT, Network, build_image_name +from common import FAUCET_PORT, METRICS_PORT, NODE_PORT, Network, build_image_name LOG = logging.getLogger(__name__) @@ -51,6 +51,8 @@ def run_node(network: Network, image_repo_with_project: str): "-p", f"{NODE_PORT}:{NODE_PORT}", "-p", + f"{METRICS_PORT}:{METRICS_PORT}", + "-p", f"{FAUCET_PORT}:{FAUCET_PORT}", image_name, "aptos", diff --git a/crates/aptos/e2e/main.py b/crates/aptos/e2e/main.py index faad4fb68e195..d56a552a82af3 100644 --- a/crates/aptos/e2e/main.py +++ b/crates/aptos/e2e/main.py @@ -29,8 +29,13 @@ import shutil import sys -from cases.account import test_account_create, test_account_fund_with_faucet -from cases.init import test_init +from cases.account import ( + test_account_create, + test_account_fund_with_faucet, + test_account_lookup_address, +) +from cases.init import test_aptos_header_included, test_init, test_metrics_accessible +from cases.move import test_move_publish from common import Network from local_testnet import run_node, stop_node, wait_for_startup from test_helpers import RunHelper @@ -96,12 +101,22 @@ def parse_args(): def run_tests(run_helper): + # Make sure the metrics port is accessible. + test_metrics_accessible(run_helper) + # Run init tests. We run these first to set up the CLI. test_init(run_helper) # Run account tests. test_account_fund_with_faucet(run_helper) test_account_create(run_helper) + test_account_lookup_address(run_helper) + + # Make sure the aptos-cli header is included on the original request + test_aptos_header_included(run_helper) + + # Run move subcommand group tests. + test_move_publish(run_helper) def main(): @@ -113,30 +128,36 @@ def main(): else: logging.getLogger().setLevel(logging.INFO) - # Run a node + faucet and wait for them to start up. - container_name = run_node(args.base_network, args.image_repo_with_project) - wait_for_startup(container_name, args.base_startup_timeout) - # Create the dir the test CLI will run from. shutil.rmtree(args.working_directory, ignore_errors=True) pathlib.Path(args.working_directory).mkdir(parents=True, exist_ok=True) - # Build the RunHelper object. - run_helper = RunHelper( - host_working_directory=args.working_directory, - image_repo_with_project=args.image_repo_with_project, - image_tag=args.test_cli_tag, - cli_path=args.test_cli_path, - ) - - # Prepare the run helper. This ensures in advance that everything needed is there. - run_helper.prepare() - - # Run tests. - run_tests(run_helper) + # Run a node + faucet and wait for them to start up. + container_name = run_node(args.base_network, args.image_repo_with_project) - # Stop the node + faucet. 
- stop_node(container_name) + # We run these in a try finally so that if something goes wrong, such as the + # local testnet not starting up correctly or some unexpected error in the + # test framework, we still stop the node + faucet. + try: + wait_for_startup(container_name, args.base_startup_timeout) + + # Build the RunHelper object. + run_helper = RunHelper( + host_working_directory=args.working_directory, + image_repo_with_project=args.image_repo_with_project, + image_tag=args.test_cli_tag, + cli_path=args.test_cli_path, + base_network=args.base_network, + ) + + # Prepare the run helper. This ensures in advance that everything needed is there. + run_helper.prepare() + + # Run tests. + run_tests(run_helper) + finally: + # Stop the node + faucet. + stop_node(container_name) # Print out the results. if test_results.passed: diff --git a/crates/aptos/e2e/poetry.lock b/crates/aptos/e2e/poetry.lock index 2708f4a9a8f32..fb6928af541c0 100644 --- a/crates/aptos/e2e/poetry.lock +++ b/crates/aptos/e2e/poetry.lock @@ -1,3 +1,5 @@ +# This file is automatically @generated by Poetry and should not be changed by hand. + [[package]] name = "anyio" version = "3.6.2" @@ -5,6 +7,10 @@ description = "High level compatibility layer for multiple asynchronous event lo category = "main" optional = false python-versions = ">=3.6.2" +files = [ + {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"}, + {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"}, +] [package.dependencies] idna = ">=2.8" @@ -23,6 +29,9 @@ description = "" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "aptos_sdk-0.5.1.tar.gz", hash = "sha256:3711ad2bf1120fff463cd5f494162c4658f03dd6bfbf1f523ee9aea01a4cb0f0"}, +] [package.dependencies] httpx = ">=0.23.0,<0.24.0" @@ -36,6 +45,20 @@ description = "The uncompromising code formatter." 
category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, +] [package.dependencies] click = ">=8.0.0" @@ -59,6 +82,10 @@ description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, +] [[package]] name = "cffi" @@ -67,6 +94,72 @@ description = "Foreign Function Interface for Python calling C code." 
category = "main" optional = false python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] [package.dependencies] pycparser = "*" @@ -78,6 +171,83 @@ description = "The Real First Universal Charset Detector. 
Open, modern and activ category = "main" optional = false python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = 
"charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, +] [[package]] name = "click" @@ -86,6 +256,10 @@ description = "Composable command line interface toolkit" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -98,6 +272,10 @@ description = "Cross-platform colored terminal text." category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] [[package]] name = "h11" @@ -106,6 +284,10 @@ description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] [package.dependencies] typing-extensions = {version = "*", markers = "python_version < \"3.8\""} @@ -117,6 +299,10 @@ description = "A minimal low-level HTTP client." 
category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "httpcore-0.16.3-py3-none-any.whl", hash = "sha256:da1fb708784a938aa084bde4feb8317056c55037247c787bd7e19eb2c2949dc0"}, + {file = "httpcore-0.16.3.tar.gz", hash = "sha256:c5d6f04e2fc530f39e0c077e6a30caa53f1451096120f1f38b954afd0b17c0cb"}, +] [package.dependencies] anyio = ">=3.0,<5.0" @@ -135,6 +321,10 @@ description = "The next generation HTTP client." category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "httpx-0.23.3-py3-none-any.whl", hash = "sha256:a211fcce9b1254ea24f0cd6af9869b3d29aba40154e947d2a07bb499b3e310d6"}, + {file = "httpx-0.23.3.tar.gz", hash = "sha256:9818458eb565bb54898ccb9b8b251a28785dd4a55afbc23d0eb410754fe7d0f9"}, +] [package.dependencies] certifi = "*" @@ -155,6 +345,10 @@ description = "Internationalized Domain Names in Applications (IDNA)" category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] [[package]] name = "importlib-metadata" @@ -163,6 +357,10 @@ description = "Read metadata from Python packages" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, + {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, +] [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} @@ -180,6 +378,10 @@ description = "A Python utility / library to sort Python imports." 
category = "dev" optional = false python-versions = ">=3.7.0" +files = [ + {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, + {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, +] [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] @@ -194,6 +396,32 @@ description = "Optional static typing for Python" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "mypy-0.982-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5085e6f442003fa915aeb0a46d4da58128da69325d8213b4b35cc7054090aed5"}, + {file = "mypy-0.982-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:41fd1cf9bc0e1c19b9af13a6580ccb66c381a5ee2cf63ee5ebab747a4badeba3"}, + {file = "mypy-0.982-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f793e3dd95e166b66d50e7b63e69e58e88643d80a3dcc3bcd81368e0478b089c"}, + {file = "mypy-0.982-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86ebe67adf4d021b28c3f547da6aa2cce660b57f0432617af2cca932d4d378a6"}, + {file = "mypy-0.982-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:175f292f649a3af7082fe36620369ffc4661a71005aa9f8297ea473df5772046"}, + {file = "mypy-0.982-cp310-cp310-win_amd64.whl", hash = "sha256:8ee8c2472e96beb1045e9081de8e92f295b89ac10c4109afdf3a23ad6e644f3e"}, + {file = "mypy-0.982-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58f27ebafe726a8e5ccb58d896451dd9a662a511a3188ff6a8a6a919142ecc20"}, + {file = "mypy-0.982-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6af646bd46f10d53834a8e8983e130e47d8ab2d4b7a97363e35b24e1d588947"}, + {file = "mypy-0.982-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7aeaa763c7ab86d5b66ff27f68493d672e44c8099af636d433a7f3fa5596d40"}, + {file = "mypy-0.982-cp37-cp37m-win_amd64.whl", hash = "sha256:724d36be56444f569c20a629d1d4ee0cb0ad666078d59bb84f8f887952511ca1"}, + {file = "mypy-0.982-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14d53cdd4cf93765aa747a7399f0961a365bcddf7855d9cef6306fa41de01c24"}, + {file = "mypy-0.982-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:26ae64555d480ad4b32a267d10cab7aec92ff44de35a7cd95b2b7cb8e64ebe3e"}, + {file = "mypy-0.982-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6389af3e204975d6658de4fb8ac16f58c14e1bacc6142fee86d1b5b26aa52bda"}, + {file = "mypy-0.982-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b35ce03a289480d6544aac85fa3674f493f323d80ea7226410ed065cd46f206"}, + {file = "mypy-0.982-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c6e564f035d25c99fd2b863e13049744d96bd1947e3d3d2f16f5828864506763"}, + {file = "mypy-0.982-cp38-cp38-win_amd64.whl", hash = "sha256:cebca7fd333f90b61b3ef7f217ff75ce2e287482206ef4a8b18f32b49927b1a2"}, + {file = "mypy-0.982-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a705a93670c8b74769496280d2fe6cd59961506c64f329bb179970ff1d24f9f8"}, + {file = "mypy-0.982-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:75838c649290d83a2b83a88288c1eb60fe7a05b36d46cbea9d22efc790002146"}, + {file = "mypy-0.982-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:91781eff1f3f2607519c8b0e8518aad8498af1419e8442d5d0afb108059881fc"}, + {file = "mypy-0.982-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaa97b9ddd1dd9901a22a879491dbb951b5dec75c3b90032e2baa7336777363b"}, + {file = "mypy-0.982-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:a692a8e7d07abe5f4b2dd32d731812a0175626a90a223d4b58f10f458747dd8a"}, + {file = "mypy-0.982-cp39-cp39-win_amd64.whl", hash = "sha256:eb7a068e503be3543c4bd329c994103874fa543c1727ba5288393c21d912d795"}, + {file = "mypy-0.982-py3-none-any.whl", hash = "sha256:1021c241e8b6e1ca5a47e4d52601274ac078a89845cfde66c6d5f769819ffa1d"}, + {file = "mypy-0.982.tar.gz", hash = "sha256:85f7a343542dc8b1ed0a888cdd34dca56462654ef23aa673907305b260b3d746"}, +] [package.dependencies] mypy-extensions = ">=0.4.3" @@ -213,6 +441,10 @@ description = "Type system extensions for programs checked with the mypy type ch category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] [[package]] name = "pathspec" @@ -221,6 +453,10 @@ description = "Utility library for gitignore style pattern matching of file path category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.0-py3-none-any.whl", hash = "sha256:3a66eb970cbac598f9e5ccb5b2cf58930cd8e3ed86d393d541eaf2d8b1705229"}, + {file = "pathspec-0.11.0.tar.gz", hash = "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"}, +] [[package]] name = "platformdirs" @@ -229,6 +465,10 @@ description = "A small Python package for determining appropriate platform-speci category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.1.0-py3-none-any.whl", hash = "sha256:13b08a53ed71021350c9e300d4ea8668438fb0046ab3937ac9a29913a1a1350a"}, + {file = "platformdirs-3.1.0.tar.gz", hash = "sha256:accc3665857288317f32c7bebb5a8e482ba717b474f3fc1d18ca7f9214be0cef"}, +] [package.dependencies] typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""} @@ -244,6 +484,10 @@ description = "C parser in Python" category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] [[package]] name = "pynacl" @@ -252,6 +496,18 @@ description = "Python binding to the Networking and Cryptography (NaCl) library" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = 
"sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, + {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, +] [package.dependencies] cffi = ">=1.4.1" @@ -262,21 +518,25 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "requests" -version = "2.28.2" +version = "2.31.0" description = "Python HTTP for Humans." category = "main" optional = false -python-versions = ">=3.7, <4" +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" +urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rfc3986" @@ -285,6 +545,10 @@ description = "Validating URI References per RFC 3986" category = "main" optional = false python-versions = "*" +files = [ + {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"}, + {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"}, +] [package.dependencies] idna = {version = "*", optional = true, markers = "extra == \"idna2008\""} @@ -299,6 +563,10 @@ description = "Sniff out which async library your code is running under" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] [[package]] name = "tomli" @@ -307,6 +575,10 @@ description = "A lil' TOML parser" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] [[package]] name = "typed-ast" @@ -315,317 +587,7 @@ description = "a fork of Python 2 and 3 ast modules with type comment support" category = "main" optional = false python-versions = ">=3.6" - -[[package]] -name = "typing-extensions" -version = "4.5.0" -description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "urllib3" -version = "1.26.15" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "zipp" -version = "3.15.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-o", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[metadata] -lock-version = "1.1" -python-versions = ">=3.7 <4" -content-hash = "e9e3c9c792c90300ff2f22bcfadc9ad737060eb3142c17a21e687073fa54e877" - -[metadata.files] -anyio = [ - {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"}, - {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"}, -] -aptos-sdk = [ - {file = "aptos_sdk-0.5.1.tar.gz", hash = "sha256:3711ad2bf1120fff463cd5f494162c4658f03dd6bfbf1f523ee9aea01a4cb0f0"}, -] -black = [ - {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, - {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, - {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, - {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, - {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, - {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, - {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, - {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, - {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, - {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, - {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, - {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, -] -certifi = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = 
"sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, -] -cffi = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, -] -charset-normalizer = [ - {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, - {file = 
"charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, - {file = 
"charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, - {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, -] -click = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] -colorama = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] -h11 = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] -httpcore = [ - {file = "httpcore-0.16.3-py3-none-any.whl", hash = "sha256:da1fb708784a938aa084bde4feb8317056c55037247c787bd7e19eb2c2949dc0"}, - {file = "httpcore-0.16.3.tar.gz", hash = "sha256:c5d6f04e2fc530f39e0c077e6a30caa53f1451096120f1f38b954afd0b17c0cb"}, -] -httpx = [ - {file = "httpx-0.23.3-py3-none-any.whl", hash = "sha256:a211fcce9b1254ea24f0cd6af9869b3d29aba40154e947d2a07bb499b3e310d6"}, - {file = "httpx-0.23.3.tar.gz", hash = "sha256:9818458eb565bb54898ccb9b8b251a28785dd4a55afbc23d0eb410754fe7d0f9"}, -] -idna = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] -importlib-metadata = [ - {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, - {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, -] -isort = [ - {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, - {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, -] -mypy = [ - {file = "mypy-0.982-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5085e6f442003fa915aeb0a46d4da58128da69325d8213b4b35cc7054090aed5"}, - {file = "mypy-0.982-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:41fd1cf9bc0e1c19b9af13a6580ccb66c381a5ee2cf63ee5ebab747a4badeba3"}, - {file = "mypy-0.982-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:f793e3dd95e166b66d50e7b63e69e58e88643d80a3dcc3bcd81368e0478b089c"}, - {file = "mypy-0.982-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86ebe67adf4d021b28c3f547da6aa2cce660b57f0432617af2cca932d4d378a6"}, - {file = "mypy-0.982-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:175f292f649a3af7082fe36620369ffc4661a71005aa9f8297ea473df5772046"}, - {file = "mypy-0.982-cp310-cp310-win_amd64.whl", hash = "sha256:8ee8c2472e96beb1045e9081de8e92f295b89ac10c4109afdf3a23ad6e644f3e"}, - {file = "mypy-0.982-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58f27ebafe726a8e5ccb58d896451dd9a662a511a3188ff6a8a6a919142ecc20"}, - {file = "mypy-0.982-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6af646bd46f10d53834a8e8983e130e47d8ab2d4b7a97363e35b24e1d588947"}, - {file = "mypy-0.982-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7aeaa763c7ab86d5b66ff27f68493d672e44c8099af636d433a7f3fa5596d40"}, - {file = "mypy-0.982-cp37-cp37m-win_amd64.whl", hash = "sha256:724d36be56444f569c20a629d1d4ee0cb0ad666078d59bb84f8f887952511ca1"}, - {file = "mypy-0.982-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14d53cdd4cf93765aa747a7399f0961a365bcddf7855d9cef6306fa41de01c24"}, - {file = "mypy-0.982-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:26ae64555d480ad4b32a267d10cab7aec92ff44de35a7cd95b2b7cb8e64ebe3e"}, - {file = "mypy-0.982-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6389af3e204975d6658de4fb8ac16f58c14e1bacc6142fee86d1b5b26aa52bda"}, - {file = "mypy-0.982-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b35ce03a289480d6544aac85fa3674f493f323d80ea7226410ed065cd46f206"}, - {file = "mypy-0.982-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c6e564f035d25c99fd2b863e13049744d96bd1947e3d3d2f16f5828864506763"}, - {file = "mypy-0.982-cp38-cp38-win_amd64.whl", hash = "sha256:cebca7fd333f90b61b3ef7f217ff75ce2e287482206ef4a8b18f32b49927b1a2"}, - {file = "mypy-0.982-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a705a93670c8b74769496280d2fe6cd59961506c64f329bb179970ff1d24f9f8"}, - {file = "mypy-0.982-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:75838c649290d83a2b83a88288c1eb60fe7a05b36d46cbea9d22efc790002146"}, - {file = "mypy-0.982-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:91781eff1f3f2607519c8b0e8518aad8498af1419e8442d5d0afb108059881fc"}, - {file = "mypy-0.982-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaa97b9ddd1dd9901a22a879491dbb951b5dec75c3b90032e2baa7336777363b"}, - {file = "mypy-0.982-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a692a8e7d07abe5f4b2dd32d731812a0175626a90a223d4b58f10f458747dd8a"}, - {file = "mypy-0.982-cp39-cp39-win_amd64.whl", hash = "sha256:eb7a068e503be3543c4bd329c994103874fa543c1727ba5288393c21d912d795"}, - {file = "mypy-0.982-py3-none-any.whl", hash = "sha256:1021c241e8b6e1ca5a47e4d52601274ac078a89845cfde66c6d5f769819ffa1d"}, - {file = "mypy-0.982.tar.gz", hash = "sha256:85f7a343542dc8b1ed0a888cdd34dca56462654ef23aa673907305b260b3d746"}, -] -mypy-extensions = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] -pathspec = [ - {file = "pathspec-0.11.0-py3-none-any.whl", hash = "sha256:3a66eb970cbac598f9e5ccb5b2cf58930cd8e3ed86d393d541eaf2d8b1705229"}, - {file = "pathspec-0.11.0.tar.gz", hash = 
"sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"}, -] -platformdirs = [ - {file = "platformdirs-3.1.0-py3-none-any.whl", hash = "sha256:13b08a53ed71021350c9e300d4ea8668438fb0046ab3937ac9a29913a1a1350a"}, - {file = "platformdirs-3.1.0.tar.gz", hash = "sha256:accc3665857288317f32c7bebb5a8e482ba717b474f3fc1d18ca7f9214be0cef"}, -] -pycparser = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] -pynacl = [ - {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, - {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, -] -requests = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, -] -rfc3986 = [ - {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"}, - {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"}, -] -sniffio = [ - {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, - {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, -] -tomli = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] -typed-ast = [ +files = [ {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, {file = 
"typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, @@ -651,15 +613,53 @@ typed-ast = [ {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, ] -typing-extensions = [ + +[[package]] +name = "typing-extensions" +version = "4.5.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, ] -urllib3 = [ + +[[package]] +name = "urllib3" +version = "1.26.15" +description = "HTTP library with thread-safe connection pooling, file post, and more." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, ] -zipp = [ + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "zipp" +version = "3.15.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, ] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.7 <3.10" +content-hash = "14e2791cee9e92d4f03b7c3589d91543f762b83983f49747126aecd3a3cdcc3f" diff --git a/crates/aptos/e2e/pyproject.toml b/crates/aptos/e2e/pyproject.toml index d543cea4f292e..2254fcfc09189 100644 --- a/crates/aptos/e2e/pyproject.toml +++ b/crates/aptos/e2e/pyproject.toml @@ -6,9 +6,9 @@ authors = ["Aptos Labs "] license = "Apache-2.0" [tool.poetry.dependencies] -python = ">=3.7 <4" +python = ">=3.7 <3.10" aptos-sdk = "^0.5.1" -requests = "^2.28.2" +requests = "^2.31.0" [tool.poetry.dev-dependencies] black = "^22.6.0" diff --git a/crates/aptos/e2e/test_helpers.py b/crates/aptos/e2e/test_helpers.py index e3af62d430367..8ab427d573be3 100644 --- a/crates/aptos/e2e/test_helpers.py +++ b/crates/aptos/e2e/test_helpers.py @@ -4,12 +4,13 @@ import logging import os import pathlib +import shutil import subprocess import traceback from dataclasses import dataclass from aptos_sdk.client 
import RestClient -from common import AccountInfo, build_image_name +from common import METRICS_PORT, NODE_PORT, AccountInfo, Network, build_image_name LOG = logging.getLogger(__name__) @@ -23,13 +24,20 @@ class RunHelper: image_repo_with_project: str image_tag: str cli_path: str + base_network: Network + test_count: int - # This can be used by the tests to query the local testnet. + # This can be used by the tests to query the local testnet node. api_client: RestClient def __init__( - self, host_working_directory, image_repo_with_project, image_tag, cli_path + self, + host_working_directory, + image_repo_with_project, + image_tag, + cli_path, + base_network, ): if image_tag and cli_path: raise RuntimeError("Cannot specify both image_tag and cli_path") @@ -38,9 +46,11 @@ def __init__( self.host_working_directory = host_working_directory self.image_repo_with_project = image_repo_with_project self.image_tag = image_tag + self.base_network = base_network self.cli_path = os.path.abspath(cli_path) if cli_path else cli_path + self.base_network = base_network self.test_count = 0 - self.api_client = RestClient(f"http://127.0.0.1:8080/v1") + self.api_client = RestClient(f"http://127.0.0.1:{NODE_PORT}/v1") def build_image_name(self): return build_image_name(self.image_repo_with_project, self.image_tag) @@ -53,25 +63,41 @@ def run_command(self, test_name, command, *args, **kwargs): file_name = f"{self.test_count:03}_{test_name}" self.test_count += 1 + # If we're in a CI environment it is necessary to set the --user, otherwise it + # is not possible to interact with the files in the bindmount. For more details + # see here: https://github.com/community/community/discussions/44243. + if os.environ.get("CI"): + user_args = ["--user", f"{os.getuid()}:{os.getgid()}"] + else: + user_args = [] + # Build command. if self.image_tag: - full_command = [ - "docker", - "run", - # For why we have to set --user, see here: - # https://github.com/community/community/discussions/44243 - "--user", - f"{os.getuid()}:{os.getgid()}", - "--rm", - "--network", - "host", - "-i", - "-v", - f"{self.host_working_directory}:{WORKING_DIR_IN_CONTAINER}", - "--workdir", - WORKING_DIR_IN_CONTAINER, - self.build_image_name(), - ] + command + full_command = ( + [ + "docker", + "run", + ] + + user_args + + [ + "-e", + # This is necessary to force the CLI to place the `.move` directory + # inside the bindmount dir, which is the only writeable directory + # inside the container when in CI. It's fine to do it outside of CI + # as well. + f"HOME={WORKING_DIR_IN_CONTAINER}", + "--rm", + "--network", + "host", + "-i", + "-v", + f"{self.host_working_directory}:{WORKING_DIR_IN_CONTAINER}", + "--workdir", + WORKING_DIR_IN_CONTAINER, + self.build_image_name(), + ] + + command + ) else: full_command = [self.cli_path] + command[1:] LOG.debug(f"Running command: {full_command}") @@ -122,10 +148,23 @@ def run_command(self, test_name, command, *args, **kwargs): raise + # Top level function to run any preparation. + def prepare(self): + self.prepare_move() + self.prepare_cli() + + # Move any Move files into the working directory. + def prepare_move(self): + shutil.copytree( + "../../../aptos-move/move-examples/cli-e2e-tests", + os.path.join(self.host_working_directory, "move"), + ignore=shutil.ignore_patterns("build"), + ) + # If image_Tag is set, pull the test CLI image. We don't technically have to do # this separately but it makes the steps clearer. Otherwise, cli_path must be # set, in which case we ensure the file is there. 
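The comments in the hunk above explain why the harness injects `--user` and a `HOME` override into its `docker run` invocation when running in CI: the bindmount is the only writable location in the container, and files written there must stay owned by the workflow user. A minimal Python sketch of roughly the command the helper assembles — the image name, tag, and paths below are made-up placeholders, not the values the real harness computes:

```python
import os
from typing import List

# Hypothetical values for illustration; the real harness derives these from its arguments.
WORKING_DIR_IN_CONTAINER = "/tmp/aptos-cli-e2e"
HOST_DIR = "/home/runner/work/e2e"
IMAGE = "example/aptos-tools:some-tag"


def docker_invocation(cli_command: List[str]) -> List[str]:
    # In CI, run the container as the host user so files written into the
    # bindmount remain readable and writable by the workflow afterwards.
    user_args = (
        ["--user", f"{os.getuid()}:{os.getgid()}"] if os.environ.get("CI") else []
    )
    return (
        ["docker", "run"]
        + user_args
        + [
            # Point HOME at the bindmount so the CLI keeps its `.move`
            # directory in the only writable location inside the container.
            "-e", f"HOME={WORKING_DIR_IN_CONTAINER}",
            "--rm", "--network", "host", "-i",
            "-v", f"{HOST_DIR}:{WORKING_DIR_IN_CONTAINER}",
            "--workdir", WORKING_DIR_IN_CONTAINER,
            IMAGE,
        ]
        + cli_command
    )


if __name__ == "__main__":
    print(" ".join(docker_invocation(["aptos", "info"])))
```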
- def prepare(self): + def prepare_cli(self): if self.image_tag: image_name = self.build_image_name() LOG.info(f"Pre-pulling image for CLI we're testing: {image_name}") @@ -161,6 +200,10 @@ def get_account_info(self): account_address=account_address, ) + def get_metrics_url(self, json=False): + path = "metrics" if not json else "json_metrics" + return f"http://127.0.0.1:{METRICS_PORT}/{path}" + # This function helps with writing the stdout / stderr of a subprocess to files. def write_subprocess_out(out_path, file_name, command_output): diff --git a/crates/aptos/e2e/test_results.py b/crates/aptos/e2e/test_results.py index 661fe43be85f1..c206d5a60ad40 100644 --- a/crates/aptos/e2e/test_results.py +++ b/crates/aptos/e2e/test_results.py @@ -17,7 +17,8 @@ class TestResults: # This is a decorator that you put above every test case. It handles capturing test -# success / failure so it can be reported at the end of the test suite. +# success / failure so it can be reported at the end of the test suite. It also handles +# passing in test_name based on the name of the function so the caller doesn't have to. def build_test_case_decorator(test_results: TestResults): def test_case_inner(f): @wraps(f) diff --git a/crates/aptos/src/account/derive_resource_account.rs b/crates/aptos/src/account/derive_resource_account.rs index 2b14449dfbe7b..a2a065442e831 100644 --- a/crates/aptos/src/account/derive_resource_account.rs +++ b/crates/aptos/src/account/derive_resource_account.rs @@ -9,8 +9,9 @@ use clap::Parser; use std::{fmt::Formatter, str::FromStr}; /// Encoding for the Resource account seed -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Default, Clone, Copy)] pub enum SeedEncoding { + #[default] Bcs, Hex, Utf8, @@ -20,12 +21,6 @@ const BCS: &str = "bcs"; const UTF_8: &str = "utf8"; const HEX: &str = "hex"; -impl Default for SeedEncoding { - fn default() -> Self { - SeedEncoding::Bcs - } -} - impl std::fmt::Display for SeedEncoding { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_str(match self { diff --git a/crates/aptos/src/account/key_rotation.rs b/crates/aptos/src/account/key_rotation.rs index c330c4ac9b6da..9937307ad3d78 100644 --- a/crates/aptos/src/account/key_rotation.rs +++ b/crates/aptos/src/account/key_rotation.rs @@ -3,7 +3,8 @@ use crate::common::{ types::{ - account_address_from_public_key, CliCommand, CliConfig, CliError, CliTypedResult, + account_address_from_auth_key, account_address_from_public_key, + AuthenticationKeyInputOptions, CliCommand, CliConfig, CliError, CliTypedResult, ConfigSearchMode, EncodingOptions, EncodingType, ExtractPublicKey, ParsePrivateKey, ProfileConfig, ProfileOptions, PublicKeyInputOptions, RestOptions, RotationProofChallenge, TransactionOptions, TransactionSummary, @@ -20,7 +21,10 @@ use aptos_rest_client::{ error::{AptosErrorResponse, RestError}, Client, }; -use aptos_types::{account_address::AccountAddress, account_config::CORE_CODE_ADDRESS}; +use aptos_types::{ + account_address::AccountAddress, account_config::CORE_CODE_ADDRESS, + transaction::authenticator::AuthenticationKey, +}; use async_trait::async_trait; use clap::Parser; use serde::{Deserialize, Serialize}; @@ -260,6 +264,9 @@ pub struct LookupAddress { #[clap(flatten)] pub(crate) rest_options: RestOptions, + + #[clap(flatten)] + pub(crate) authentication_key_options: AuthenticationKeyInputOptions, } impl LookupAddress { @@ -268,6 +275,11 @@ impl LookupAddress { .extract_public_key(self.encoding_options.encoding, &self.profile_options) } + pub(crate) fn auth_key(&self) -> 
CliTypedResult> { + self.authentication_key_options + .extract_auth_key(self.encoding_options.encoding) + } + /// Builds a rest client fn rest_client(&self) -> CliTypedResult { self.rest_options.client(&self.profile_options) @@ -284,7 +296,10 @@ impl CliCommand for LookupAddress { let rest_client = self.rest_client()?; // TODO: Support arbitrary auth key to support other types like multie25519 - let address = account_address_from_public_key(&self.public_key()?); + let address = match self.auth_key()? { + Some(key) => account_address_from_auth_key(&key), + None => account_address_from_public_key(&self.public_key()?), + }; Ok(lookup_address(&rest_client, address, true).await?) } } diff --git a/crates/aptos/src/common/types.rs b/crates/aptos/src/common/types.rs index 449459999fb8c..5b073eb7de8d7 100644 --- a/crates/aptos/src/common/types.rs +++ b/crates/aptos/src/common/types.rs @@ -29,7 +29,7 @@ use aptos_logger::Level; use aptos_rest_client::{ aptos_api_types::{EntryFunctionId, HashValue, MoveType, ViewRequest}, error::RestError, - Client, Transaction, + AptosBaseUrl, Client, Transaction, }; use aptos_sdk::{transaction_builder::TransactionFactory, types::LocalAccount}; use aptos_types::{ @@ -63,6 +63,9 @@ const ACCEPTED_CLOCK_SKEW_US: u64 = 5 * US_IN_SECS; pub const DEFAULT_EXPIRATION_SECS: u64 = 30; pub const DEFAULT_PROFILE: &str = "default"; +// Custom header value to identify the client +const X_APTOS_CLIENT_VALUE: &str = concat!("aptos-cli/", env!("CARGO_PKG_VERSION")); + /// A common result to be returned to users pub type CliResult = Result; @@ -449,11 +452,12 @@ impl ProfileOptions { } /// Types of encodings used by the blockchain -#[derive(ArgEnum, Clone, Copy, Debug)] +#[derive(ArgEnum, Clone, Copy, Debug, Default)] pub enum EncodingType { /// Binary Canonical Serialization BCS, /// Hex encoded e.g. 
0xABCDE12345 + #[default] Hex, /// Base 64 encoded Base64, @@ -553,12 +557,6 @@ impl RngArgs { } } -impl Default for EncodingType { - fn default() -> Self { - EncodingType::Hex - } -} - impl Display for EncodingType { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let str = match self { @@ -618,6 +616,41 @@ pub struct EncodingOptions { pub encoding: EncodingType, } +#[derive(Debug, Parser)] +pub struct AuthenticationKeyInputOptions { + /// Authentication Key file input + #[clap(long, group = "authentication_key_input", parse(from_os_str))] + auth_key_file: Option, + + /// Authentication key input + #[clap(long, group = "authentication_key_input")] + auth_key: Option, +} + +impl AuthenticationKeyInputOptions { + pub fn extract_auth_key( + &self, + encoding: EncodingType, + ) -> CliTypedResult> { + if let Some(ref file) = self.auth_key_file { + Ok(Some(encoding.load_key("--auth-key-file", file.as_path())?)) + } else if let Some(ref key) = self.auth_key { + let key = key.as_bytes().to_vec(); + Ok(Some(encoding.decode_key("--auth-key", key)?)) + } else { + Ok(None) + } + } + + pub fn from_public_key(key: &Ed25519PublicKey) -> AuthenticationKeyInputOptions { + let auth_key = AuthenticationKey::ed25519(key); + AuthenticationKeyInputOptions { + auth_key: Some(auth_key.to_encoded_string().unwrap()), + auth_key_file: None, + } + } +} + #[derive(Debug, Parser)] pub struct PublicKeyInputOptions { /// Ed25519 Public key input file name @@ -832,6 +865,10 @@ pub trait ExtractPublicKey { pub fn account_address_from_public_key(public_key: &Ed25519PublicKey) -> AccountAddress { let auth_key = AuthenticationKey::ed25519(public_key); + account_address_from_auth_key(&auth_key) +} + +pub fn account_address_from_auth_key(auth_key: &AuthenticationKey) -> AccountAddress { AccountAddress::new(*auth_key.derived_address()) } @@ -905,11 +942,10 @@ impl RestOptions { } pub fn client(&self, profile: &ProfileOptions) -> CliTypedResult { - Ok(Client::new_with_timeout_and_user_agent( - self.url(profile)?, - Duration::from_secs(self.connection_timeout_secs), - USER_AGENT, - )) + Ok(Client::builder(AptosBaseUrl::Custom(self.url(profile)?)) + .timeout(Duration::from_secs(self.connection_timeout_secs)) + .header(aptos_api_types::X_APTOS_CLIENT, X_APTOS_CLIENT_VALUE)? 
+ .build()) } } diff --git a/crates/aptos/src/common/utils.rs b/crates/aptos/src/common/utils.rs index 8bacbd1b4d849..0a9c766ff3074 100644 --- a/crates/aptos/src/common/utils.rs +++ b/crates/aptos/src/common/utils.rs @@ -35,6 +35,7 @@ use std::{ str::FromStr, time::{Duration, Instant, SystemTime}, }; +use tokio::time::timeout; /// Prompts for confirmation until a yes or no is given explicitly pub fn prompt_yes(prompt: &str) -> bool { @@ -81,7 +82,15 @@ pub async fn to_common_result( } else { None }; - send_telemetry_event(command, latency, !is_err, error).await; + + if let Err(err) = timeout( + Duration::from_millis(2000), + send_telemetry_event(command, latency, !is_err, error), + ) + .await + { + debug!("send_telemetry_event timeout from CLI: {}", err.to_string()) + } } let result: ResultWrapper = result.into(); diff --git a/crates/aptos/src/ffi.rs b/crates/aptos/src/ffi.rs index 9e06529025514..968ef24c4485b 100644 --- a/crates/aptos/src/ffi.rs +++ b/crates/aptos/src/ffi.rs @@ -30,8 +30,7 @@ pub unsafe extern "C" fn run_aptos_sync(s: *const c_char) -> *const c_char { // Create a new Tokio runtime and block on the execution of `cli.execute()` let result_string = Runtime::new().unwrap().block_on(async move { let cli = Tool::parse_from(input_string); - let result = cli.execute().await; - result + cli.execute().await }); let res_cstr = CString::new(result_string.unwrap()).unwrap(); diff --git a/crates/aptos/src/move_tool/mod.rs b/crates/aptos/src/move_tool/mod.rs index ca18b93ea491d..50a401ebd807e 100644 --- a/crates/aptos/src/move_tool/mod.rs +++ b/crates/aptos/src/move_tool/mod.rs @@ -313,7 +313,6 @@ impl CliCommand> for CompilePackage { } let ids = pack .modules() - .into_iter() .map(|m| m.self_id().to_string()) .collect::>(); Ok(ids) @@ -662,7 +661,8 @@ impl TryInto for &PublishPackage { self.move_options.named_addresses(), self.move_options.bytecode_version, ); - let package = BuiltPackage::build(package_path, options)?; + let package = BuiltPackage::build(package_path, options) + .map_err(|e| CliError::MoveCompilationError(format!("{:#}", e)))?; let compiled_units = package.extract_code(); let metadata_serialized = bcs::to_bytes(&package.extract_metadata()?).expect("PackageMetadata has BCS"); diff --git a/crates/aptos/src/test/mod.rs b/crates/aptos/src/test/mod.rs index d2d97cf7ad352..41751b6c1334a 100644 --- a/crates/aptos/src/test/mod.rs +++ b/crates/aptos/src/test/mod.rs @@ -12,12 +12,12 @@ use crate::{ common::{ init::{InitTool, Network}, types::{ - account_address_from_public_key, AccountAddressWrapper, ArgWithTypeVec, CliError, - CliTypedResult, EncodingOptions, EntryFunctionArguments, FaucetOptions, GasOptions, - KeyType, MoveManifestAccountWrapper, MovePackageDir, OptionalPoolAddressArgs, - PoolAddressArgs, PrivateKeyInputOptions, PromptOptions, PublicKeyInputOptions, - RestOptions, RngArgs, SaveFile, ScriptFunctionArguments, TransactionOptions, - TransactionSummary, TypeArgVec, + account_address_from_public_key, AccountAddressWrapper, ArgWithTypeVec, + AuthenticationKeyInputOptions, CliError, CliTypedResult, EncodingOptions, + EntryFunctionArguments, FaucetOptions, GasOptions, KeyType, MoveManifestAccountWrapper, + MovePackageDir, OptionalPoolAddressArgs, PoolAddressArgs, PrivateKeyInputOptions, + PromptOptions, PublicKeyInputOptions, RestOptions, RngArgs, SaveFile, + ScriptFunctionArguments, TransactionOptions, TransactionSummary, TypeArgVec, }, utils::write_to_file, }, @@ -242,6 +242,7 @@ impl CliTestFramework { rest_options: self.rest_options(), encoding_options: 
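The `LookupAddress` change above lets the command resolve an account address from an authentication key as well as from a public key, via the new `account_address_from_auth_key` helper. For the standard single-Ed25519 scheme, the authentication key is the SHA3-256 hash of the public key bytes followed by the scheme byte `0x00`, and a fresh (unrotated) account's address equals that authentication key. A standard-library sketch of that derivation — the key bytes below are invented purely for illustration:

```python
import hashlib

# Made-up 32-byte Ed25519 public key, for illustration only.
public_key = bytes.fromhex(
    "5b2ae6e9b7f1a3c4d5e6f708192a3b4c5d6e7f8091a2b3c4d5e6f70819aabbcc"
)

# Scheme identifier appended when deriving an Ed25519 single-key auth key.
ED25519_SCHEME = b"\x00"

auth_key = hashlib.sha3_256(public_key + ED25519_SCHEME).digest()

# Until the key is rotated, the on-chain account address is the authentication
# key itself, which is what account_address_from_auth_key returns on the Rust side.
account_address = "0x" + auth_key.hex()
print(account_address)
```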
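The `utils.rs` change above wraps telemetry submission in a 2-second `tokio::time::timeout` so a slow telemetry endpoint can no longer delay CLI exit. The same pattern in Python's asyncio, shown only as an analogy — the coroutine below is a stand-in, not the real telemetry call:

```python
import asyncio
import logging

LOG = logging.getLogger(__name__)


async def send_telemetry_event() -> None:
    # Stand-in for the real telemetry call; pretend the endpoint is slow.
    await asyncio.sleep(10)


async def run_command() -> None:
    try:
        # Give telemetry at most 2 seconds, then carry on regardless.
        await asyncio.wait_for(send_telemetry_event(), timeout=2.0)
    except asyncio.TimeoutError:
        LOG.debug("send_telemetry_event timed out")


asyncio.run(run_command())
```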
Default::default(), profile_options: Default::default(), + authentication_key_options: AuthenticationKeyInputOptions::from_public_key(public_key), } .execute() .await diff --git a/crates/indexer/migrations/2023-05-24-052435_token_properties_v2/down.sql b/crates/indexer/migrations/2023-05-24-052435_token_properties_v2/down.sql new file mode 100644 index 0000000000000..759d57bc385cb --- /dev/null +++ b/crates/indexer/migrations/2023-05-24-052435_token_properties_v2/down.sql @@ -0,0 +1,7 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS current_collection_ownership_v2_view; +DROP TABLE IF EXISTS current_token_v2_metadata; +ALTER TABLE token_datas_v2 DROP COLUMN IF EXISTS decimals; +ALTER TABLE current_token_datas_v2 DROP COLUMN IF EXISTS decimals; +ALTER TABLE token_ownerships_v2 DROP COLUMN IF EXISTS non_transferrable_by_owner; +ALTER TABLE current_token_ownerships_v2 DROP COLUMN IF EXISTS non_transferrable_by_owner; \ No newline at end of file diff --git a/crates/indexer/migrations/2023-05-24-052435_token_properties_v2/up.sql b/crates/indexer/migrations/2023-05-24-052435_token_properties_v2/up.sql new file mode 100644 index 0000000000000..a8f630220496a --- /dev/null +++ b/crates/indexer/migrations/2023-05-24-052435_token_properties_v2/up.sql @@ -0,0 +1,32 @@ +-- Your SQL goes here +-- need this for getting NFTs grouped by collections +create or replace view current_collection_ownership_v2_view as +select owner_address, + b.collection_id, + MAX(a.last_transaction_version) as last_transaction_version, + COUNT(distinct a.token_data_id) as distinct_tokens +from current_token_ownerships_v2 a + join current_token_datas_v2 b on a.token_data_id = b.token_data_id +where a.amount > 0 +group by 1, + 2; +-- create table for all structs in token object core +CREATE TABLE IF NOT EXISTS current_token_v2_metadata ( + object_address VARCHAR(66) NOT NULL, + resource_type VARCHAR(128) NOT NULL, + data jsonb NOT NULL, + state_key_hash VARCHAR(66) NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- constraints + PRIMARY KEY (object_address, resource_type) +); +-- create table for all structs in token object core +ALTER TABLE token_datas_v2 +ADD COLUMN IF NOT EXISTS decimals BIGINT NOT NULL DEFAULT 0; +ALTER TABLE current_token_datas_v2 +ADD COLUMN IF NOT EXISTS decimals BIGINT NOT NULL DEFAULT 0; +ALTER TABLE token_ownerships_v2 +ADD COLUMN IF NOT EXISTS non_transferrable_by_owner BOOLEAN; +ALTER TABLE current_token_ownerships_v2 +ADD COLUMN IF NOT EXISTS non_transferrable_by_owner BOOLEAN; \ No newline at end of file diff --git a/crates/indexer/src/models/coin_models/mod.rs b/crates/indexer/src/models/coin_models/mod.rs index c748a32a04e99..f898fec32f381 100644 --- a/crates/indexer/src/models/coin_models/mod.rs +++ b/crates/indexer/src/models/coin_models/mod.rs @@ -6,3 +6,4 @@ pub mod coin_balances; pub mod coin_infos; pub mod coin_supply; pub mod coin_utils; +pub mod v2_fungible_asset_utils; diff --git a/crates/indexer/src/models/coin_models/v2_fungible_asset_utils.rs b/crates/indexer/src/models/coin_models/v2_fungible_asset_utils.rs new file mode 100644 index 0000000000000..5bb0455dc79a6 --- /dev/null +++ b/crates/indexer/src/models/coin_models/v2_fungible_asset_utils.rs @@ -0,0 +1,220 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + models::{ + move_resources::MoveResource, + 
token_models::{token_utils::URI_LENGTH, v2_token_utils::ResourceReference}, + }, + util::truncate_str, +}; +use anyhow::{Context, Result}; +use aptos_api_types::{deserialize_from_string, WriteResource}; +use bigdecimal::BigDecimal; +use serde::{Deserialize, Serialize}; + +const FUNGIBLE_ASSET_LENGTH: usize = 32; +const FUNGIBLE_ASSET_SYMBOL: usize = 10; + +/* Section on fungible assets resources */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FungibleAssetMetadata { + name: String, + symbol: String, + pub decimals: i32, + icon_uri: String, + project_uri: String, +} + +impl FungibleAssetMetadata { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::FungibleAssetMetadata(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_name(&self) -> String { + truncate_str(&self.name, FUNGIBLE_ASSET_LENGTH) + } + + pub fn get_symbol(&self) -> String { + truncate_str(&self.name, FUNGIBLE_ASSET_SYMBOL) + } + + pub fn get_icon_uri(&self) -> String { + truncate_str(&self.icon_uri, URI_LENGTH) + } + + pub fn get_project_uri(&self) -> String { + truncate_str(&self.project_uri, URI_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FungibleAssetStore { + pub metadata: ResourceReference, + #[serde(deserialize_with = "deserialize_from_string")] + pub balance: BigDecimal, + pub frozen: bool, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FungibleAssetSupply { + #[serde(deserialize_with = "deserialize_from_string")] + pub current: BigDecimal, + pub maximum: OptionalBigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct OptionalBigDecimal { + vec: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct BigDecimalWrapper(#[serde(deserialize_with = "deserialize_from_string")] pub BigDecimal); + +impl FungibleAssetSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::FungibleAssetSupply(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_maximum(&self) -> Option { + self.maximum.vec.first().map(|x| x.0.clone()) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum V2FungibleAssetResource { + FungibleAssetMetadata(FungibleAssetMetadata), + FungibleAssetStore(FungibleAssetStore), + FungibleAssetSupply(FungibleAssetSupply), +} + +impl V2FungibleAssetResource { + pub fn is_resource_supported(data_type: &str) -> bool { + matches!( + data_type, + "0x1::fungible_asset::Supply" + | "0x1::fungible_asset::Metadata" + | "0x1::fungible_asset::FungibleStore" + ) + } + + pub fn from_resource( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result { + match data_type { + "0x1::fungible_asset::Supply" => serde_json::from_value(data.clone()) + .map(|inner| Some(Self::FungibleAssetSupply(inner))), + "0x1::fungible_asset::Metadata" => serde_json::from_value(data.clone()) + .map(|inner| Some(Self::FungibleAssetMetadata(inner))), + "0x1::fungible_asset::FungibleStore" => serde_json::from_value(data.clone()) + .map(|inner| Some(Self::FungibleAssetStore(inner))), + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + ))? + .context(format!( + "Resource unsupported! Call is_resource_supported first. version {} type {}", + txn_version, data_type + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_fungible_asset_supply_null() { + let test = r#"{"current": "0", "maximum": {"vec": []}}"#; + let test: serde_json::Value = serde_json::from_str(test).unwrap(); + let supply = serde_json::from_value(test) + .map(V2FungibleAssetResource::FungibleAssetSupply) + .unwrap(); + if let V2FungibleAssetResource::FungibleAssetSupply(supply) = supply { + assert_eq!(supply.current, BigDecimal::from(0)); + assert_eq!(supply.get_maximum(), None); + } else { + panic!("Wrong type") + } + } + + #[test] + fn test_fungible_asset_supply_nonnull() { + let test = r#"{"current": "100", "maximum": {"vec": ["5000"]}}"#; + let test: serde_json::Value = serde_json::from_str(test).unwrap(); + let supply = serde_json::from_value(test) + .map(V2FungibleAssetResource::FungibleAssetSupply) + .unwrap(); + if let V2FungibleAssetResource::FungibleAssetSupply(supply) = supply { + assert_eq!(supply.current, BigDecimal::from(100)); + assert_eq!(supply.get_maximum(), Some(BigDecimal::from(5000))); + } else { + panic!("Wrong type") + } + } +} diff --git a/crates/indexer/src/models/token_models/mod.rs b/crates/indexer/src/models/token_models/mod.rs index acde51b236948..c6c6d17f7b951 100644 --- a/crates/indexer/src/models/token_models/mod.rs +++ b/crates/indexer/src/models/token_models/mod.rs @@ -13,5 +13,6 @@ pub mod tokens; pub mod v2_collections; pub mod v2_token_activities; pub mod v2_token_datas; +pub mod v2_token_metadata; pub mod v2_token_ownerships; pub mod v2_token_utils; diff --git a/crates/indexer/src/models/token_models/v2_token_activities.rs b/crates/indexer/src/models/token_models/v2_token_activities.rs index a5cd7873170f6..1f94bb4bfe1f9 100644 --- a/crates/indexer/src/models/token_models/v2_token_activities.rs +++ b/crates/indexer/src/models/token_models/v2_token_activities.rs @@ -78,7 +78,7 @@ impl TokenActivityV2 { }; if let Some(metadata) = token_v2_metadata.get(&token_data_id) { - let object_core = &metadata.object; + let object_core = &metadata.object.object_core; let token_activity_helper = match token_event { V2TokenEvent::MintEvent(_) => TokenActivityHelperV2 { 
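`FungibleAssetSupply.maximum` above is a Move `Option<u128>`, which the API serializes as an object whose `vec` array is empty for `none` and holds a single string-encoded number for `some` — the two shapes exercised by the unit tests in the new file. A quick standard-library sketch of reading that encoding, assuming the same JSON shape as those tests:

```python
import json
from decimal import Decimal
from typing import Optional


def read_move_option_u128(value: dict) -> Optional[Decimal]:
    # An on-chain Option<u128> arrives as {"vec": []} for none
    # and {"vec": ["<number-as-string>"]} for some.
    vec = value.get("vec", [])
    return Decimal(vec[0]) if vec else None


supply = json.loads('{"current": "100", "maximum": {"vec": ["5000"]}}')
print(read_move_option_u128(supply["maximum"]))          # 5000
print(read_move_option_u128(json.loads('{"vec": []}')))  # None
```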
from_address: Some(object_core.get_owner_address()), diff --git a/crates/indexer/src/models/token_models/v2_token_datas.rs b/crates/indexer/src/models/token_models/v2_token_datas.rs index ddad577fefecc..0200ebcfa143c 100644 --- a/crates/indexer/src/models/token_models/v2_token_datas.rs +++ b/crates/indexer/src/models/token_models/v2_token_datas.rs @@ -39,6 +39,7 @@ pub struct TokenDataV2 { pub token_standard: String, pub is_fungible_v2: Option, pub transaction_timestamp: chrono::NaiveDateTime, + pub decimals: i64, } #[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] @@ -58,6 +59,7 @@ pub struct CurrentTokenDataV2 { pub is_fungible_v2: Option, pub last_transaction_version: i64, pub last_transaction_timestamp: chrono::NaiveDateTime, + pub decimals: i64, } impl TokenDataV2 { @@ -71,10 +73,21 @@ impl TokenDataV2 { if let Some(inner) = &TokenV2::from_write_resource(write_resource, txn_version)? { let token_data_id = standardize_address(&write_resource.address.to_string()); // Get maximum, supply, and is fungible from fungible asset if this is a fungible token - let (maximum, supply, is_fungible_v2) = (None, BigDecimal::zero(), Some(false)); + let (mut maximum, mut supply, mut decimals, mut is_fungible_v2) = + (None, BigDecimal::zero(), 0, Some(false)); // Get token properties from 0x4::property_map::PropertyMap let mut token_properties = serde_json::Value::Null; if let Some(metadata) = token_v2_metadata.get(&token_data_id) { + let fungible_asset_metadata = metadata.fungible_asset_metadata.as_ref(); + let fungible_asset_supply = metadata.fungible_asset_supply.as_ref(); + if let Some(metadata) = fungible_asset_metadata { + if let Some(fa_supply) = fungible_asset_supply { + maximum = fa_supply.get_maximum(); + supply = fa_supply.current.clone(); + decimals = metadata.decimals as i64; + is_fungible_v2 = Some(true); + } + } token_properties = metadata .property_map .as_ref() @@ -105,6 +118,7 @@ impl TokenDataV2 { token_standard: TokenStandard::V2.to_string(), is_fungible_v2, transaction_timestamp: txn_timestamp, + decimals, }, CurrentTokenDataV2 { token_data_id, @@ -120,6 +134,7 @@ impl TokenDataV2 { is_fungible_v2, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + decimals, }, ))) } else { @@ -177,6 +192,7 @@ impl TokenDataV2 { token_standard: TokenStandard::V1.to_string(), is_fungible_v2: None, transaction_timestamp: txn_timestamp, + decimals: 0, }, CurrentTokenDataV2 { token_data_id, @@ -192,6 +208,7 @@ impl TokenDataV2 { is_fungible_v2: None, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + decimals: 0, }, ))); } else { diff --git a/crates/indexer/src/models/token_models/v2_token_metadata.rs b/crates/indexer/src/models/token_models/v2_token_metadata.rs new file mode 100644 index 0000000000000..f5e86065877dd --- /dev/null +++ b/crates/indexer/src/models/token_models/v2_token_metadata.rs @@ -0,0 +1,71 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{token_utils::NAME_LENGTH, v2_token_utils::TokenV2AggregatedDataMapping}; +use crate::{ + models::move_resources::MoveResource, + schema::current_token_v2_metadata, + util::{standardize_address, truncate_str}, +}; +use anyhow::Context; +use aptos_api_types::WriteResource; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +// PK of 
current_objects, i.e. object_address, resource_type +pub type CurrentTokenV2MetadataPK = (String, String); + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(object_address, resource_type))] +#[diesel(table_name = current_token_v2_metadata)] +pub struct CurrentTokenV2Metadata { + pub object_address: String, + pub resource_type: String, + pub data: Value, + pub state_key_hash: String, + pub last_transaction_version: i64, +} + +impl CurrentTokenV2Metadata { + /// Parsing unknown resources with 0x4::token::Token + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + token_v2_metadata: &TokenV2AggregatedDataMapping, + ) -> anyhow::Result> { + let object_address = standardize_address(&write_resource.address.to_string()); + if let Some(metadata) = token_v2_metadata.get(&object_address) { + // checking if token_v2 + if metadata.token.is_some() { + let resource_type_addr = write_resource.data.typ.address.to_string(); + if matches!(resource_type_addr.as_str(), "0x1" | "0x3" | "0x4") { + return Ok(None); + } + + let resource = MoveResource::from_write_resource(write_resource, 0, txn_version, 0); + + let state_key_hash = metadata.object.get_state_key_hash(); + if state_key_hash != resource.state_key_hash { + return Ok(None); + } + + let resource_type = truncate_str(&resource.type_, NAME_LENGTH); + return Ok(Some(CurrentTokenV2Metadata { + object_address, + resource_type, + data: resource + .data + .context("data must be present in write resource")?, + state_key_hash: resource.state_key_hash, + last_transaction_version: txn_version, + })); + } + } + Ok(None) + } +} diff --git a/crates/indexer/src/models/token_models/v2_token_ownerships.rs b/crates/indexer/src/models/token_models/v2_token_ownerships.rs index 0c3a50d22c101..8da58f398ec10 100644 --- a/crates/indexer/src/models/token_models/v2_token_ownerships.rs +++ b/crates/indexer/src/models/token_models/v2_token_ownerships.rs @@ -10,10 +10,15 @@ use super::{ token_utils::TokenWriteSet, tokens::TableHandleToOwner, v2_token_datas::TokenDataV2, - v2_token_utils::{ObjectCore, TokenStandard, TokenV2AggregatedDataMapping, TokenV2Burned}, + v2_token_utils::{ + ObjectWithMetadata, TokenStandard, TokenV2AggregatedDataMapping, TokenV2Burned, + }, }; use crate::{ database::PgPoolConnection, + models::{ + coin_models::v2_fungible_asset_utils::V2FungibleAssetResource, move_resources::MoveResource, + }, schema::{current_token_ownerships_v2, token_ownerships_v2}, util::{ensure_not_negative, standardize_address}, }; @@ -48,6 +53,7 @@ pub struct TokenOwnershipV2 { pub token_standard: String, pub is_fungible_v2: Option, pub transaction_timestamp: chrono::NaiveDateTime, + pub non_transferrable_by_owner: Option, } #[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] @@ -66,6 +72,7 @@ pub struct CurrentTokenOwnershipV2 { pub is_fungible_v2: Option, pub last_transaction_version: i64, pub last_transaction_timestamp: chrono::NaiveDateTime, + pub non_transferrable_by_owner: Option, } // Facilitate tracking when a token is burned @@ -94,6 +101,7 @@ pub struct CurrentTokenOwnershipV2Query { pub last_transaction_version: i64, pub last_transaction_timestamp: chrono::NaiveDateTime, pub inserted_at: chrono::NaiveDateTime, + pub non_transferrable_by_owner: Option, } impl TokenOwnershipV2 { @@ -101,16 +109,22 @@ impl TokenOwnershipV2 { pub fn get_nft_v2_from_token_data( token_data: &TokenDataV2, token_v2_metadata: &TokenV2AggregatedDataMapping, - ) -> anyhow::Result<( - Self, 
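`CurrentTokenV2Metadata::from_write_resource` above indexes arbitrary custom resources attached to a token object: it only considers objects already known to hold a `0x4::token::Token`, skips resources declared at the framework addresses, and drops any write resource whose state key hash does not match the object itself. A rough Python rendering of that filter, using simplified dicts in place of the indexer's real types:

```python
FRAMEWORK_ADDRESSES = {"0x1", "0x3", "0x4"}


def token_v2_metadata_row(resource: dict, metadata: dict, txn_version: int):
    """Return a row for a custom resource on a token object, else None.

    `resource` and `metadata` are simplified stand-ins for the indexer's
    MoveResource and aggregated token metadata."""
    if metadata.get("token") is None:
        return None  # only objects known to hold a 0x4::token::Token
    if resource["type_address"] in FRAMEWORK_ADDRESSES:
        return None  # framework resources are indexed elsewhere
    if resource["state_key_hash"] != metadata["object_state_key_hash"]:
        return None  # the write resource does not belong to this object
    return {
        "object_address": resource["address"],
        "resource_type": resource["type"],
        "data": resource["data"],
        "state_key_hash": resource["state_key_hash"],
        "last_transaction_version": txn_version,
    }
```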
- CurrentTokenOwnershipV2, - Option, // If token was transferred, the previous ownership record - Option, // If token was transferred, the previous ownership record - )> { + ) -> anyhow::Result< + Option<( + Self, + CurrentTokenOwnershipV2, + Option, // If token was transferred, the previous ownership record + Option, // If token was transferred, the previous ownership record + )>, + > { + // We should be indexing v1 token or v2 fungible token here + if token_data.is_fungible_v2 != Some(false) { + return Ok(None); + } let metadata = token_v2_metadata .get(&token_data.token_data_id) .context("If token data exists objectcore must exist")?; - let object_core = metadata.object.clone(); + let object_core = metadata.object.object_core.clone(); let token_data_id = token_data.token_data_id.clone(); let owner_address = object_core.get_owner_address(); let storage_id = token_data_id.clone(); @@ -130,6 +144,7 @@ impl TokenOwnershipV2 { token_standard: TokenStandard::V2.to_string(), is_fungible_v2: token_data.is_fungible_v2, transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(is_soulbound), }; let current_ownership = CurrentTokenOwnershipV2 { token_data_id: token_data_id.clone(), @@ -144,11 +159,12 @@ impl TokenOwnershipV2 { is_fungible_v2: token_data.is_fungible_v2, last_transaction_version: token_data.transaction_version, last_transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(is_soulbound), }; // check if token was transferred if let Some((event_index, transfer_event)) = &metadata.transfer_event { - Ok(( + Ok(Some(( ownership, current_ownership, Some(Self { @@ -168,6 +184,7 @@ impl TokenOwnershipV2 { token_standard: TokenStandard::V2.to_string(), is_fungible_v2: token_data.is_fungible_v2, transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(is_soulbound), }), Some(CurrentTokenOwnershipV2 { token_data_id, @@ -184,10 +201,11 @@ impl TokenOwnershipV2 { is_fungible_v2: token_data.is_fungible_v2, last_transaction_version: token_data.transaction_version, last_transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(is_soulbound), }), - )) + ))) } else { - Ok((ownership, current_ownership, None, None)) + Ok(Some((ownership, current_ownership, None, None))) } } @@ -202,9 +220,10 @@ impl TokenOwnershipV2 { if let Some(token_address) = tokens_burned.get(&standardize_address(&write_resource.address.to_string())) { - if let Some(object_core) = - &ObjectCore::from_write_resource(write_resource, txn_version)? + if let Some(object) = + &ObjectWithMetadata::from_write_resource(write_resource, txn_version)? 
{ + let object_core = &object.object_core; let token_data_id = token_address.clone(); let owner_address = object_core.get_owner_address(); let storage_id = token_data_id.clone(); @@ -225,6 +244,7 @@ impl TokenOwnershipV2 { token_standard: TokenStandard::V2.to_string(), is_fungible_v2: Some(false), transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: Some(is_soulbound), }, CurrentTokenOwnershipV2 { token_data_id, @@ -239,6 +259,7 @@ impl TokenOwnershipV2 { is_fungible_v2: Some(false), last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: Some(is_soulbound), }, ))); } @@ -287,6 +308,7 @@ impl TokenOwnershipV2 { token_standard: TokenStandard::V2.to_string(), is_fungible_v2: Some(false), transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: is_soulbound, }, CurrentTokenOwnershipV2 { token_data_id, @@ -301,12 +323,90 @@ impl TokenOwnershipV2 { is_fungible_v2: Some(false), last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: is_soulbound, }, ))); } Ok(None) } + // Getting this from 0x1::fungible_asset::FungibleStore + pub fn get_ft_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + token_v2_metadata: &TokenV2AggregatedDataMapping, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::FungibleAssetStore(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? 
+ { + if let Some(metadata) = token_v2_metadata.get(&resource.address) { + let object_core = &metadata.object.object_core; + let token_data_id = inner.metadata.get_reference_address(); + let storage_id = token_data_id.clone(); + let is_soulbound = inner.frozen; + let amount = inner.balance; + let owner_address = object_core.get_owner_address(); + + return Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address: Some(owner_address.clone()), + storage_id: storage_id.clone(), + amount: amount.clone(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(true), + transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: Some(is_soulbound), + }, + CurrentTokenOwnershipV2 { + token_data_id, + property_version_v1: BigDecimal::zero(), + owner_address, + storage_id, + amount, + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(true), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: Some(is_soulbound), + }, + ))); + } + } + Ok(None) + } + /// We want to track tokens in any offer/claims and tokenstore pub fn get_v1_from_write_table_item( table_item: &APIWriteTableItem, @@ -354,6 +454,7 @@ impl TokenOwnershipV2 { is_fungible_v2: None, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, }), Some(owner_address), Some(tm.table_type.clone()), @@ -385,6 +486,7 @@ impl TokenOwnershipV2 { token_standard: TokenStandard::V1.to_string(), is_fungible_v2: None, transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, }, curr_token_ownership, ))) @@ -438,6 +540,7 @@ impl TokenOwnershipV2 { is_fungible_v2: None, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, }), Some(owner_address), Some(tm.table_type.clone()), @@ -469,6 +572,7 @@ impl TokenOwnershipV2 { token_standard: TokenStandard::V1.to_string(), is_fungible_v2: None, transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, }, curr_token_ownership, ))) diff --git a/crates/indexer/src/models/token_models/v2_token_utils.rs b/crates/indexer/src/models/token_models/v2_token_utils.rs index 62c8f366dc6ff..f4e36600298bd 100644 --- a/crates/indexer/src/models/token_models/v2_token_utils.rs +++ b/crates/indexer/src/models/token_models/v2_token_utils.rs @@ -6,7 +6,11 @@ use super::token_utils::{NAME_LENGTH, URI_LENGTH}; use crate::{ - models::{move_resources::MoveResource, v2_objects::CurrentObjectPK}, + models::{ + coin_models::v2_fungible_asset_utils::{FungibleAssetMetadata, FungibleAssetSupply}, + move_resources::MoveResource, + v2_objects::CurrentObjectPK, + }, util::{ deserialize_token_object_property_map_from_bcs_hexstring, standardize_address, truncate_str, }, @@ -31,11 +35,13 @@ pub type EventIndex = i64; pub struct TokenV2AggregatedData { pub aptos_collection: Option, pub fixed_supply: Option, - pub object: ObjectCore, - pub unlimited_supply: Option, + pub fungible_asset_metadata: Option, + pub fungible_asset_supply: Option, + pub object: ObjectWithMetadata, pub property_map: Option, - pub transfer_event: Option<(EventIndex, TransferEvent)>, pub token: Option, + 
pub transfer_event: Option<(EventIndex, TransferEvent)>, + pub unlimited_supply: Option, } /// Tracks which token standard a token / collection is built upon @@ -64,6 +70,18 @@ pub struct ObjectCore { } impl ObjectCore { + pub fn get_owner_address(&self) -> String { + standardize_address(&self.owner) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ObjectWithMetadata { + pub object_core: ObjectCore, + state_key_hash: String, +} + +impl ObjectWithMetadata { pub fn from_write_resource( write_resource: &WriteResource, txn_version: i64, @@ -82,17 +100,21 @@ impl ObjectCore { &serde_json::to_value(&write_resource.data.data).unwrap(), txn_version, )? { - Ok(Some(inner)) + Ok(Some(Self { + object_core: inner, + state_key_hash: standardize_address(write_resource.state_key_hash.as_str()), + })) } else { Ok(None) } } - pub fn get_owner_address(&self) -> String { - standardize_address(&self.owner) + pub fn get_state_key_hash(&self) -> String { + standardize_address(&self.state_key_hash) } } +/* Section on Collection / Token */ #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Collection { creator: String, @@ -164,7 +186,7 @@ pub struct TokenV2 { impl TokenV2 { pub fn get_collection_address(&self) -> String { - standardize_address(&self.collection.inner) + self.collection.get_reference_address() } pub fn get_uri_trunc(&self) -> String { @@ -210,6 +232,13 @@ pub struct ResourceReference { inner: String, } +impl ResourceReference { + pub fn get_reference_address(&self) -> String { + standardize_address(&self.inner) + } +} + +/* Section on Supply */ #[derive(Serialize, Deserialize, Debug, Clone)] pub struct FixedSupply { #[serde(deserialize_with = "deserialize_from_string")] @@ -290,6 +319,7 @@ impl UnlimitedSupply { } } +/* Section on Events */ #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MintEvent { #[serde(deserialize_with = "deserialize_from_string")] @@ -366,6 +396,7 @@ impl TransferEvent { } } +/* Section on Property Maps */ #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PropertyMap { #[serde(deserialize_with = "deserialize_token_object_property_map_from_bcs_hexstring")] @@ -481,7 +512,7 @@ impl V2TokenEvent { data_type: &str, data: &serde_json::Value, txn_version: i64, - ) -> Result> { + ) -> Result> { match data_type { "0x4::collection::MintEvent" => { serde_json::from_value(data.clone()).map(|inner| Some(Self::MintEvent(inner))) diff --git a/crates/indexer/src/models/v2_objects.rs b/crates/indexer/src/models/v2_objects.rs index 1cab00cfac1ae..517450bebfc13 100644 --- a/crates/indexer/src/models/v2_objects.rs +++ b/crates/indexer/src/models/v2_objects.rs @@ -5,7 +5,7 @@ #![allow(clippy::extra_unused_lifetimes)] #![allow(clippy::unused_unit)] -use super::token_models::v2_token_utils::ObjectCore; +use super::token_models::v2_token_utils::ObjectWithMetadata; use crate::{ models::move_resources::MoveResource, schema::{current_objects, objects}, @@ -83,30 +83,31 @@ impl Object { txn_version: i64, write_set_change_index: i64, ) -> anyhow::Result> { - if let Some(inner) = ObjectCore::from_write_resource(write_resource, txn_version)? { + if let Some(inner) = ObjectWithMetadata::from_write_resource(write_resource, txn_version)? 
{ let resource = MoveResource::from_write_resource( write_resource, 0, // Placeholder, this isn't used anyway txn_version, 0, // Placeholder, this isn't used anyway ); + let object_core = &inner.object_core; Ok(Some(( Self { transaction_version: txn_version, write_set_change_index, object_address: resource.address.clone(), - owner_address: Some(inner.get_owner_address()), + owner_address: Some(object_core.get_owner_address()), state_key_hash: resource.state_key_hash.clone(), - guid_creation_num: Some(inner.guid_creation_num.clone()), - allow_ungated_transfer: Some(inner.allow_ungated_transfer), + guid_creation_num: Some(object_core.guid_creation_num.clone()), + allow_ungated_transfer: Some(object_core.allow_ungated_transfer), is_deleted: false, }, CurrentObject { object_address: resource.address, - owner_address: Some(inner.get_owner_address()), + owner_address: Some(object_core.get_owner_address()), state_key_hash: resource.state_key_hash, - allow_ungated_transfer: Some(inner.allow_ungated_transfer), - last_guid_creation_num: Some(inner.guid_creation_num), + allow_ungated_transfer: Some(object_core.allow_ungated_transfer), + last_guid_creation_num: Some(object_core.guid_creation_num.clone()), last_transaction_version: txn_version, is_deleted: false, }, diff --git a/crates/indexer/src/processors/token_processor.rs b/crates/indexer/src/processors/token_processor.rs index c4102ac6743cb..5dec69b6c6118 100644 --- a/crates/indexer/src/processors/token_processor.rs +++ b/crates/indexer/src/processors/token_processor.rs @@ -10,7 +10,10 @@ use crate::{ transaction_processor::TransactionProcessor, }, models::{ - coin_models::coin_activities::MAX_ENTRY_FUNCTION_LENGTH, + coin_models::{ + coin_activities::MAX_ENTRY_FUNCTION_LENGTH, + v2_fungible_asset_utils::{FungibleAssetMetadata, FungibleAssetSupply}, + }, token_models::{ ans_lookup::{CurrentAnsLookup, CurrentAnsLookupPK}, collection_datas::{CollectionData, CurrentCollectionData}, @@ -26,12 +29,13 @@ use crate::{ v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK}, v2_token_activities::TokenActivityV2, v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2}, + v2_token_metadata::{CurrentTokenV2Metadata, CurrentTokenV2MetadataPK}, v2_token_ownerships::{ CurrentTokenOwnershipV2, CurrentTokenOwnershipV2PK, NFTOwnershipV2, TokenOwnershipV2, }, v2_token_utils::{ - AptosCollection, BurnEvent, FixedSupply, ObjectCore, PropertyMap, TokenV2, + AptosCollection, BurnEvent, FixedSupply, ObjectWithMetadata, PropertyMap, TokenV2, TokenV2AggregatedData, TokenV2AggregatedDataMapping, TokenV2Burned, TransferEvent, UnlimitedSupply, }, @@ -105,6 +109,7 @@ fn insert_to_db_impl( current_token_datas_v2, current_token_ownerships_v2, token_activities_v2, + current_token_v2_metadata, ): ( &[CollectionV2], &[TokenDataV2], @@ -113,6 +118,7 @@ fn insert_to_db_impl( &[CurrentTokenDataV2], &[CurrentTokenOwnershipV2], &[TokenActivityV2], + &[CurrentTokenV2Metadata], ), ) -> Result<(), diesel::result::Error> { let (tokens, token_ownerships, token_datas, collection_datas) = basic_token_transaction_lists; @@ -136,6 +142,7 @@ fn insert_to_db_impl( insert_current_token_datas_v2(conn, current_token_datas_v2)?; insert_current_token_ownerships_v2(conn, current_token_ownerships_v2)?; insert_token_activities_v2(conn, token_activities_v2)?; + insert_current_token_v2_metadatas(conn, current_token_v2_metadata)?; Ok(()) } @@ -167,6 +174,7 @@ fn insert_to_db( current_token_datas_v2, current_token_ownerships_v2, token_activities_v2, + 
current_token_v2_metadata, ): ( Vec, Vec, @@ -175,6 +183,7 @@ fn insert_to_db( Vec, Vec, Vec, + Vec, ), ) -> Result<(), diesel::result::Error> { aptos_logger::trace!( @@ -210,6 +219,7 @@ fn insert_to_db( ¤t_token_datas_v2, ¤t_token_ownerships_v2, &token_activities_v2, + ¤t_token_v2_metadata, ), ) }) { @@ -237,6 +247,7 @@ fn insert_to_db( let current_token_ownerships_v2 = clean_data_for_db(current_token_ownerships_v2, true); let token_activities_v2 = clean_data_for_db(token_activities_v2, true); + let current_token_v2_metadata = clean_data_for_db(current_token_v2_metadata, true); insert_to_db_impl( pg_conn, @@ -258,6 +269,7 @@ fn insert_to_db( ¤t_token_datas_v2, ¤t_token_ownerships_v2, &token_activities_v2, + ¤t_token_v2_metadata, ), ) }), @@ -626,10 +638,8 @@ fn insert_token_datas_v2( .on_conflict((transaction_version, write_set_change_index)) .do_update() .set(( - maximum.eq(excluded(maximum)), - supply.eq(excluded(supply)), - token_properties.eq(excluded(token_properties)), inserted_at.eq(excluded(inserted_at)), + decimals.eq(excluded(decimals)), )), None, )?; @@ -651,7 +661,22 @@ fn insert_token_ownerships_v2( diesel::insert_into(schema::token_ownerships_v2::table) .values(&items_to_insert[start_ind..end_ind]) .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), + .do_update() + .set(( + token_data_id.eq(excluded(token_data_id)), + property_version_v1.eq(excluded(property_version_v1)), + owner_address.eq(excluded(owner_address)), + storage_id.eq(excluded(storage_id)), + amount.eq(excluded(amount)), + table_type_v1.eq(excluded(table_type_v1)), + token_properties_mutated_v1.eq(excluded(token_properties_mutated_v1)), + is_soulbound_v2.eq(excluded(is_soulbound_v2)), + token_standard.eq(excluded(token_standard)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + transaction_timestamp.eq(excluded(transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + non_transferrable_by_owner.eq(excluded(non_transferrable_by_owner)), + )), None, )?; } @@ -724,6 +749,7 @@ fn insert_current_token_datas_v2( last_transaction_version.eq(excluded(last_transaction_version)), last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), inserted_at.eq(excluded(inserted_at)), + decimals.eq(excluded(decimals)), )), Some(" WHERE current_token_datas_v2.last_transaction_version <= excluded.last_transaction_version "), )?; @@ -753,11 +779,13 @@ fn insert_current_token_ownerships_v2( amount.eq(excluded(amount)), table_type_v1.eq(excluded(table_type_v1)), token_properties_mutated_v1.eq(excluded(token_properties_mutated_v1)), + is_soulbound_v2.eq(excluded(is_soulbound_v2)), token_standard.eq(excluded(token_standard)), is_fungible_v2.eq(excluded(is_fungible_v2)), last_transaction_version.eq(excluded(last_transaction_version)), last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), inserted_at.eq(excluded(inserted_at)), + non_transferrable_by_owner.eq(excluded(non_transferrable_by_owner)), )), Some(" WHERE current_token_ownerships_v2.last_transaction_version <= excluded.last_transaction_version "), )?; @@ -786,6 +814,33 @@ fn insert_token_activities_v2( Ok(()) } +fn insert_current_token_v2_metadatas( + conn: &mut PgConnection, + items_to_insert: &[CurrentTokenV2Metadata], +) -> Result<(), diesel::result::Error> { + use schema::current_token_v2_metadata::dsl::*; + + let chunks = get_chunks(items_to_insert.len(), CurrentTokenV2Metadata::field_count()); + + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + 
diesel::insert_into(schema::current_token_v2_metadata::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict((object_address, resource_type)) + .do_update() + .set(( + data.eq(excluded(data)), + state_key_hash.eq(excluded(state_key_hash)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_token_v2_metadata.last_transaction_version <= excluded.last_transaction_version "), + )?; + } + Ok(()) +} + #[async_trait] impl TransactionProcessor for TokenTransactionProcessor { fn name(&self) -> &'static str { @@ -925,6 +980,7 @@ impl TransactionProcessor for TokenTransactionProcessor { current_token_ownerships_v2, current_token_datas_v2, token_activities_v2, + current_token_v2_metadata, ) = parse_v2_token(&transactions, &table_handle_to_owner, &mut conn); let tx_result = insert_to_db( @@ -956,6 +1012,7 @@ impl TransactionProcessor for TokenTransactionProcessor { current_token_ownerships_v2, current_token_datas_v2, token_activities_v2, + current_token_v2_metadata, ), ); match tx_result { @@ -990,6 +1047,7 @@ fn parse_v2_token( Vec, Vec, Vec, + Vec, ) { // Token V2 and V1 combined let mut collections_v2 = vec![]; @@ -1009,7 +1067,10 @@ fn parse_v2_token( // Get Metadata for token v2 by object // We want to persist this through the entire batch so that even if a token is burned, // we can still get the object core metadata for it - let mut token_v2_metadata: TokenV2AggregatedDataMapping = HashMap::new(); + let mut token_v2_metadata_helper: TokenV2AggregatedDataMapping = HashMap::new(); + // Basically token properties + let mut current_token_v2_metadata: HashMap = + HashMap::new(); // Code above is inefficient (multiple passthroughs) so I'm approaching TokenV2 with a cleaner code structure for txn in transactions { @@ -1029,19 +1090,21 @@ fn parse_v2_token( // Need to do a first pass to get all the objects for (_, wsc) in user_txn.info.changes.iter().enumerate() { if let WriteSetChange::WriteResource(wr) = wsc { - if let Some(object_core) = - ObjectCore::from_write_resource(wr, txn_version).unwrap() + if let Some(object) = + ObjectWithMetadata::from_write_resource(wr, txn_version).unwrap() { - token_v2_metadata.insert( + token_v2_metadata_helper.insert( standardize_address(&wr.address.to_string()), TokenV2AggregatedData { aptos_collection: None, fixed_supply: None, - object: object_core, + object, unlimited_supply: None, property_map: None, transfer_event: None, token: None, + fungible_asset_metadata: None, + fungible_asset_supply: None, }, ); } @@ -1052,7 +1115,7 @@ fn parse_v2_token( for (_, wsc) in user_txn.info.changes.iter().enumerate() { if let WriteSetChange::WriteResource(wr) = wsc { let address = standardize_address(&wr.address.to_string()); - if let Some(aggregated_data) = token_v2_metadata.get_mut(&address) { + if let Some(aggregated_data) = token_v2_metadata_helper.get_mut(&address) { if let Some(fixed_supply) = FixedSupply::from_write_resource(wr, txn_version).unwrap() { @@ -1077,6 +1140,16 @@ fn parse_v2_token( { aggregated_data.token = Some(token); } + if let Some(fungible_asset_metadata) = + FungibleAssetMetadata::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.fungible_asset_metadata = Some(fungible_asset_metadata); + } + if let Some(fungible_asset_supply) = + FungibleAssetSupply::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.fungible_asset_supply = Some(fungible_asset_supply); + } } } } @@ -1091,7 +1164,7 @@ fn parse_v2_token( if let 
Some(transfer_event) = TransferEvent::from_event(event, txn_version).unwrap() { if let Some(aggregated_data) = - token_v2_metadata.get_mut(&transfer_event.get_object_address()) + token_v2_metadata_helper.get_mut(&transfer_event.get_object_address()) { // we don't want index to be 0 otherwise we might have collision with write set change index let index = if index == 0 { @@ -1121,7 +1194,7 @@ fn parse_v2_token( txn_timestamp, index as i64, &entry_function_id_str, - &token_v2_metadata, + &token_v2_metadata_helper, ) .unwrap() { @@ -1237,7 +1310,7 @@ fn parse_v2_token( txn_version, wsc_index, txn_timestamp, - &token_v2_metadata, + &token_v2_metadata_helper, ) .unwrap() { @@ -1253,28 +1326,77 @@ fn parse_v2_token( txn_version, wsc_index, txn_timestamp, - &token_v2_metadata, + &token_v2_metadata_helper, ) .unwrap() { // Add NFT ownership - let ( - nft_ownership, - current_nft_ownership, - from_nft_ownership, - from_current_nft_ownership, - ) = TokenOwnershipV2::get_nft_v2_from_token_data( + if let Some(inner) = TokenOwnershipV2::get_nft_v2_from_token_data( &token_data, - &token_v2_metadata, + &token_v2_metadata_helper, ) - .unwrap(); + .unwrap() + { + let ( + nft_ownership, + current_nft_ownership, + from_nft_ownership, + from_current_nft_ownership, + ) = inner; + token_ownerships_v2.push(nft_ownership); + // this is used to persist latest owner for burn event handling + prior_nft_ownership.insert( + current_nft_ownership.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: current_nft_ownership.token_data_id.clone(), + owner_address: current_nft_ownership.owner_address.clone(), + is_soulbound: current_nft_ownership.is_soulbound_v2, + }, + ); + current_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + current_nft_ownership, + ); + // Add the previous owner of the token transfer + if let Some(from_nft_ownership) = from_nft_ownership { + let from_current_nft_ownership = + from_current_nft_ownership.unwrap(); + token_ownerships_v2.push(from_nft_ownership); + current_token_ownerships_v2.insert( + ( + from_current_nft_ownership.token_data_id.clone(), + from_current_nft_ownership.property_version_v1.clone(), + from_current_nft_ownership.owner_address.clone(), + from_current_nft_ownership.storage_id.clone(), + ), + from_current_nft_ownership, + ); + } + } token_datas_v2.push(token_data); current_token_datas_v2.insert( current_token_data.token_data_id.clone(), current_token_data, ); + } + + // Add burned NFT handling + if let Some((nft_ownership, current_nft_ownership)) = + TokenOwnershipV2::get_burned_nft_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &tokens_burned, + ) + .unwrap() + { token_ownerships_v2.push(nft_ownership); - // this is used to persist latest owner for burn event handling prior_nft_ownership.insert( current_nft_ownership.token_data_id.clone(), NFTOwnershipV2 { @@ -1292,52 +1414,46 @@ fn parse_v2_token( ), current_nft_ownership, ); - // Add the previous owner of the token transfer - if let Some(from_nft_ownership) = from_nft_ownership { - let from_current_nft_ownership = - from_current_nft_ownership.unwrap(); - token_ownerships_v2.push(from_nft_ownership); - current_token_ownerships_v2.insert( - ( - from_current_nft_ownership.token_data_id.clone(), - from_current_nft_ownership.property_version_v1.clone(), - from_current_nft_ownership.owner_address.clone(), - 
from_current_nft_ownership.storage_id.clone(), - ), - from_current_nft_ownership, - ); - } + } - // Add burned NFT handling - if let Some((nft_ownership, current_nft_ownership)) = - TokenOwnershipV2::get_burned_nft_v2_from_write_resource( - resource, - txn_version, - wsc_index, - txn_timestamp, - &tokens_burned, - ) - .unwrap() - { - token_ownerships_v2.push(nft_ownership); - prior_nft_ownership.insert( - current_nft_ownership.token_data_id.clone(), - NFTOwnershipV2 { - token_data_id: current_nft_ownership.token_data_id.clone(), - owner_address: current_nft_ownership.owner_address.clone(), - is_soulbound: current_nft_ownership.is_soulbound_v2, - }, - ); - current_token_ownerships_v2.insert( - ( - current_nft_ownership.token_data_id.clone(), - current_nft_ownership.property_version_v1.clone(), - current_nft_ownership.owner_address.clone(), - current_nft_ownership.storage_id.clone(), - ), - current_nft_ownership, - ); - } + // Add fungible token handling + if let Some((ft_ownership, current_ft_ownership)) = + TokenOwnershipV2::get_ft_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &token_v2_metadata_helper, + ) + .unwrap() + { + token_ownerships_v2.push(ft_ownership); + current_token_ownerships_v2.insert( + ( + current_ft_ownership.token_data_id.clone(), + current_ft_ownership.property_version_v1.clone(), + current_ft_ownership.owner_address.clone(), + current_ft_ownership.storage_id.clone(), + ), + current_ft_ownership, + ); + } + + // Track token properties + if let Some(token_metadata) = CurrentTokenV2Metadata::from_write_resource( + resource, + txn_version, + &token_v2_metadata_helper, + ) + .unwrap() + { + current_token_v2_metadata.insert( + ( + token_metadata.object_address.clone(), + token_metadata.resource_type.clone(), + ), + token_metadata, + ); } }, WriteSetChange::DeleteResource(resource) => { @@ -1390,6 +1506,9 @@ fn parse_v2_token( let mut current_token_ownerships_v2 = current_token_ownerships_v2 .into_values() .collect::>(); + let mut current_token_v2_metadata = current_token_v2_metadata + .into_values() + .collect::>(); // Sort by PK current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id)); @@ -1408,6 +1527,9 @@ fn parse_v2_token( &b.storage_id, )) }); + current_token_v2_metadata.sort_by(|a, b| { + (&a.object_address, &a.resource_type).cmp(&(&b.object_address, &b.resource_type)) + }); ( collections_v2, @@ -1417,5 +1539,6 @@ fn parse_v2_token( current_token_datas_v2, current_token_ownerships_v2, token_activities_v2, + current_token_v2_metadata, ) } diff --git a/crates/indexer/src/schema.rs b/crates/indexer/src/schema.rs index acdff3ee82739..72e28c765dfea 100644 --- a/crates/indexer/src/schema.rs +++ b/crates/indexer/src/schema.rs @@ -285,6 +285,7 @@ diesel::table! { last_transaction_version -> Int8, last_transaction_timestamp -> Timestamp, inserted_at -> Timestamp, + decimals -> Int8, } } @@ -321,6 +322,7 @@ diesel::table! { last_transaction_version -> Int8, last_transaction_timestamp -> Timestamp, inserted_at -> Timestamp, + non_transferrable_by_owner -> Nullable, } } @@ -344,6 +346,17 @@ diesel::table! { } } +diesel::table! { + current_token_v2_metadata (object_address, resource_type) { + object_address -> Varchar, + resource_type -> Varchar, + data -> Jsonb, + state_key_hash -> Varchar, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + } +} + diesel::table! { delegated_staking_activities (transaction_version, event_index) { transaction_version -> Int8, @@ -628,6 +641,7 @@ diesel::table! 
{ is_fungible_v2 -> Nullable, transaction_timestamp -> Timestamp, inserted_at -> Timestamp, + decimals -> Int8, } } @@ -665,6 +679,7 @@ diesel::table! { is_fungible_v2 -> Nullable, transaction_timestamp -> Timestamp, inserted_at -> Timestamp, + non_transferrable_by_owner -> Nullable, } } @@ -757,6 +772,7 @@ diesel::allow_tables_to_appear_in_same_query!( current_token_ownerships, current_token_ownerships_v2, current_token_pending_claims, + current_token_v2_metadata, delegated_staking_activities, delegated_staking_pool_balances, delegated_staking_pools, diff --git a/crates/transaction-emitter-lib/src/args.rs b/crates/transaction-emitter-lib/src/args.rs index 8402a1524eff8..d7b22fcbd1f20 100644 --- a/crates/transaction-emitter-lib/src/args.rs +++ b/crates/transaction-emitter-lib/src/args.rs @@ -84,11 +84,11 @@ pub struct ClusterArgs { impl ClusterArgs { pub fn get_targets(&self) -> Result> { - return match (&self.targets, &self.targets_file) { + match (&self.targets, &self.targets_file) { (Some(targets), _) => Ok(targets.clone()), (None, Some(target_file)) => Self::get_targets_from_file(target_file), (_, _) => Err(anyhow::anyhow!("Expected either targets or target_file")), - }; + } } fn get_targets_from_file(path: &String) -> Result> { diff --git a/crates/transaction-emitter-lib/src/emitter/account_minter.rs b/crates/transaction-emitter-lib/src/emitter/account_minter.rs index 88de068f20f2c..7b56d04549544 100644 --- a/crates/transaction-emitter-lib/src/emitter/account_minter.rs +++ b/crates/transaction-emitter-lib/src/emitter/account_minter.rs @@ -196,8 +196,10 @@ impl<'t> AccountMinter<'t> { request_counters.show_simple(), ); info!( - "Creating additional {} accounts with {} coins each", - num_accounts, coins_per_account + "Creating additional {} accounts with {} coins each (txn {} gas price)", + num_accounts, + coins_per_account, + txn_factory.get_gas_unit_price(), ); let seed_rngs = gen_rng_for_reusable_account(actual_num_seed_accounts); @@ -281,7 +283,10 @@ impl<'t> AccountMinter<'t> { max_submit_batch_size: usize, counters: &CounterState, ) -> Result> { - info!("Creating and funding seeds accounts"); + info!( + "Creating and funding seeds accounts (txn {} gas price)", + self.txn_factory.get_gas_unit_price() + ); let mut i = 0; let mut seed_accounts = vec![]; while i < seed_account_num { diff --git a/crates/transaction-emitter-lib/src/emitter/mod.rs b/crates/transaction-emitter-lib/src/emitter/mod.rs index 489de880ab8c3..7ce901dafec27 100644 --- a/crates/transaction-emitter-lib/src/emitter/mod.rs +++ b/crates/transaction-emitter-lib/src/emitter/mod.rs @@ -16,7 +16,7 @@ use again::RetryPolicy; use anyhow::{ensure, format_err, Result}; use aptos_config::config::DEFAULT_MAX_SUBMIT_TRANSACTION_BATCH_SIZE; use aptos_logger::{debug, error, info, sample, sample::SampleRate, warn}; -use aptos_rest_client::Client as RestClient; +use aptos_rest_client::{aptos_api_types::AptosErrorCode, error::RestError, Client as RestClient}; use aptos_sdk::{ move_types::account_address::AccountAddress, transaction_builder::{aptos_stdlib, TransactionFactory}, @@ -68,7 +68,7 @@ pub struct EmitModeParams { pub worker_offset_mode: WorkerOffsetMode, pub wait_millis: u64, pub check_account_sequence_only_once_fraction: f32, - pub check_account_sequence_sleep_millis: u64, + pub check_account_sequence_sleep: Duration, } #[derive(Clone, Debug)] @@ -140,6 +140,8 @@ pub struct EmitJobRequest { prompt_before_spending: bool, coordination_delay_between_instances: Duration, + + latency_polling_interval: Duration, } impl Default 
for EmitJobRequest { @@ -163,6 +165,7 @@ impl Default for EmitJobRequest { expected_gas_per_txn: aptos_global_constants::MAX_GAS_AMOUNT, prompt_before_spending: false, coordination_delay_between_instances: Duration::from_secs(0), + latency_polling_interval: Duration::from_millis(300), } } } @@ -257,6 +260,11 @@ impl EmitJobRequest { self } + pub fn latency_polling_interval(mut self, latency_polling_interval: Duration) -> Self { + self.latency_polling_interval = latency_polling_interval; + self + } + pub fn calculate_mode_params(&self) -> EmitModeParams { let clients_count = self.rest_clients.len(); @@ -294,7 +302,7 @@ impl EmitJobRequest { workers_per_endpoint: num_workers_per_endpoint, endpoints: clients_count, check_account_sequence_only_once_fraction: 0.0, - check_account_sequence_sleep_millis: 300, + check_account_sequence_sleep: self.latency_polling_interval, } }, EmitJobMode::ConstTps { tps } @@ -382,7 +390,7 @@ impl EmitJobRequest { workers_per_endpoint: num_workers_per_endpoint, endpoints: clients_count, check_account_sequence_only_once_fraction: 1.0 - sample_latency_fraction, - check_account_sequence_sleep_millis: 300, + check_account_sequence_sleep: self.latency_polling_interval, } }, } @@ -485,9 +493,41 @@ impl EmitJob { self.stats.accumulate(&self.phase_starts) } - pub fn accumulate(&self) -> Vec { + pub fn peek_and_accumulate(&self) -> Vec { self.stats.accumulate(&self.phase_starts) } + + pub async fn stop_job(self) -> Vec { + self.stop_and_accumulate().await + } + + pub async fn periodic_stat(&self, duration: Duration, interval_secs: u64) { + let deadline = Instant::now() + duration; + let mut prev_stats: Option> = None; + let default_stats = TxnStats::default(); + let window = Duration::from_secs(max(interval_secs, 1)); + loop { + let left = deadline.saturating_duration_since(Instant::now()); + if left.is_zero() { + break; + } + tokio::time::sleep(window.min(left)).await; + let cur_phase = self.stats.get_cur_phase(); + let stats = self.peek_and_accumulate(); + let delta = &stats[cur_phase] + - prev_stats + .as_ref() + .map(|p| &p[cur_phase]) + .unwrap_or(&default_stats); + prev_stats = Some(stats); + info!("phase {}: {}", cur_phase, delta.rate()); + } + } + + pub async fn periodic_stat_forward(self, duration: Duration, interval_secs: u64) -> Self { + self.periodic_stat(duration, interval_secs).await; + self + } } #[derive(Debug)] @@ -607,17 +647,23 @@ impl TxnEmitter { ); let all_start_sleep_durations = mode_params.get_all_start_sleep_durations(self.from_rng()); - let mut all_accounts_iter = all_accounts.into_iter(); - let mut workers = vec![]; + + // Creating workers is slow with many workers (TODO check why) + // so we create them all first, before starting them - so they start at the right time for + // traffic pattern to be correct. 
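// A minimal sketch of the construct-all-first pattern introduced above, assuming a
// simplified `Worker` type rather than the emitter's actual SubmissionWorker: every
// worker is built before any is started, and all of them measure their start offsets
// from one shared `phase_start` Instant, so the intended traffic pattern holds even
// when construction itself is slow.
use std::thread;
use std::time::{Duration, Instant};

struct Worker {
    start_offset: Duration,
}

impl Worker {
    fn run(self, phase_start: Instant) {
        // Sleep until this worker's slot, measured from the shared origin.
        let target = phase_start + self.start_offset;
        let now = Instant::now();
        if target > now {
            thread::sleep(target - now);
        }
        // ... generate and submit transactions here ...
    }
}

fn main() {
    // Construction (potentially slow) happens for every worker first.
    let workers: Vec<Worker> = (0..4)
        .map(|i| Worker {
            start_offset: Duration::from_millis(250 * i as u64),
        })
        .collect();

    // Only now is the common origin taken and the workers started.
    let phase_start = Instant::now();
    let handles: Vec<_> = workers
        .into_iter()
        .map(|w| thread::spawn(move || w.run(phase_start)))
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}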
+ info!("Tx emitter creating workers"); + let mut submission_workers = + Vec::with_capacity(workers_per_endpoint * req.rest_clients.len()); for _ in 0..workers_per_endpoint { for client in &req.rest_clients { - let accounts = (&mut all_accounts_iter) - .take(mode_params.accounts_per_worker) - .collect::>(); + let accounts = + all_accounts.split_off(all_accounts.len() - mode_params.accounts_per_worker); + assert!(accounts.len() == mode_params.accounts_per_worker); + let stop = stop.clone(); let stats = Arc::clone(&stats); let txn_generator = txn_generator_creator.create_transaction_generator(); - let worker_index = workers.len(); + let worker_index = submission_workers.len(); let worker = SubmissionWorker::new( accounts, @@ -630,47 +676,28 @@ impl TxnEmitter { check_account_sequence_only_once_for.contains(&worker_index), self.from_rng(), ); - let join_handle = tokio_handle.spawn(worker.run().boxed()); - workers.push(Worker { join_handle }); + submission_workers.push(worker); } } + + info!("Tx emitter workers created"); + let phase_start = Instant::now(); + let workers = submission_workers + .into_iter() + .map(|worker| Worker { + join_handle: tokio_handle.spawn(worker.run(phase_start).boxed()), + }) + .collect(); info!("Tx emitter workers started"); Ok(EmitJob { workers, stop, stats, - phase_starts: vec![Instant::now()], + phase_starts: vec![phase_start], }) } - pub async fn stop_job(self, job: EmitJob) -> Vec { - job.stop_and_accumulate().await - } - - pub fn peek_job_stats(&self, job: &EmitJob) -> Vec { - job.accumulate() - } - - pub async fn periodic_stat(&mut self, job: &EmitJob, duration: Duration, interval_secs: u64) { - let deadline = Instant::now() + duration; - let mut prev_stats: Option> = None; - let default_stats = TxnStats::default(); - let window = Duration::from_secs(max(interval_secs, 1)); - while Instant::now() < deadline { - tokio::time::sleep(window).await; - let cur_phase = job.stats.get_cur_phase(); - let stats = self.peek_job_stats(job); - let delta = &stats[cur_phase] - - prev_stats - .as_ref() - .map(|p| &p[cur_phase]) - .unwrap_or(&default_stats); - prev_stats = Some(stats); - info!("phase {}: {}", cur_phase, delta.rate()); - } - } - async fn emit_txn_for_impl( mut self, source_account: &mut LocalAccount, @@ -696,14 +723,13 @@ impl TxnEmitter { job.start_next_phase(); } if let Some(interval_secs) = print_stats_interval { - self.periodic_stat(&job, per_phase_duration, interval_secs) - .await; + job.periodic_stat(per_phase_duration, interval_secs).await; } else { time::sleep(per_phase_duration).await; } } info!("Ran for {} secs, stopping job...", duration.as_secs()); - let stats = self.stop_job(job).await; + let stats = job.stop_job().await; info!("Stopped job"); Ok(stats.into_iter().next().unwrap()) } @@ -908,27 +934,13 @@ pub async fn query_sequence_numbers<'a, I>( where I: Iterator, { - let (addresses, futures): (Vec<_>, Vec<_>) = addresses - .map(|address| { - ( - *address, - RETRY_POLICY.retry(move || client.get_account_bcs(*address)), - ) - }) - .unzip(); + let futures = addresses + .map(|address| RETRY_POLICY.retry(move || get_account_if_exists(client, *address))); let (seq_nums, timestamps): (Vec<_>, Vec<_>) = try_join_all(futures) .await .map_err(|e| format_err!("Get accounts failed: {:?}", e))? 
.into_iter() - .zip(addresses.iter()) - .map(|(resp, address)| { - let (account, state) = resp.into_parts(); - ( - (*address, account.sequence_number()), - Duration::from_micros(state.timestamp_usecs).as_secs(), - ) - }) .unzip(); // return min for the timestamp, to make sure @@ -936,6 +948,33 @@ where Ok((seq_nums, timestamps.into_iter().min().unwrap())) } +async fn get_account_if_exists( + client: &RestClient, + address: AccountAddress, +) -> Result<((AccountAddress, u64), u64)> { + let result = client.get_account_bcs(address).await; + match &result { + Ok(resp) => Ok(( + (address, resp.inner().sequence_number()), + Duration::from_micros(resp.state().timestamp_usecs).as_secs(), + )), + Err(e) => { + // if account is not present, that is equivalent to sequence_number = 0 + if let RestError::Api(api_error) = e { + if let AptosErrorCode::AccountNotFound = api_error.error.error_code { + return Ok(( + (address, 0), + Duration::from_micros(api_error.state.as_ref().unwrap().timestamp_usecs) + .as_secs(), + )); + } + } + result?; + unreachable!() + }, + } +} + pub fn gen_transfer_txn_request( sender: &mut LocalAccount, receiver: &AccountAddress, diff --git a/crates/transaction-emitter-lib/src/emitter/stats.rs b/crates/transaction-emitter-lib/src/emitter/stats.rs index 471757400e394..e95d092453718 100644 --- a/crates/transaction-emitter-lib/src/emitter/stats.rs +++ b/crates/transaction-emitter-lib/src/emitter/stats.rs @@ -40,8 +40,12 @@ impl fmt::Display for TxnStatsRate { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "submitted: {} txn/s, committed: {} txn/s, expired: {} txn/s, failed submission: {} tnx/s, latency: {} ms, (p50: {} ms, p90: {} ms, p99: {} ms), latency samples: {}", - self.submitted, self.committed, self.expired, self.failed_submission, self.latency, self.p50_latency, self.p90_latency, self.p99_latency, self.latency_samples, + "committed: {} txn/s{}{}{}, latency: {} ms, (p50: {} ms, p90: {} ms, p99: {} ms), latency samples: {}", + self.committed, + if self.submitted != self.committed { format!(", submitted: {} txn/s", self.submitted) } else { "".to_string()}, + if self.failed_submission != 0 { format!(", failed submission: {} txn/s", self.failed_submission) } else { "".to_string()}, + if self.expired != 0 { format!(", expired: {} txn/s", self.expired) } else { "".to_string()}, + self.latency, self.p50_latency, self.p90_latency, self.p99_latency, self.latency_samples, ) } } diff --git a/crates/transaction-emitter-lib/src/emitter/submission_worker.rs b/crates/transaction-emitter-lib/src/emitter/submission_worker.rs index 2215c6834fd55..499c17e0f091f 100644 --- a/crates/transaction-emitter-lib/src/emitter/submission_worker.rs +++ b/crates/transaction-emitter-lib/src/emitter/submission_worker.rs @@ -8,7 +8,7 @@ use crate::{ }, EmitModeParams, }; -use aptos_logger::{sample, sample::SampleRate, warn}; +use aptos_logger::{info, sample, sample::SampleRate, warn}; use aptos_rest_client::Client as RestClient; use aptos_sdk::{ move_types::account_address::AccountAddress, @@ -69,21 +69,22 @@ impl SubmissionWorker { } #[allow(clippy::collapsible_if)] - pub(crate) async fn run(mut self) -> Vec { - let start_time = Instant::now() + self.start_sleep_duration; - - self.sleep_check_done(self.start_sleep_duration).await; + pub(crate) async fn run(mut self, start_instant: Instant) -> Vec { + let mut wait_until = start_instant + self.start_sleep_duration; + let now = Instant::now(); + if wait_until > now { + self.sleep_check_done(wait_until - now).await; + } let wait_duration 
= Duration::from_millis(self.params.wait_millis); - let mut wait_until = start_time; while !self.stop.load(Ordering::Relaxed) { let stats_clone = self.stats.clone(); let loop_stats = stats_clone.get_cur(); - let loop_start_time = Arc::new(Instant::now()); + let loop_start_time = Instant::now(); if wait_duration.as_secs() > 0 - && loop_start_time.duration_since(wait_until) > wait_duration + && loop_start_time.duration_since(wait_until) > Duration::from_secs(5) { sample!( SampleRate::Duration(Duration::from_secs(120)), @@ -98,74 +99,99 @@ impl SubmissionWorker { wait_until += wait_duration; let requests = self.gen_requests(); + if !requests.is_empty() { + let mut account_to_start_and_end_seq_num = HashMap::new(); + for req in requests.iter() { + let cur = req.sequence_number(); + let _ = *account_to_start_and_end_seq_num + .entry(req.sender()) + .and_modify(|(start, end)| { + if *start > cur { + *start = cur; + } + if *end < cur + 1 { + *end = cur + 1; + } + }) + .or_insert((cur, cur + 1)); + } + // Some transaction generators use burner accounts, and will have different + // number of accounts per transaction, so useful to very rarely log. + sample!( + SampleRate::Duration(Duration::from_secs(300)), + info!( + "[{:?}] txn_emitter worker: handling {} accounts, generated txns for: {}", + self.client.path_prefix_string(), + self.accounts.len(), + account_to_start_and_end_seq_num.len(), + ) + ); - let mut account_to_start_and_end_seq_num = HashMap::new(); - for req in requests.iter() { - let cur = req.sequence_number(); - let _ = *account_to_start_and_end_seq_num - .entry(req.sender()) - .and_modify(|(start, end)| { - if *start > cur { - *start = cur; - } - if *end < cur + 1 { - *end = cur + 1; - } - }) - .or_insert((cur, cur + 1)); - } + let txn_expiration_time = requests + .iter() + .map(|txn| txn.expiration_timestamp_secs()) + .max() + .unwrap_or(0); - let txn_expiration_time = requests - .iter() - .map(|txn| txn.expiration_timestamp_secs()) - .max() - .unwrap_or(0); + let txn_offset_time = Arc::new(AtomicU64::new(0)); - let txn_offset_time = Arc::new(AtomicU64::new(0)); + join_all( + requests + .chunks(self.params.max_submit_batch_size) + .map(|reqs| { + submit_transactions( + &self.client, + reqs, + loop_start_time, + txn_offset_time.clone(), + loop_stats, + ) + }), + ) + .await; - join_all( - requests - .chunks(self.params.max_submit_batch_size) - .map(|reqs| { - submit_transactions( - &self.client, - reqs, - loop_start_time.clone(), - txn_offset_time.clone(), - loop_stats, + let submitted_after = loop_start_time.elapsed(); + if submitted_after.as_secs() > 5 { + sample!( + SampleRate::Duration(Duration::from_secs(120)), + warn!( + "[{:?}] txn_emitter worker waited for more than 5s to submit transactions: {}s after loop start", + self.client.path_prefix_string(), + submitted_after.as_secs(), ) - }), - ) - .await; + ); + } - if self.skip_latency_stats { - // we also don't want to be stuck waiting for txn_expiration_time_secs - // after stop is called, so we sleep until time or stop is set. - self.sleep_check_done(Duration::from_secs( - self.params.txn_expiration_time_secs + 20, - )) - .await - } + if self.skip_latency_stats { + // we also don't want to be stuck waiting for txn_expiration_time_secs + // after stop is called, so we sleep until time or stop is set. 
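// A minimal, synchronous sketch of the behaviour sleep_check_done is relied on for
// here, assuming a plain AtomicBool as the stop flag (the worker's real method is
// async and shares an Arc<AtomicBool>): sleep in small slices up to a deadline so
// that a stop request is noticed quickly instead of blocking for the full duration.
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::thread;
use std::time::{Duration, Instant};

fn sleep_check_done(stop: &AtomicBool, duration: Duration) {
    let deadline = Instant::now() + duration;
    loop {
        if stop.load(Ordering::Relaxed) {
            return;
        }
        let left = deadline.saturating_duration_since(Instant::now());
        if left.is_zero() {
            return;
        }
        // Never oversleep the deadline, and never sleep so long that a stop
        // request goes unnoticed for more than ~100ms.
        thread::sleep(left.min(Duration::from_millis(100)));
    }
}

fn main() {
    let stop = Arc::new(AtomicBool::new(false));
    let sleeper = {
        let stop = stop.clone();
        thread::spawn(move || sleep_check_done(&stop, Duration::from_secs(30)))
    };
    // Request a stop shortly afterwards; the sleeper returns well before 30s elapse.
    thread::sleep(Duration::from_millis(300));
    stop.store(true, Ordering::Relaxed);
    sleeper.join().unwrap();
}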
+ self.sleep_check_done(Duration::from_secs( + self.params.txn_expiration_time_secs + 20, + )) + .await + } - self.wait_and_update_stats( - *loop_start_time, - txn_offset_time.load(Ordering::Relaxed) / (requests.len() as u64), - account_to_start_and_end_seq_num, - // skip latency if asked to check seq_num only once - // even if we check more often due to stop (to not affect sampling) - self.skip_latency_stats, - txn_expiration_time, - // if we don't care about latency, we can recheck less often. - // generally, we should never need to recheck, as we wait enough time - // before calling here, but in case of shutdown/or client we are talking - // to being stale (having stale transaction_version), we might need to wait. - Duration::from_millis( - if self.skip_latency_stats { 10 } else { 1 } - * self.params.check_account_sequence_sleep_millis, - ), - loop_stats, - ) - .await; + self.wait_and_update_stats( + loop_start_time, + txn_offset_time.load(Ordering::Relaxed) / (requests.len() as u64), + account_to_start_and_end_seq_num, + // skip latency if asked to check seq_num only once + // even if we check more often due to stop (to not affect sampling) + self.skip_latency_stats, + txn_expiration_time, + // if we don't care about latency, we can recheck less often. + // generally, we should never need to recheck, as we wait enough time + // before calling here, but in case of shutdown/or client we are talking + // to being stale (having stale transaction_version), we might need to wait. + if self.skip_latency_stats { + (10 * self.params.check_account_sequence_sleep).max(Duration::from_secs(3)) + } else { + self.params.check_account_sequence_sleep + }, + loop_stats, + ) + .await; + } let now = Instant::now(); if wait_until > now { @@ -289,12 +315,12 @@ impl SubmissionWorker { pub async fn submit_transactions( client: &RestClient, txns: &[SignedTransaction], - loop_start_time: Arc, + loop_start_time: Instant, txn_offset_time: Arc, stats: &StatsAccumulator, ) { let cur_time = Instant::now(); - let offset = cur_time - *loop_start_time; + let offset = cur_time - loop_start_time; txn_offset_time.fetch_add( txns.len() as u64 * offset.as_millis() as u64, Ordering::Relaxed, @@ -356,11 +382,12 @@ pub async fn submit_transactions( .map_or(-1, |v| v.into_inner().get() as i64); warn!( - "[{:?}] Failed to submit {} txns in a batch, first failure due to {:?}, for account {}, first asked: {}, failed seq nums: {:?}, failed error codes: {:?}, balance of {} and last transaction for account: {:?}", + "[{:?}] Failed to submit {} txns in a batch, first failure due to {:?}, for account {}, chain id: {:?}, first asked: {}, failed seq nums: {:?}, failed error codes: {:?}, balance of {} and last transaction for account: {:?}", client.path_prefix_string(), failures.len(), failure, sender, + txns[0].chain_id(), txns[0].sequence_number(), failures.iter().map(|f| txns[f.transaction_index].sequence_number()).collect::>(), by_error, diff --git a/crates/transaction-emitter-lib/src/wrappers.rs b/crates/transaction-emitter-lib/src/wrappers.rs index b690f2e992b4a..14cf2c53fd6cf 100644 --- a/crates/transaction-emitter-lib/src/wrappers.rs +++ b/crates/transaction-emitter-lib/src/wrappers.rs @@ -22,8 +22,7 @@ pub async fn emit_transactions( let cluster = Cluster::try_from_cluster_args(cluster_args) .await .context("Failed to build cluster")?; - return emit_transactions_with_cluster(&cluster, emit_args, cluster_args.reuse_accounts) - .await; + emit_transactions_with_cluster(&cluster, emit_args, cluster_args.reuse_accounts).await } else { 
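// A minimal sketch of the "missing account means sequence number 0" fallback that
// get_account_if_exists (added a few hunks above) applies, using a simplified
// stand-in error type rather than aptos_rest_client's RestError/AptosErrorCode:
// a brand-new account has never committed a transaction, so for the emitter it is
// equivalent to sequence number 0 rather than a failure of the whole batch query.
#[derive(Debug, PartialEq)]
enum FetchError {
    AccountNotFound,
    Other(String),
}

fn sequence_number_or_default(fetched: Result<u64, FetchError>) -> Result<u64, FetchError> {
    match fetched {
        Ok(seq_num) => Ok(seq_num),
        // Not-yet-created accounts are expected; everything else still fails.
        Err(FetchError::AccountNotFound) => Ok(0),
        Err(other) => Err(other),
    }
}

fn main() {
    assert_eq!(sequence_number_or_default(Ok(42)), Ok(42));
    assert_eq!(
        sequence_number_or_default(Err(FetchError::AccountNotFound)),
        Ok(0)
    );
    assert!(sequence_number_or_default(Err(FetchError::Other("timeout".into()))).is_err());
}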
let initial_delay_after_minting = emit_args.coordination_delay_between_instances.unwrap(); let start_time = Instant::now(); @@ -173,6 +172,7 @@ pub async fn emit_transactions_with_cluster( if !cluster.coin_source_is_root { emit_job_request = emit_job_request.prompt_before_spending(); } + let stats = emitter .emit_txn_for_with_stats( &mut coin_source_account, diff --git a/crates/transaction-generator-lib/src/args.rs b/crates/transaction-generator-lib/src/args.rs index d50e4fb8ca7fa..5c5906e707e60 100644 --- a/crates/transaction-generator-lib/src/args.rs +++ b/crates/transaction-generator-lib/src/args.rs @@ -6,13 +6,15 @@ use clap::{ArgEnum, Parser}; use serde::{Deserialize, Serialize}; /// Utility class for specifying transaction type with predefined configurations through CLI -#[derive(Debug, Copy, Clone, ArgEnum, Deserialize, Parser, Serialize)] +#[derive(Debug, Copy, Clone, ArgEnum, Default, Deserialize, Parser, Serialize)] pub enum TransactionTypeArg { NoOp, NoOp2Signers, NoOp5Signers, + #[default] CoinTransfer, CoinTransferWithInvalid, + NonConflictingCoinTransfer, AccountGeneration, AccountGenerationLargePool, PublishPackage, @@ -30,12 +32,6 @@ pub enum TransactionTypeArg { TokenV2AmbassadorMint, } -impl Default for TransactionTypeArg { - fn default() -> Self { - TransactionTypeArg::CoinTransfer - } -} - impl TransactionTypeArg { pub fn materialize_default(&self) -> TransactionType { self.materialize(1, false) @@ -51,6 +47,12 @@ impl TransactionTypeArg { invalid_transaction_ratio: 0, sender_use_account_pool, }, + TransactionTypeArg::NonConflictingCoinTransfer => { + TransactionType::NonConflictingCoinTransfer { + invalid_transaction_ratio: 0, + sender_use_account_pool, + } + }, TransactionTypeArg::CoinTransferWithInvalid => TransactionType::CoinTransfer { invalid_transaction_ratio: 10, sender_use_account_pool, diff --git a/crates/transaction-generator-lib/src/entry_points.rs b/crates/transaction-generator-lib/src/entry_points.rs index 511c68e1ea61f..f1287f6d8f157 100644 --- a/crates/transaction-generator-lib/src/entry_points.rs +++ b/crates/transaction-generator-lib/src/entry_points.rs @@ -56,7 +56,6 @@ impl UserModuleTransactionGenerator for EntryPointTransactionGenerator { MultiSigConfig::Random(num) => { let new_accounts = Arc::new( (0..num) - .into_iter() .map(|_| LocalAccount::generate(rng)) .collect::>(), ); diff --git a/crates/transaction-generator-lib/src/lib.rs b/crates/transaction-generator-lib/src/lib.rs index 06dc51558a2ae..2cd27808c5347 100644 --- a/crates/transaction-generator-lib/src/lib.rs +++ b/crates/transaction-generator-lib/src/lib.rs @@ -42,7 +42,7 @@ use self::{ use crate::{ accounts_pool_wrapper::AccountsPoolWrapperCreator, batch_transfer::BatchTransferTransactionGeneratorCreator, - entry_points::EntryPointTransactionGenerator, + entry_points::EntryPointTransactionGenerator, p2p_transaction_generator::SamplingMode, }; pub use publishing::module_simple::EntryPoints; @@ -50,6 +50,10 @@ pub const SEND_AMOUNT: u64 = 1; #[derive(Debug, Copy, Clone)] pub enum TransactionType { + NonConflictingCoinTransfer { + invalid_transaction_ratio: usize, + sender_use_account_pool: bool, + }, CoinTransfer { invalid_transaction_ratio: usize, sender_use_account_pool: bool, @@ -211,6 +215,20 @@ pub async fn create_txn_generator_creator( for (transaction_type, weight) in transaction_mix { let txn_generator_creator: Box = match transaction_type { + TransactionType::NonConflictingCoinTransfer { + invalid_transaction_ratio, + sender_use_account_pool, + } => wrap_accounts_pool( + 
Box::new(P2PTransactionGeneratorCreator::new( + txn_factory.clone(), + SEND_AMOUNT, + addresses_pool.clone(), + *invalid_transaction_ratio, + SamplingMode::BurnAndRecycle(addresses_pool.read().len() / 2), + )), + *sender_use_account_pool, + accounts_pool.clone(), + ), TransactionType::CoinTransfer { invalid_transaction_ratio, sender_use_account_pool, @@ -220,6 +238,7 @@ pub async fn create_txn_generator_creator( SEND_AMOUNT, addresses_pool.clone(), *invalid_transaction_ratio, + SamplingMode::Basic, )), *sender_use_account_pool, accounts_pool.clone(), diff --git a/crates/transaction-generator-lib/src/p2p_transaction_generator.rs b/crates/transaction-generator-lib/src/p2p_transaction_generator.rs index 7a3f9a6c1bf0e..e45c453f58f45 100644 --- a/crates/transaction-generator-lib/src/p2p_transaction_generator.rs +++ b/crates/transaction-generator-lib/src/p2p_transaction_generator.rs @@ -13,29 +13,161 @@ use rand::{ rngs::StdRng, Rng, RngCore, SeedableRng, }; -use std::{cmp::max, sync::Arc}; +use std::{ + cmp::{max, min}, + sync::Arc, +}; + +pub enum SamplingMode { + /// See `BasicSampler`. + Basic, + /// See `BurnAndRecycleSampler`. + BurnAndRecycle(usize), +} + +/// Specifies how to get a given number of samples from an item pool. +pub trait Sampler: Send + Sync { + fn sample_from_pool( + &mut self, + rng: &mut StdRng, + pool: &mut Vec, + num_samples: usize, + ) -> Vec; +} + +/// A sampler that samples a random subset of the pool. Samples are replaced immediately. +pub struct BasicSampler {} + +impl BasicSampler { + fn new() -> Self { + Self {} + } +} + +impl Sampler for BasicSampler { + fn sample_from_pool( + &mut self, + rng: &mut StdRng, + pool: &mut Vec, + num_samples: usize, + ) -> Vec { + let mut samples = Vec::with_capacity(num_samples); + let num_available = pool.len(); + for _ in 0..num_samples { + let idx = rng.gen_range(0, num_available); + samples.push(pool[idx].clone()); + } + samples + } +} + +/// A samplers that samples from a pool but do not replace items until the pool is depleted. +/// The pool is divided into sub-pools. Replacement is done with with each sub-pool shuffled internally. +/// +/// Here is an example. Say the initial pool is `[I, J, K, X, Y, Z]`. +/// A `BurnAndRecycleSampler` is created with `replace_batch_size=3` to sample from the pool. +/// The first 6 samples are guaranteed to be `Z`, `Y`, `X`, `K`, `J`, `I`. +/// Then at the beginning of the 7-th sampling, +/// sub-pools `{I, J, K}`, `{X, Y, Z}` are shuffled and replaced. +/// A possible state of the pool is `[K, I, J, Y, X, Z]`. +/// +/// This behavior helps generate a block of non-conflicting coin transfer transactions, +/// when there are 2+ sub-pools of size larger than or equal to the block size. +pub struct BurnAndRecycleSampler { + /// We store all sub-pools together in 1 Vec: `item_pool[segment_size * x..segment_size * (x+1)]` being the x-th sub-pool. 
+ to_be_replaced: Vec, + sub_pool_size: usize, +} + +impl BurnAndRecycleSampler { + fn new(replace_batch_size: usize) -> Self { + Self { + to_be_replaced: vec![], + sub_pool_size: replace_batch_size, + } + } + + fn sample_one_from_pool(&mut self, rng: &mut StdRng, pool: &mut Vec) -> T { + if pool.is_empty() { + let num_addresses = self.to_be_replaced.len(); + for replace_batch_start in (0..num_addresses).step_by(self.sub_pool_size) { + let end = min(replace_batch_start + self.sub_pool_size, num_addresses); + self.to_be_replaced[replace_batch_start..end].shuffle(rng); + } + for _ in 0..num_addresses { + pool.push(self.to_be_replaced.pop().unwrap()); + } + } + let sample = pool.pop().unwrap(); + self.to_be_replaced.push(sample.clone()); + sample + } +} + +impl Sampler for BurnAndRecycleSampler { + fn sample_from_pool( + &mut self, + rng: &mut StdRng, + pool: &mut Vec, + num_samples: usize, + ) -> Vec { + (0..num_samples) + .map(|_| self.sample_one_from_pool(rng, pool)) + .collect() + } +} + +#[test] +fn test_burn_and_recycle_sampler() { + use std::collections::HashSet; + let mut rng = StdRng::from_entropy(); + let mut sampler = BurnAndRecycleSampler::new(3); + let mut pool: Vec = (0..8).collect(); + let samples = (0..16) + .map(|_| sampler.sample_one_from_pool(&mut rng, &mut pool)) + .collect::>(); + // `samples[0..3]` and `samples[8..11]` are 2 permutations of sub-pool 0. + assert_eq!( + samples[0..3].iter().collect::>(), + samples[8..11].iter().collect::>() + ); + // `samples[3..6]` and `samples[11..14]` are 2 permutations of sub-pool 1. + assert_eq!( + samples[3..6].iter().collect::>(), + samples[11..14].iter().collect::>() + ); + // `samples[6..8]` and `samples[14..16]` are 2 permutations of sub-pool 1. + assert_eq!( + samples[6..8].iter().collect::>(), + samples[14..16].iter().collect::>() + ); +} pub struct P2PTransactionGenerator { rng: StdRng, send_amount: u64, txn_factory: TransactionFactory, all_addresses: Arc>>, + sampler: Box>, invalid_transaction_ratio: usize, } impl P2PTransactionGenerator { pub fn new( - rng: StdRng, + mut rng: StdRng, send_amount: u64, txn_factory: TransactionFactory, all_addresses: Arc>>, invalid_transaction_ratio: usize, + sampler: Box>, ) -> Self { + all_addresses.write().shuffle(&mut rng); Self { rng, send_amount, txn_factory, all_addresses, + sampler, invalid_transaction_ratio, } } @@ -53,7 +185,7 @@ impl P2PTransactionGenerator { } fn generate_invalid_transaction( - &self, + &mut self, rng: &mut StdRng, sender: &mut LocalAccount, receiver: &AccountAddress, @@ -131,12 +263,12 @@ impl TransactionGenerator for P2PTransactionGenerator { }; let mut num_valid_tx = num_to_create * (1 - invalid_size); - let receivers = self - .all_addresses - .read() - .choose_multiple(&mut self.rng, num_to_create) - .cloned() - .collect::>(); + let receivers: Vec = { + let mut all_addrs = self.all_addresses.write(); + self.sampler + .sample_from_pool(&mut self.rng, all_addrs.as_mut(), num_to_create) + }; + assert!( receivers.len() >= num_to_create, "failed: {} >= {}", @@ -167,6 +299,7 @@ pub struct P2PTransactionGeneratorCreator { amount: u64, all_addresses: Arc>>, invalid_transaction_ratio: usize, + sampling_mode: SamplingMode, } impl P2PTransactionGeneratorCreator { @@ -175,24 +308,34 @@ impl P2PTransactionGeneratorCreator { amount: u64, all_addresses: Arc>>, invalid_transaction_ratio: usize, + sampling_mode: SamplingMode, ) -> Self { Self { txn_factory, amount, all_addresses, invalid_transaction_ratio, + sampling_mode, } } } impl TransactionGeneratorCreator for 
P2PTransactionGeneratorCreator { fn create_transaction_generator(&mut self) -> Box { + let rng = StdRng::from_entropy(); + let sampler: Box> = match self.sampling_mode { + SamplingMode::Basic => Box::new(BasicSampler::new()), + SamplingMode::BurnAndRecycle(recycle_batch_size) => { + Box::new(BurnAndRecycleSampler::new(recycle_batch_size)) + }, + }; Box::new(P2PTransactionGenerator::new( - StdRng::from_entropy(), + rng, self.amount, self.txn_factory.clone(), self.all_addresses.clone(), self.invalid_transaction_ratio, + sampler, )) } } diff --git a/dashboards/blockchain-health.json b/dashboards/blockchain-health.json index c3e82309d5c80..fe5cc5a3afcab 100644 --- a/dashboards/blockchain-health.json +++ b/dashboards/blockchain-health.json @@ -58,7 +58,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 20, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -1182,7 +1181,7 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "sum by (role, bucket) (rate(aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", stage=\"commit_accepted\"}[$interval])) / sum by (role, bucket) (rate(aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", stage=\"commit_accepted\"}[$interval]))", + "expr": "sum by (role, scope, bucket, submitted_by) (rate(aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", stage=\"commit_accepted\"}[$interval])) / sum by (role, scope, bucket, submitted_by) (rate(aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", stage=\"commit_accepted\"}[$interval]))", "legendFormat": "__auto", "range": true, "refId": "A" @@ -1218,7 +1217,13 @@ }, "mappings": [], "min": 0, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "short" }, "overrides": [] @@ -1271,7 +1276,13 @@ }, "mappings": [], "min": 0, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "short" }, "overrides": [] @@ -1297,6 +1308,64 @@ "title": "Transactions rejected by gas price bucket", "type": "timeseries" }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Data on PFN nodes only. 
Unless clients send duplicate transactions, this is a reliable way to see E2E latency.\n\nThe time between Mempool receiving the transaction and time to be committed.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 51 }, + "id": 193, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sum by (bucket, submitted_by) (rate(aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", kubernetes_pod_name=~\"pfn.*\", stage=\"commit_accepted\"}[$interval])) / sum by (bucket, submitted_by) (rate(aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", kubernetes_pod_name=~\"pfn.*\", stage=\"commit_accepted\"}[$interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Aptos PFN-only Per Bucket Avg E2E Txn Commit Latency", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "description": "Rate of consensus expired transactions per ranking score bucket", @@ -1324,12 +1393,18 @@ }, "mappings": [], "min": 0, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 51 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 59 }, "id": 181, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1352,7 +1427,7 @@ }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 59 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 67 }, "id": 169, "panels": [], "title": "Mempool", @@ -1388,7 +1463,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 60 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 68 }, "id": 170, "interval": "2m", "options": { @@ -1410,7 +1485,7 @@ }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 68 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 76 }, "id": 137, "panels": [], "title": "Detailed Health", @@ -1446,7 +1521,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 69 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 77 }, "id": 142, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": 
"bottom", "showLegend": false }, @@ -1495,7 +1570,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 69 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 77 }, "id": 143, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1544,7 +1619,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 69 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 77 }, "id": 151, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1593,7 +1668,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 77 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 85 }, "id": 135, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1642,7 +1717,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 77 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 85 }, "id": 138, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1691,7 +1766,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 77 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 85 }, "id": 139, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1749,7 +1824,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 85 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 93 }, "id": 144, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1834,7 +1909,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 85 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 93 }, "id": 145, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -1892,7 +1967,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 85 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 93 }, "id": 171, "options": { "footer": { "countRows": false, "enablePagination": false, "fields": "", "reducer": ["sum"], "show": false }, @@ -1943,9 +2018,8 @@ }, "overrides": [] }, - "gridPos": { "h": 7, "w": 8, "x": 0, "y": 93 }, + "gridPos": { "h": 7, "w": 8, "x": 0, "y": 101 }, "id": 189, - "isNew": false, "options": { "footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false }, "showHeader": true @@ -2028,7 +2102,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 93 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 101 }, "id": 190, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2086,7 +2160,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 93 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 101 }, "id": 191, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2107,7 +2181,7 @@ }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 101 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 109 }, "id": 174, "panels": [], "title": "Halted Round Details", @@ -2143,7 +2217,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 102 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 110 }, "id": 176, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2192,7 +2266,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 102 }, + "gridPos": { 
"h": 8, "w": 8, "x": 8, "y": 110 }, "id": 192, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2241,7 +2315,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 102 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 110 }, "id": 177, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2290,7 +2364,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 110 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 118 }, "id": 175, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2340,7 +2414,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 110 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 118 }, "id": 178, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2364,9 +2438,8 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 118 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 126 }, "id": 18, - "isNew": false, "panels": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -2483,9 +2556,8 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 119 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 127 }, "id": 59, - "isNew": false, "panels": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -2600,9 +2672,8 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 120 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 128 }, "id": 55, - "isNew": false, "panels": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -2772,9 +2843,8 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 121 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 129 }, "id": 52, - "isNew": false, "panels": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -2940,7 +3010,7 @@ "type": "row" } ], - "refresh": false, + "refresh": "", "revision": 1, "schemaVersion": 38, "style": "dark", @@ -3158,6 +3228,6 @@ "timezone": "browser", "title": "blockchain-health", "uid": "JnOvNs4Vk", - "version": 7, + "version": 12, "weekStart": "" } diff --git a/dashboards/blockchain-health.json.gz b/dashboards/blockchain-health.json.gz index 1f51b2c6c53be..ffda3c34025a1 100644 Binary files a/dashboards/blockchain-health.json.gz and b/dashboards/blockchain-health.json.gz differ diff --git a/dashboards/developer-platform-client-metrics.json b/dashboards/developer-platform-client-metrics.json index a87ec93fea775..c26e3cf32f0ce 100644 --- a/dashboards/developer-platform-client-metrics.json +++ b/dashboards/developer-platform-client-metrics.json @@ -19,9 +19,22 @@ "links": [], "liveNow": false, "panels": [ + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 0 }, + "id": 9, + "options": { + "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, + "content": "In order to view metrics for:\n- mainnet\n - datasource: VictoriaMetrics Mainnet\n - chain_name: mainnet\n- testnet\n - datasource: VictoriaMetrics Global (Non-mainnet)\n - chain_name: testnet\n- 
devnet\n - datasource: VictoriaMetrics Global (Non-mainnet)\n - chain_name: devnet", + "mode": "markdown" + }, + "pluginVersion": "10.0.0-cloud.3.b04cc88b", + "title": "Guide", + "type": "text" + }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 8 }, "id": 5, "panels": [], "title": "Aggregations", @@ -29,20 +42,20 @@ }, { "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, - "gridPos": { "h": 3, "w": 24, "x": 0, "y": 1 }, + "gridPos": { "h": 3, "w": 24, "x": 0, "y": 9 }, "id": 7, "options": { "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, "content": "This section contains queries that aggregate across all clients. This means the `source_client` variable above doesn't do anything.", "mode": "markdown" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "title": "Explanation", "type": "text" }, { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "description": "This shows the most popular clients.", + "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "This shows the most popular clients by request count over the configured time window.", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, @@ -51,7 +64,7 @@ }, "overrides": [] }, - "gridPos": { "h": 15, "w": 12, "x": 0, "y": 4 }, + "gridPos": { "h": 15, "w": 12, "x": 0, "y": 12 }, "id": 1, "options": { "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -61,9 +74,9 @@ }, "targets": [ { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "editorMode": "builder", - "expr": "count by(request_source_client) (label_replace(aptos_api_request_source_client, \"request_source_client\", \"$1\", \"request_source_client\", \"(.*)/.*\"))", + "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "editorMode": "code", + "expr": "sum by(request_source_client) (increase(label_replace(aptos_api_request_source_client{chain_name=\"$chain_name\"}, \"request_source_client\", \"$1\", \"request_source_client\", \"(.*)/.*\")[$__range])) != 0", "legendFormat": "__auto", "range": true, "refId": "A" @@ -73,8 +86,8 @@ "type": "piechart" }, { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "description": "This shows the most popular clients. There is a separate entry per client + client version.", + "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "This shows the most popular clients by request count over the configured time window. There is a separate entry per client + client version. 
", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, @@ -83,7 +96,7 @@ }, "overrides": [] }, - "gridPos": { "h": 15, "w": 12, "x": 12, "y": 4 }, + "gridPos": { "h": 15, "w": 12, "x": 12, "y": 12 }, "id": 8, "options": { "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -93,9 +106,9 @@ }, "targets": [ { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "editorMode": "builder", - "expr": "count by(request_source_client) (aptos_api_request_source_client)", + "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "editorMode": "code", + "expr": "sum by(request_source_client) (increase(aptos_api_request_source_client{chain_name=\"$chain_name\"}[$__range])) != 0", "legendFormat": "__auto", "range": true, "refId": "A" @@ -105,93 +118,116 @@ "type": "piechart" }, { - "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 19 }, - "id": 4, - "panels": [], - "repeat": "source_client", - "repeatDirection": "h", - "title": "Per client", - "type": "row" - }, - { - "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, - "gridPos": { "h": 3, "w": 24, "x": 0, "y": 20 }, - "id": 6, - "options": { - "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, - "content": "This section contains queries that show data for a specific client. To select which client to view metrics for, select one in the `source_client` variable dropdown above.", - "mode": "markdown" - }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", - "title": "Explanation", - "type": "text" - }, - { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "description": "This shows what are the top 5 most common endpoints called by users of this client.", + "datasource": { "type": "grafana-falconlogscale-datasource", "uid": "b4f0e2cd-2eea-4ada-a4c0-261e41369ed5" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, - "custom": { "hideFrom": { "legend": false, "tooltip": false, "viz": false } }, - "mappings": [] + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineWidth": 1, + "scaleDistribution": { "type": "linear" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } }, "overrides": [] }, - "gridPos": { "h": 15, "w": 12, "x": 0, "y": 23 }, - "id": 3, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 27 }, + "id": 10, "options": { - "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, - "pieType": "pie", - "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, - "tooltip": { "mode": "single", "sort": "none" } + "barRadius": 0, + "barWidth": 0.97, + "fullHighlight": false, + "groupWidth": 0.7, + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "orientation": "auto", + "showValue": "auto", + "stacking": "none", + "tooltip": { "mode": "single", "sort": "none" }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "editorMode": "code", - "expr": "topk(5, count by(operation_id) 
(aptos_api_request_source_client{request_source_client=\"$source_client\"}))", - "legendFormat": "__auto", - "range": true, - "refId": "A" + "datasource": { "type": "grafana-falconlogscale-datasource", "uid": "b4f0e2cd-2eea-4ada-a4c0-261e41369ed5" }, + "lsql": "| #resource.type=cloud_run_revision\n| resource.labels.service_name=indexer-$chain_name \n| jsonPayload.method=* logName=*stdout\n| case {jsonPayload.aptos_client!=* | jsonPayload.aptos_client:=\"unknown\"; *}\n| top(jsonPayload.aptos_client)", + "refId": "A", + "repository": "gcp" } ], - "title": "Top 5 endpoints", - "type": "piechart" + "title": "Indexer API requests by client", + "type": "barchart" }, { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "description": "If we see a client is making requests to a particular endpoint and getting lots of 400s, it could indicate an issue with the client. It could also just indicate bad requests by the user, so we need to take this with a pinch of salt.", - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { "hideFrom": { "legend": false, "tooltip": false, "viz": false } }, - "mappings": [] + "collapsed": true, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 35 }, + "id": 4, + "panels": [ + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "gridPos": { "h": 3, "w": 24, "x": 0, "y": 36 }, + "id": 6, + "options": { + "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, + "content": "This section contains queries that show data for a specific client. To select which client to view metrics for, select one in the `source_client` variable dropdown above.", + "mode": "markdown" + }, + "pluginVersion": "10.0.0-cloud.3.b04cc88b", + "title": "Explanation", + "type": "text" }, - "overrides": [] - }, - "gridPos": { "h": 15, "w": 12, "x": 12, "y": 23 }, - "id": 2, - "options": { - "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, - "pieType": "pie", - "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", - "targets": [ { - "datasource": { "type": "prometheus", "uid": "vU-Lwva4k" }, - "editorMode": "builder", - "expr": "topk(5, count by(operation_id) (aptos_api_request_source_client{status=\"400\", request_source_client=\"$source_client\"}))", - "legendFormat": "__auto", - "range": true, - "refId": "A" + "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "This shows what are the top 5 most common endpoints called by users of this client in the configured time window.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { "hideFrom": { "legend": false, "tooltip": false, "viz": false } }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { "h": 15, "w": 12, "x": 0, "y": 39 }, + "id": 3, + "options": { + "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, + "pieType": "pie", + "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "editorMode": "code", + "expr": "topk(5, sum by(operation_id) (increase(aptos_api_request_source_client{request_source_client=\"$source_client\", chain_name=\"$chain_name\"}[$__range])))", + "legendFormat": "__auto", + 
"range": true, + "refId": "A" + } + ], + "title": "Top 5 endpoints", + "type": "piechart" } ], - "title": "Top 5 endpoints where the response is a 400", - "type": "piechart" + "repeat": "source_client", + "repeatDirection": "h", + "title": "Per client", + "type": "row" } ], "refresh": "", @@ -202,7 +238,7 @@ "list": [ { "current": { "selected": false, "text": "unknown", "value": "unknown" }, - "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "datasource": { "type": "prometheus", "uid": "${datasource}" }, "definition": "label_values(aptos_api_request_source_client,request_source_client)", "hide": 0, "includeAll": false, @@ -218,6 +254,44 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "current": { + "selected": true, + "text": "VictoriaMetrics Global (Non-mainnet)", + "value": "VictoriaMetrics Global (Non-mainnet)" + }, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "VictoriaMetrics.*Global|US", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { "selected": false, "text": "testnet", "value": "testnet" }, + "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "definition": "label_values(aptos_api_request_source_client,chain_name)", + "description": "When looking at non-mainnet, use this to select the network.", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "chain_name", + "options": [], + "query": { + "query": "label_values(aptos_api_request_source_client,chain_name)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "testnet|devnet|mainnet", + "skipUrlSync": false, + "sort": 0, + "type": "query" } ] }, @@ -226,6 +300,6 @@ "timezone": "", "title": "Developer Platform Client Metrics", "uid": "be847ea3-c7cc-4048-b783-eb2fdb4f1abd", - "version": 27, + "version": 53, "weekStart": "" } diff --git a/dashboards/developer-platform-client-metrics.json.gz b/dashboards/developer-platform-client-metrics.json.gz index 63332d90a129b..b376418ea5182 100644 Binary files a/dashboards/developer-platform-client-metrics.json.gz and b/dashboards/developer-platform-client-metrics.json.gz differ diff --git a/dashboards/execution.json b/dashboards/execution.json index 1735f32aaca46..a750897957da3 100644 --- a/dashboards/execution.json +++ b/dashboards/execution.json @@ -160,7 +160,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 8 }, "hiddenSeries": false, "id": 39, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -179,7 +178,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -254,7 +253,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 8 }, "hiddenSeries": false, "id": 40, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -273,7 +271,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -358,7 +356,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 8 }, "hiddenSeries": false, "id": 41, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -377,7 +374,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": 
"9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -446,11 +443,20 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "quantile(0.67, rate(block_executor_duplicates_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) ", + "expr": "quantile(0.67, rate(aptos_execution_transaction_dedup_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) ", "hide": false, - "legendFormat": "duplicate_filter", + "legendFormat": "pre_dedup", "range": true, "refId": "G" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_execution_transaction_shuffle_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) ", + "hide": false, + "legendFormat": "pre_shuffle", + "range": true, + "refId": "H" } ], "thresholds": [], @@ -480,7 +486,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 16 }, "hiddenSeries": false, "id": 42, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -499,7 +504,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -575,7 +580,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 16 }, "hiddenSeries": false, "id": 43, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -594,7 +598,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -697,7 +701,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 16 }, "hiddenSeries": false, "id": 44, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -716,7 +719,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -870,7 +873,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 24 }, "hiddenSeries": false, "id": 45, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -889,7 +891,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -947,9 +949,9 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "quantile(0.67, rate(block_executor_duplicates_filtered_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", 
metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "expr": "quantile(0.67, rate(aptos_execution_transaction_dedup_filtered_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", "hide": false, - "legendFormat": "duplicates_filtered_per_block", + "legendFormat": "deduped_per_block", "range": true, "refId": "F" } @@ -981,7 +983,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 24 }, "hiddenSeries": false, "id": 46, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1000,7 +1001,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1115,7 +1116,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 32 }, "hiddenSeries": false, "id": 51, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1134,7 +1134,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1174,7 +1174,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 40 }, "id": 13, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -1196,7 +1195,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 41 }, "hiddenSeries": false, "id": 9, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1215,7 +1213,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1261,7 +1259,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 41 }, "hiddenSeries": false, "id": 24, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1280,7 +1277,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1326,7 +1323,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 41 }, "hiddenSeries": false, "id": 23, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1345,7 +1341,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1391,7 +1387,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 49 }, "hiddenSeries": false, "id": 18, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1410,7 +1405,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, 
"points": false, "renderer": "flot", @@ -1456,7 +1451,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 49 }, "hiddenSeries": false, "id": 25, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1475,7 +1469,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1521,7 +1515,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 49 }, "hiddenSeries": false, "id": 26, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1540,7 +1533,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1586,7 +1579,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 57 }, "hiddenSeries": false, "id": 16, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1651,7 +1643,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 57 }, "hiddenSeries": false, "id": 27, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1716,7 +1707,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 57 }, "hiddenSeries": false, "id": 28, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1781,7 +1771,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 65 }, "hiddenSeries": false, "id": 17, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1846,7 +1835,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 65 }, "hiddenSeries": false, "id": 29, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1911,7 +1899,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 65 }, "hiddenSeries": false, "id": 30, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1968,7 +1955,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 73 }, "id": 11, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -1990,7 +1976,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 74 }, "hiddenSeries": false, "id": 6, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2055,7 +2040,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 74 }, "hiddenSeries": false, "id": 15, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2120,7 +2104,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 74 }, "hiddenSeries": false, "id": 4, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2185,7 +2168,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 82 }, "hiddenSeries": false, "id": 2, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2250,7 +2232,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 82 }, "hiddenSeries": false, "id": 20, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2313,7 +2294,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 82 }, "hiddenSeries": false, "id": 50, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2380,7 +2360,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 90 }, "hiddenSeries": false, "id": 32, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2447,7 +2426,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 90 }, "hiddenSeries": false, "id": 49, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2653,7 +2631,6 @@ "gridPos": { "h": 8, "w": 8, 
"x": 16, "y": 90 }, "hiddenSeries": false, "id": 22, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2724,7 +2701,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 99 }, "hiddenSeries": false, "id": 52, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2799,7 +2775,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 99 }, "hiddenSeries": false, "id": 56, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2874,7 +2849,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 99 }, "hiddenSeries": false, "id": 53, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -3147,6 +3121,6 @@ "timezone": "", "title": "execution", "uid": "execution", - "version": 10, + "version": 12, "weekStart": "" } diff --git a/dashboards/execution.json.gz b/dashboards/execution.json.gz index 7959500c28b98..423ed49b1cc0e 100644 Binary files a/dashboards/execution.json.gz and b/dashboards/execution.json.gz differ diff --git a/dashboards/mempool.json b/dashboards/mempool.json index 2fe38671be550..218081e1191ae 100644 --- a/dashboards/mempool.json +++ b/dashboards/mempool.json @@ -66,7 +66,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 18, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -453,7 +452,7 @@ "refId": "F" } ], - "title": "Avg E2E Txn Commit Latency", + "title": "(deprecated) Avg E2E Txn Commit Latency", "type": "timeseries" }, { @@ -552,6 +551,66 @@ }, "overrides": [] }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 17 }, + "id": 177, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sum by (kubernetes_pod_name, role) (rate(aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"commit_accepted\", submitted_by=\"client\"}[$interval])) / sum by (kubernetes_pod_name, role) (rate(aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"commit_accepted\", submitted_by=\"client\"}[$interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}:{{role}}", + "range": true, + "refId": "B" + } + ], + "title": "Client-submitted E2E Txn latency", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Transactions that were submitted from downstream, and not from a peer validator", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + 
"thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "s" + }, + "overrides": [] + }, "gridPos": { "h": 8, "w": 8, "x": 8, "y": 17 }, "id": 172, "options": { @@ -563,15 +622,15 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "sum by (kubernetes_pod_name, role) (rate(aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"commit_accepted\"}[$interval])) / sum by (kubernetes_pod_name, role) (rate(aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"commit_accepted\"}[$interval]))", + "expr": "sum by (kubernetes_pod_name, role, submitted_by) (rate(aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=\"validator\", stage=\"commit_accepted\", submitted_by=\"downstream\"}[$interval])) / sum by (kubernetes_pod_name, role, submitted_by) (rate(aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=\"validator\", stage=\"commit_accepted\", submitted_by=\"downstream\"}[$interval]))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}:{{role}}", + "legendFormat": "{{kubernetes_pod_name}}:{{role}}:{{submitted_by}}", "range": true, "refId": "B" } ], - "title": "E2E Txn Commit Latency", + "title": "Validator E2E Txn Commit Latency", "type": "timeseries" }, { @@ -639,7 +698,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 25 }, "id": 58, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -1175,7 +1233,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 59 }, "id": 174, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -1558,13 +1615,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -1615,13 +1666,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -1672,13 +1717,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, 
"unit": "short" }, "overrides": [] @@ -1730,13 +1769,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -1788,13 +1821,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -1844,13 +1871,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -1902,13 +1923,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -1940,7 +1955,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 101 }, "id": 34, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -1974,13 +1988,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2032,13 +2040,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2088,13 +2090,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -2144,13 +2140,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -2201,13 +2191,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2257,13 +2241,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, 
"overrides": [] @@ -2312,13 +2290,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2367,13 +2339,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2422,13 +2388,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2486,13 +2446,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2545,13 +2499,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2604,13 +2552,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2663,13 +2605,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2722,13 +2658,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2781,13 +2711,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -2985,7 +2909,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 144 }, "id": 20, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -3019,13 +2942,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": 
"red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3079,13 +2996,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3138,13 +3049,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -3196,13 +3101,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3254,13 +3153,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3294,7 +3187,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 161 }, "id": 22, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -3328,13 +3220,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3386,13 +3272,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -3444,13 +3324,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -3500,13 +3374,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -3556,13 +3424,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -3612,13 +3474,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, 
"unit": "s" }, "overrides": [] @@ -3668,13 +3524,7 @@ }, "mappings": [], "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -3727,13 +3577,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -3784,13 +3628,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3822,7 +3660,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 186 }, "id": 76, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -3855,13 +3692,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3910,13 +3741,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -3965,13 +3790,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -4020,13 +3839,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -4075,13 +3888,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -4130,13 +3937,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -4185,13 +3986,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - 
"steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -4240,13 +4035,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] @@ -4276,7 +4065,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 210 }, "id": 112, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -4389,7 +4177,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 219 }, "id": 64, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], @@ -4802,6 +4589,6 @@ "timezone": "", "title": "mempool", "uid": "mempool", - "version": 6, + "version": 9, "weekStart": "" } diff --git a/dashboards/mempool.json.gz b/dashboards/mempool.json.gz index 331a874f0e093..49110625bcd92 100644 Binary files a/dashboards/mempool.json.gz and b/dashboards/mempool.json.gz differ diff --git a/dashboards/overview.json b/dashboards/overview.json index b0e005b7d2813..ad2bce7c6f04a 100644 --- a/dashboards/overview.json +++ b/dashboards/overview.json @@ -59,7 +59,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 4, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, "refId": "A" }], @@ -84,7 +83,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 2, "x": 0, "y": 1 }, "id": 46, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -97,7 +95,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -142,7 +140,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 3, "x": 2, "y": 1 }, "id": 2, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -155,7 +152,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -200,7 +197,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 3, "x": 5, "y": 1 }, "id": 90, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -213,7 +209,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -259,7 +255,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 3, "x": 8, "y": 1 }, "id": 105, - "isNew": false, "nullPointMode": "", "options": { "colorMode": 
"value", @@ -272,7 +267,7 @@ "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -322,7 +317,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 2, "x": 11, "y": 1 }, "id": 66, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -335,7 +329,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -375,7 +369,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 2, "x": 13, "y": 1 }, "id": 133, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -388,7 +381,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -434,7 +427,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 4, "x": 15, "y": 1 }, "id": 101, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -447,7 +439,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -491,7 +483,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 3, "x": 19, "y": 1 }, "id": 106, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -504,7 +495,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -554,7 +545,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 2, "x": 22, "y": 1 }, "id": 70, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -567,7 +557,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -615,7 +605,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "grafana-bigquery-datasource", "uid": "${BigQuery}" }, @@ -661,7 +651,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 2, "x": 11, "y": 5 }, "id": 134, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -674,7 +663,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, 
"targets": [ @@ -725,7 +714,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "grafana-bigquery-datasource", "uid": "${BigQuery}" }, @@ -767,7 +756,7 @@ "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "grafana-bigquery-datasource", "uid": "${BigQuery}" }, @@ -806,7 +795,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 4, "w": 3, "x": 21, "y": 5 }, "id": 130, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -819,7 +807,7 @@ "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -879,7 +867,7 @@ "showThresholdLabels": false, "showThresholdMarkers": true }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -927,7 +915,7 @@ "showThresholdLabels": false, "showThresholdMarkers": true }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -969,7 +957,7 @@ "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -1000,13 +988,12 @@ }, "gridPos": { "h": 5, "w": 5, "x": 11, "y": 10 }, "id": 72, - "isNew": false, "options": { "cellHeight": "sm", "footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false }, "showHeader": true }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "scroll": false, "span": 0, "targets": [ @@ -1059,7 +1046,6 @@ "gauge": { "maxValue": 0, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": false }, "gridPos": { "h": 5, "w": 3, "x": 16, "y": 10 }, "id": 141, - "isNew": false, "nullPointMode": "", "options": { "colorMode": "value", @@ -1072,7 +1058,7 @@ "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, "textMode": "auto" }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "span": 0, "sparkline": {}, "targets": [ @@ -1112,13 +1098,12 @@ }, "gridPos": { "h": 5, "w": 5, "x": 19, "y": 10 }, "id": 142, - "isNew": false, "options": { "cellHeight": "sm", "footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false }, "showHeader": true }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "scroll": false, "span": 0, "targets": [ @@ -1179,7 +1164,7 @@ "footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false }, "showHeader": true }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -1412,7 +1397,7 @@ "footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false }, 
"showHeader": true }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -1525,7 +1510,7 @@ "showHeader": true, "sortBy": [] }, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -1549,7 +1534,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 32 }, "id": 62, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, "refId": "A" }], @@ -1570,7 +1554,6 @@ "gridPos": { "h": 9, "w": 8, "x": 0, "y": 33 }, "hiddenSeries": false, "id": 10, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1589,7 +1572,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1765,7 +1748,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 42 }, "id": 22, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, "refId": "A" }], @@ -1786,7 +1768,6 @@ "gridPos": { "h": 9, "w": 6, "x": 0, "y": 43 }, "hiddenSeries": false, "id": 26, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1805,7 +1786,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -1852,7 +1833,6 @@ "gridPos": { "h": 9, "w": 6, "x": 6, "y": 43 }, "hiddenSeries": false, "id": 45, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -1871,7 +1851,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2017,7 +1997,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 52 }, "id": 42, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, "refId": "A" }], @@ -2039,7 +2018,6 @@ "gridPos": { "h": 8, "w": 6, "x": 0, "y": 53 }, "hiddenSeries": false, "id": 20, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2058,7 +2036,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2104,7 +2082,6 @@ "gridPos": { "h": 8, "w": 6, "x": 6, "y": 53 }, "hiddenSeries": false, "id": 48, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2123,7 +2100,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2168,7 +2145,6 @@ "gridPos": { "h": 8, "w": 6, "x": 12, "y": 53 }, "hiddenSeries": false, "id": 77, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2187,7 +2163,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": 
"9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2253,7 +2229,6 @@ "gridPos": { "h": 8, "w": 6, "x": 18, "y": 53 }, "hiddenSeries": false, "id": 129, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2272,7 +2247,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2328,7 +2303,6 @@ "gridPos": { "h": 8, "w": 6, "x": 0, "y": 62 }, "hiddenSeries": false, "id": 135, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2347,7 +2321,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2393,7 +2367,6 @@ "gridPos": { "h": 8, "w": 6, "x": 6, "y": 62 }, "hiddenSeries": false, "id": 136, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2412,7 +2385,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2457,7 +2430,6 @@ "gridPos": { "h": 8, "w": 6, "x": 12, "y": 62 }, "hiddenSeries": false, "id": 137, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2476,7 +2448,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2542,7 +2514,6 @@ "gridPos": { "h": 8, "w": 6, "x": 18, "y": 62 }, "hiddenSeries": false, "id": 138, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2561,7 +2532,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2601,7 +2572,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 70 }, "id": 53, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, "refId": "A" }], @@ -2733,7 +2703,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 79 }, "hiddenSeries": false, "id": 50, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2752,7 +2721,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2795,7 +2764,6 @@ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 79 }, "hiddenSeries": false, "id": 51, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2814,7 +2782,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2857,7 +2825,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 79 }, "hiddenSeries": false, "id": 54, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2876,7 +2843,7 @@ "nullPointMode": "null", 
"options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -2912,7 +2879,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 87 }, "id": 40, - "isNew": false, "panels": [], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, "refId": "A" }], @@ -2933,7 +2899,6 @@ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 88 }, "hiddenSeries": false, "id": 30, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -2952,7 +2917,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -3074,7 +3039,6 @@ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 88 }, "hiddenSeries": false, "id": 44, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -3093,7 +3057,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.5.3-cloud.2.0cb5a501", + "pluginVersion": "10.0.0-cloud.3.b04cc88b", "pointradius": 2, "points": false, "renderer": "flot", @@ -3140,9 +3104,87 @@ ], "yaxis": { "align": false } }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Data on PFN nodes only. Unless clients send duplicate transactions, this is a reliable way to see E2E latency.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 96 }, + "id": 158, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.5, rate(aptos_core_mempool_txn_commit_latency_sum{stage=~\"commit_accepted\", kubernetes_pod_name=~\"pfn.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", scope=\"e2e\"}[1m])/rate(aptos_core_mempool_txn_commit_latency_count{stage=~\"commit_accepted\", kubernetes_pod_name=~\"pfn.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", scope=\"e2e\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "p50", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.9, rate(aptos_core_mempool_txn_commit_latency_sum{stage=~\"commit_accepted\", 
kubernetes_pod_name=~\"pfn.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", scope=\"e2e\"}[1m])/rate(aptos_core_mempool_txn_commit_latency_count{stage=~\"commit_accepted\", kubernetes_pod_name=~\"pfn.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", scope=\"e2e\"}[1m]))", + "hide": false, + "legendFormat": "p90", + "range": true, + "refId": "B" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.99, rate(aptos_core_mempool_txn_commit_latency_sum{stage=~\"commit_accepted\", kubernetes_pod_name=~\"pfn.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", scope=\"e2e\"}[1m])/rate(aptos_core_mempool_txn_commit_latency_count{stage=~\"commit_accepted\", kubernetes_pod_name=~\"pfn.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=\"vmagent\", namespace=~\"$namespace\", scope=\"e2e\"}[1m]))", + "hide": false, + "legendFormat": "p99", + "range": true, + "refId": "C" + } + ], + "title": "Aptos PFN-only e2e latency", + "type": "timeseries" + }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 96 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 104 }, "id": 98, "panels": [], "title": "State Sync", @@ -3184,7 +3226,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 97 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 105 }, "id": 100, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3239,7 +3281,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 97 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 105 }, "id": 94, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3262,7 +3304,7 @@ }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 105 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 113 }, "id": 81, "panels": [], "title": "System Metrics", @@ -3306,7 +3348,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 106 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 114 }, "id": 79, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3389,7 +3431,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 106 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 114 }, "id": 83, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3448,7 +3490,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 106 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 114 }, "id": 85, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3506,7 +3548,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 114 }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 122 }, "id": 89, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3564,7 +3606,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 8, "y": 114 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 122 }, "id": 87, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3622,7 +3664,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 16, "y": 114 }, + "gridPos": { 
"h": 9, "w": 8, "x": 16, "y": 122 }, "id": 92, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3643,7 +3685,7 @@ }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 123 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 131 }, "id": 156, "panels": [], "title": "Other metrics", @@ -3677,7 +3719,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 124 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 132 }, "id": 154, "options": { "barRadius": 0, @@ -3707,7 +3749,7 @@ "type": "barchart" } ], - "refresh": false, + "refresh": "1m", "schemaVersion": 38, "style": "dark", "tags": ["aptos-core"], @@ -3871,6 +3913,6 @@ "timezone": "", "title": "overview", "uid": "overview", - "version": 15, + "version": 17, "weekStart": "" } diff --git a/dashboards/overview.json.gz b/dashboards/overview.json.gz index 339dc0773acbd..9194fb0be2000 100644 Binary files a/dashboards/overview.json.gz and b/dashboards/overview.json.gz differ diff --git a/dashboards/public-fullnodes.json b/dashboards/public-fullnodes.json new file mode 100644 index 0000000000000..dbf6e412644f0 --- /dev/null +++ b/dashboards/public-fullnodes.json @@ -0,0 +1,1124 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { "type": "grafana", "uid": "-- Grafana --" }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { "limit": 100, "matchAny": false, "tags": [], "type": "dashboard" }, + "type": "dashboard" + }, + { + "datasource": { "type": "datasource", "uid": "grafana" }, + "enable": true, + "expr": "", + "iconColor": "rgba(0, 211, 255, 1)", + "iconSize": 0, + "lineColor": "", + "name": "Annotations & Alerts", + "query": "", + "showLine": false, + "step": "", + "tagKeys": "", + "tagsField": "", + "target": { "limit": 100, "matchAny": false, "tags": [], "type": "dashboard" }, + "textField": "", + "textFormat": "", + "titleFormat": "", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "title": "Other Dashboards", + "type": "dashboards" + } + ], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, + "id": 7, + "panels": [], + "span": 0, + "title": "State Sync", + "type": "row" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The latest synced version of the node.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 1 }, + "id": 36, + 
"options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"synced\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "Latest synced version", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The latest synced version.", + "fieldConfig": { + "defaults": { + "color": { "mode": "continuous-GrYlRd" }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 1 }, + "id": 37, + "options": { + "displayMode": "lcd", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, + "showUnfilled": true, + "valueMode": "color" + }, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"synced\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "Latest synced version", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Rate at which the synced version is increasing", + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 9 }, + "hiddenSeries": false, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "rate(aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"synced\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "State Sync Rate", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "short", 
"label": "/s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The difference between the highest advertised version and the currently synced version.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 9 }, + "id": 38, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "8.5.2", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "clamp_min(aptos_data_client_highest_advertised_data{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", data_type=\"transactions\"} - on(kubernetes_pod_name, cluster, run_uuid) aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", type=\"synced\"}, 0)", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "Sync lag (behind highest known)", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 17 }, + "id": 31, + "panels": [], + "span": 0, + "title": "Mempool", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Number of uncommitted but still valid (not expired nor discarded) transactions in the nodes Mempool.", + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 18 }, + "hiddenSeries": false, + "id": 26, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "aptos_core_mempool_index_size{chain_name=~\"$chain_name\", cluster=~\"$cluster\", 
metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", index=\"system_ttl\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "refId": "A" + }, + { + "expr": "aptos_core_mempool_index_size{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", index=\"system_ttl\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "legendFormat": "{{kubernetes_pod_name}}-{{job}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Mempool Pending transactions", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "short", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The time between Mempool receiving the transaction and time to be committed. Note: due to reliability mechanisms, this value can be lower than it really is.", + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 18 }, + "hiddenSeries": false, + "id": 34, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} / aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "refId": "A" + }, + { + "expr": "aptos_core_mempool_txn_commit_latency_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} / aptos_core_mempool_txn_commit_latency_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "legendFormat": "{{kubernetes_pod_name}}-{{job}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Mempool Txn Commit Latency", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, 
+ { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "collapsed": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 26 }, + "id": 22, + "panels": [], + "span": 0, + "title": "Networking", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Number of Inbound Connections as measured by AptosNet", + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 27 }, + "hiddenSeries": false, + "id": 24, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (kubernetes_pod_name,kubernetes_pod_name)(aptos_connections{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", direction=\"inbound\",network_id=\"Public\", kubernetes_pod_name=~\"$kubernetes_pod_name\"})", + "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Network Connections (Incoming)", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "short", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Number of Outbound Network Connections as measured by AptosNet", + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 27 }, + "hiddenSeries": false, + "id": 35, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (kubernetes_pod_name, kubernetes_pod_name)(aptos_connections{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", direction=\"outbound\",network_id=\"Public\", kubernetes_pod_name=~\"$kubernetes_pod_name\"})", + "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Network 
Connections (Outgoing)", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "short", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 35 }, + "hiddenSeries": false, + "id": 20, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", pod=~\"$kubernetes_pod_name.*fullnode.*\"}[$interval])) by (pod)", + "legendFormat": "{{pod}}", + "refId": "A" + }, + { + "expr": "sum(irate(container_network_transmit_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", pod=~\"$kubernetes_pod_name.*fullnode.*\"}[$interval]))", + "legendFormat": "total", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transmit Bandwidth", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "Bps", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 35 }, + "hiddenSeries": false, + "id": 19, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", pod=~\"$kubernetes_pod_name.*fullnode.*\"}[$interval])) by (pod)", + "legendFormat": "{{pod}}", + "refId": "A" + }, + { + "expr": "sum(irate(container_network_receive_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", 
metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", pod=~\"$kubernetes_pod_name.*fullnode.*\"}[$interval]))", + "legendFormat": "total", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Receive Bandwidth", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "Bps", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "collapsed": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 43 }, + "id": 9, + "panels": [], + "span": 0, + "title": "System", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 44 }, + "hiddenSeries": false, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - kubelet_volume_stats_available_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"fn.$kubernetes_pod_name.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} / kubelet_volume_stats_capacity_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"fn.$kubernetes_pod_name.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "legendFormat": "{{persistentvolumeclaim}}", + "refId": "A" + }, + { + "expr": "1 - kubelet_volume_stats_available_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"$kubernetes_pod_name.*fullnode.*\", kubernetes_pod_name!~\"val.*\"} / kubelet_volume_stats_capacity_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"$kubernetes_pod_name.*fullnode.*\", kubernetes_pod_name!~\"val.*\"}", + "legendFormat": "{{persistentvolumeclaim}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Percentage Disk Used", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "percentunit", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + 
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 44 }, + "hiddenSeries": false, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "container_memory_working_set_bytes{container=\"fullnode\", pod=~\"$kubernetes_pod_name.*\", job=\"kubernetes-cadvisor\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\"}", + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Memory Usage", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "bytes", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 52 }, + "hiddenSeries": false, + "id": 17, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "rate(container_cpu_usage_seconds_total{container=\"fullnode\", kubernetes_pod_name=~\"$kubernetes_pod_name\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{pod}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "rate(container_cpu_usage_seconds_total{container=\"fullnode\", pod=~\"pfn.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\"}[$interval])", + "legendFormat": "{{pod}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "CPU Usage", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "short", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": 
{ "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editable": false, + "error": false, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 52 }, + "hiddenSeries": false, + "id": 15, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.0.1-cloud.1.d4a15e66", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "time() - container_start_time_seconds{container=\"fullnode\", pod=~\"$kubernetes_pod_name.*\", job=\"kubernetes-cadvisor\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\"}", + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Uptime", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": ["aptos-core"], + "templating": { + "list": [ + { + "allFormat": "", + "allValue": "", + "current": { + "selected": true, + "text": "VictoriaMetrics Global (Non-mainnet)", + "value": "VictoriaMetrics Global (Non-mainnet)" + }, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "multiFormat": "", + "name": "Datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": ".*Prometheus.*|.*Victoria.*|.*Telemetry.*", + "skipUrlSync": false, + "sort": 0, + "type": "datasource" + }, + { + "current": { "selected": true, "text": ["vmagent"], "value": ["vmagent"] }, + "hide": 0, + "includeAll": false, + "label": "", + "multi": true, + "name": "metrics_source", + "options": [{ "selected": true, "text": "vmagent", "value": "vmagent" }], + "query": "vmagent", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + }, + { + "allFormat": "", + "allValue": "", + "current": { "selected": true, "text": "testnet", "value": "testnet" }, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "definition": "", + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "multiFormat": "", + "name": "chain_name", + "options": [], + "query": { + "query": "label_values(node_process_start_time{metrics_source=~\"$metrics_source\"}, chain_name)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "allFormat": "", + "allValue": ".*", + "current": { "selected": true, "text": ["gcp-testnet-pfn"], "value": ["gcp-testnet-pfn"] }, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "definition": "label_values(node_process_start_time{metrics_source=~\"$metrics_source\", 
chain_name=~\"$chain_name\"},cluster)", + "hide": 0, + "includeAll": false, + "label": "", + "multi": true, + "multiFormat": "", + "name": "cluster", + "options": [], + "query": { + "query": "label_values(node_process_start_time{metrics_source=~\"$metrics_source\", chain_name=~\"$chain_name\"},cluster)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "^.*pfn.*$", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allFormat": "", + "allValue": ".*", + "current": { "selected": false, "text": "All", "value": "$__all" }, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "definition": "", + "hide": 0, + "includeAll": true, + "label": "", + "multi": false, + "multiFormat": "", + "name": "namespace", + "options": [], + "query": { + "query": "label_values(node_process_start_time{metrics_source=~\"$metrics_source\", chain_name=~\"$chain_name\", cluster=~\"$cluster\"}, namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "allFormat": "", + "allValue": ".*", + "current": { "selected": true, "text": ["All"], "value": ["$__all"] }, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "definition": "", + "hide": 0, + "includeAll": true, + "label": "", + "multi": true, + "multiFormat": "", + "name": "kubernetes_pod_name", + "options": [], + "query": { + "query": "label_values(node_process_start_time{metrics_source=~\"$metrics_source\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", namespace=~\"$namespace\"}, kubernetes_pod_name)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "allFormat": "", + "allValue": "", + "auto": true, + "auto_count": 30, + "auto_min": "10s", + "current": { "selected": false, "text": "auto", "value": "$__auto_interval_interval" }, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "multiFormat": "", + "name": "interval", + "options": [ + { "selected": true, "text": "auto", "value": "$__auto_interval_interval" }, + { "selected": false, "text": "1m", "value": "1m" }, + { "selected": false, "text": "5m", "value": "5m" }, + { "selected": false, "text": "10m", "value": "10m" }, + { "selected": false, "text": "30m", "value": "30m" }, + { "selected": false, "text": "1h", "value": "1h" } + ], + "query": "1m,5m,10m,30m,1h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "interval" + } + ] + }, + "time": { "from": "now-2d", "to": "now" }, + "timepicker": { "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"] }, + "timezone": "", + "title": "public-fullnodes", + "uid": "de6aa860-0aed-4876-bd81-ec593d4bc252", + "version": 5, + "weekStart": "" +} diff --git a/dashboards/public-fullnodes.json.gz b/dashboards/public-fullnodes.json.gz new file mode 100644 index 0000000000000..8cee6149f1cf7 Binary files /dev/null and b/dashboards/public-fullnodes.json.gz differ diff --git a/dashboards/state-sync-v2.json b/dashboards/state-sync-v2.json index 135c611ff79e4..977813c98a6cd 100644 --- a/dashboards/state-sync-v2.json +++ b/dashboards/state-sync-v2.json @@ -979,13 +979,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": 
"red", "value": 80 }] } }, "overrides": [] }, @@ -1034,13 +1028,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1089,13 +1077,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1144,13 +1126,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1199,13 +1175,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1254,13 +1224,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1309,13 +1273,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1364,13 +1322,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1419,13 +1371,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -1476,13 +1422,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [ @@ -1542,13 +1482,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1597,13 +1531,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - 
"thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1652,13 +1580,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1720,17 +1642,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 3 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 67 }, "id": 41, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -1775,17 +1691,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 3 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 67 }, "id": 87, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -1830,17 +1740,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 11 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 75 }, "id": 85, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -1885,17 +1789,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 11 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 75 }, "id": 42, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -1940,17 +1838,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 19 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 83 }, "id": 43, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -1995,18 +1887,12 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", 
"value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 19 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 83 }, "id": 47, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2066,7 +1952,7 @@ } ] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 27 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 91 }, "id": 48, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2125,7 +2011,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 27 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 91 }, "id": 46, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2156,7 +2042,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 35 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 99 }, "id": 44, "options": { "displayMode": "lcd", @@ -2164,9 +2050,10 @@ "minVizWidth": 0, "orientation": "horizontal", "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, - "showUnfilled": true + "showUnfilled": true, + "valueMode": "color" }, - "pluginVersion": "9.1.1", + "pluginVersion": "9.5.3-cloud.2.0cb5a501", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -2210,7 +2097,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 35 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 99 }, "id": 75, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2241,7 +2128,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 43 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 107 }, "id": 76, "options": { "displayMode": "lcd", @@ -2249,9 +2136,10 @@ "minVizWidth": 0, "orientation": "horizontal", "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }, - "showUnfilled": true + "showUnfilled": true, + "valueMode": "color" }, - "pluginVersion": "9.1.1", + "pluginVersion": "9.5.3-cloud.2.0cb5a501", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -2295,7 +2183,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 43 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 107 }, "id": 45, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2364,7 +2252,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 68 }, "id": 52, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2375,7 +2263,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", "expr": "rate(aptos_storage_service_server_requests_received{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{request_type}}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{request_type}}-{{network_id}}", "range": true, "refId": "A" } @@ -2419,7 +2307,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 68 }, "id": 54, "options": { "legend": { "calcs": [], "displayMode": 
"list", "placement": "bottom", "showLegend": true }, @@ -2430,7 +2318,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", "expr": "rate(aptos_storage_service_server_responses_sent{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{response_type}}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{response_type}}-{{network_id}}", "range": true, "refId": "A" } @@ -2475,7 +2363,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 12 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 76 }, "id": 50, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2541,7 +2429,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 12 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 76 }, "id": 51, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2553,7 +2441,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", "expr": "aptos_storage_service_server_request_latency_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}/aptos_storage_service_server_request_latency_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{request_type}}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{request_type}}-{{network_id}}", "range": true, "refId": "A" } @@ -2597,7 +2485,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 20 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 84 }, "id": 55, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2652,7 +2540,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 20 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 84 }, "id": 53, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -2663,7 +2551,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", "expr": "rate(aptos_storage_service_server_errors{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{error_type}}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{error_type}}-{{network_id}}", "range": true, "refId": "A" } @@ -2673,7 +2561,7 @@ }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The hit rate (percentage) of the LRU storage cache per cache probe.", + "description": "New optimistic fetch events seen at the server.", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, @@ -2704,12 +2592,12 @@ { "color": "red", "value": 80 } ] }, - "unit": "percent" + "unit": "none" }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 28 }, - "id": 80, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 92 }, + "id": 97, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } @@ -2719,19 +2607,19 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", "exemplar": 
true, - "expr": "sum by (kubernetes_pod_name, role) (aptos_storage_service_server_lru_cache{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", event=\"lru_cache_hit\"}) / sum by (kubernetes_pod_name, role) (aptos_storage_service_server_lru_cache{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", event=\"lru_cache_probe\"}) * 100\n", + "expr": "rate(aptos_storage_service_server_optimistic_fetch_event{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\",event=\"optimistic_fetch_add\"}[1m])", "interval": "", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{state}}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{network_id}}", "range": true, "refId": "A" } ], - "title": "LRU storage cache (Hit Rate)", + "title": "New optimistic fetch events", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The compression ratios of sent network messages", + "description": "Expired optimistic fetch events seen at the server.", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, @@ -2762,12 +2650,12 @@ { "color": "red", "value": 80 } ] }, - "unit": "percent" + "unit": "none" }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 28 }, - "id": 88, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 92 }, + "id": 99, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } @@ -2777,20 +2665,85 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", "exemplar": true, - "expr": "sum by (kubernetes_pod_name, role, client) (aptos_compression_byte_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", data_type=\"compressed_bytes\"}) / sum by (kubernetes_pod_name, role, client) (aptos_compression_byte_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", data_type=\"raw_bytes\"}) * 100\n", - "hide": false, + "expr": "rate(aptos_storage_service_server_optimistic_fetch_event{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\",event=\"optimistic_fetch_expire\"}[1m])", "interval": "", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{client}}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{network_id}}", "range": true, "refId": "A" } ], - "title": "Compression ratio (sent)", + "title": "Expired optimistic fetch events", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The rate of network frame overflows when handling storage requests (per second).", + "description": "The latencies for handling optimistic fetch requests.", + "fieldConfig": { + "defaults": { + "color": { "mode": "continuous-GrYlRd" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "scheme", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "smooth", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": 
false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 100 }, + "id": 108, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum(rate(aptos_storage_service_server_optimistic_fetch_latency_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\"}[$__rate_interval])) by (le))", + "legendFormat": "P99", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.70, sum(rate(aptos_storage_service_server_optimistic_fetch_latency_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\"}[$__rate_interval])) by (le))", + "hide": false, + "legendFormat": "P70", + "range": true, + "refId": "B" + } + ], + "title": "Optimistic fetch latencies", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average latencies to handle optimistic fetch requests.", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, @@ -2821,34 +2774,33 @@ { "color": "red", "value": 80 } ] }, - "unit": "none" + "unit": "s" }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 36 }, - "id": 89, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 100 }, + "id": 109, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, + "pluginVersion": "v1.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "exemplar": true, - "expr": "rate(aptos_storage_service_server_network_frame_overflow{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])", - "interval": "", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{response_type}}", + "expr": "aptos_storage_service_server_optimistic_fetch_latency_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}/aptos_storage_service_server_optimistic_fetch_latency_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{request_type}}-{{network_id}}", "range": true, "refId": "A" } ], - "title": "Network frame overflow (count per second)", + "title": "Average optimistic fetch latencies", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The number of peers currently being ignored", + "description": "The hit rate (percentage) of the LRU storage cache per cache probe.", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, @@ -2879,12 +2831,12 @@ { "color": "red", "value": 80 } ] }, - "unit": "none" + "unit": "percent" }, "overrides": 
[] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 36 }, - "id": 97, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 108 }, + "id": 80, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } @@ -2894,25 +2846,566 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", "exemplar": true, - "expr": "aptos_storage_service_server_ignored_peer_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", + "expr": "sum by (kubernetes_pod_name, role, network_id) (aptos_storage_service_server_lru_cache{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", event=\"lru_cache_hit\"}) / sum by (kubernetes_pod_name, role, network_id) (aptos_storage_service_server_lru_cache{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", event=\"lru_cache_probe\"}) * 100\n", "interval": "", "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{network_id}}", "range": true, "refId": "A" } ], - "title": "Number of ignored peers", + "title": "LRU storage cache (Hit Rate)", "type": "timeseries" - } - ], - "targets": [ - { "datasource": { "type": "doitintl-bigquery-datasource", "uid": "P09697CCD6C202E6D" }, "refId": "A" } - ], + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The compression ratios of sent network messages (i.e., compressed bytes / raw bytes)", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 108 }, + "id": 88, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (kubernetes_pod_name, role, client) (aptos_compression_byte_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", data_type=\"compressed_bytes\"}) / sum by (kubernetes_pod_name, role, client) (aptos_compression_byte_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", data_type=\"raw_bytes\"}) * 100\n", + "hide": false, + "interval": "", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{client}}", + "range": true, + "refId": "A" + } + ], + "title": "Compression ratio (sent)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The 
rate of network frame overflows when handling storage requests (per second).", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 116 }, + "id": 89, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(aptos_storage_service_server_network_frame_overflow{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])", + "interval": "", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{response_type}}", + "range": true, + "refId": "A" + } + ], + "title": "Network frame overflow (count per second)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The number of peers currently being ignored", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 116 }, + "id": 98, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "exemplar": true, + "expr": "aptos_storage_service_server_ignored_peer_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", + "interval": "", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{network_id}}", + "range": true, + "refId": "A" + } + ], + "title": "Number of ignored peers", + "type": "timeseries" + } + ], + "targets": [ + { "datasource": { "type": "doitintl-bigquery-datasource", "uid": "P09697CCD6C202E6D" }, "refId": "A" } + ], "title": "Storage Service Server", "type": "row" + }, + { + "collapsed": true, + "gridPos": { "h": 1, "w": 24, 
"x": 0, "y": 68 }, + "id": 103, + "panels": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average latencies taken from when data is first proposed, to when it is finally synced.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 125 }, + "id": 104, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"propose_to_sync_latency\"}[1m])/rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"propose_to_sync_latency\"}[1m])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{label}}", + "range": true, + "refId": "A" + } + ], + "title": "Average propose to sync latencies (seconds)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The latencies taken from when data is first proposed, to when it is finally synced.", + "fieldConfig": { + "defaults": { + "color": { "mode": "continuous-GrYlRd" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "scheme", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "smooth", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { "mode": "exclude", "names": ["Value"], "prefix": "All except:", "readOnly": true } + }, + "properties": [{ "id": "custom.hideFrom", "value": { "legend": false, "tooltip": false, "viz": true } }] + } + ] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 125 }, + "id": 102, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": 
"code", + "expr": "histogram_quantile(0.99, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"propose_to_sync_latency\"}[$__rate_interval])) by (le))", + "legendFormat": "P99", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.70, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"propose_to_sync_latency\"}[$__rate_interval])) by (le))", + "hide": false, + "legendFormat": "P70", + "range": true, + "refId": "B" + } + ], + "title": "Propose to sync latencies (seconds)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average latencies taken from when data is first proposed, to when it is finally seen.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 133 }, + "id": 106, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"propose_to_seen_latency\"}[1m])/rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"propose_to_seen_latency\"}[1m])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{label}}", + "range": true, + "refId": "A" + } + ], + "title": "Average propose to seen latencies (seconds)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The latencies taken from when data is first proposed, to when it is finally seen.", + "fieldConfig": { + "defaults": { + "color": { "mode": "continuous-GrYlRd" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "scheme", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "smooth", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + 
"mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { "mode": "exclude", "names": ["Value"], "prefix": "All except:", "readOnly": true } + }, + "properties": [{ "id": "custom.hideFrom", "value": { "legend": false, "tooltip": false, "viz": true } }] + } + ] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 133 }, + "id": 107, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"propose_to_seen_latency\"}[$__rate_interval])) by (le))", + "legendFormat": "P99", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.70, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"propose_to_seen_latency\"}[$__rate_interval])) by (le))", + "hide": false, + "legendFormat": "P70", + "range": true, + "refId": "B" + } + ], + "title": "Propose to seen latencies (seconds)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average latencies taken from when data is first seen, to when it is finally synced.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 141 }, + "id": 100, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"seen_to_sync_latency\"}[1m])/rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"seen_to_sync_latency\"}[1m])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{label}}", + "range": true, + "refId": "A" + } + ], + "title": "Average seen to sync latencies (seconds)", + 
"type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The latencies taken from when data is first seen, to when it is finally synced.", + "fieldConfig": { + "defaults": { + "color": { "mode": "continuous-GrYlRd" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "scheme", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "smooth", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { "mode": "exclude", "names": ["Value"], "prefix": "All except:", "readOnly": true } + }, + "properties": [{ "id": "custom.hideFrom", "value": { "legend": false, "tooltip": false, "viz": true } }] + } + ] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 141 }, + "id": 105, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"seen_to_sync_latency\"}[$__rate_interval])) by (le))", + "legendFormat": "P99", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.70, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"seen_to_sync_latency\"}[$__rate_interval])) by (le))", + "hide": false, + "legendFormat": "P70", + "range": true, + "refId": "B" + } + ], + "title": "Seen to sync latencies (seconds)", + "type": "timeseries" + } + ], + "title": "Latency Monitor", + "type": "row" } ], - "refresh": "", + "refresh": false, "schemaVersion": 38, "style": "dark", "tags": ["aptos-core"], @@ -2941,19 +3434,6 @@ "sort": 0, "type": "datasource" }, - { - "description": "BigQuery data source", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "BigQuery", - "options": [], - "query": "grafana-bigquery-datasource", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, { "allValue": ".*", "current": { "selected": false, "text": "vmagent", "value": "vmagent" }, @@ -3130,6 +3610,6 @@ "timezone": "", "title": "state-sync-v2", "uid": "state_sync_v2", - "version": 3, + "version": 10, "weekStart": "" } diff --git a/dashboards/state-sync-v2.json.gz b/dashboards/state-sync-v2.json.gz index 6499a6fe24bff..779f5a320093f 100644 Binary files a/dashboards/state-sync-v2.json.gz and b/dashboards/state-sync-v2.json.gz differ diff --git a/dashboards/storage-overview.json b/dashboards/storage-overview.json index 98d5608a84569..41c45e92a298e 100644 --- 
a/dashboards/storage-overview.json +++ b/dashboards/storage-overview.json @@ -31,282 +31,269 @@ "liveNow": false, "panels": [ { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 3286, - "panels": [ + "panels": [], + "title": "Overview", + "type": "row" + }, + { + "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, + "description": "", + "gridPos": { "h": 3, "w": 24, "x": 0, "y": 1 }, + "id": 3266, + "options": { + "content": "These are basic facts that can be useful for understanding what's happening on chain right now.\r\n\r\nFollow the sections below this to examine the three major focus areas of the storage system health.\r\n", + "mode": "markdown" + }, + "pluginVersion": "9.1.1", + "type": "text" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "version", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineStyle": { "fill": "solid" }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "decimals": 0, + "links": [], + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { "id": "byName", "options": "epoch (max)" }, + "properties": [ + { "id": "custom.axisPlacement", "value": "hidden" }, + { "id": "custom.axisLabel", "value": "epoch" }, + { "id": "custom.lineStyle" }, + { "id": "color", "value": { "fixedColor": "blue", "mode": "fixed" } }, + { "id": "custom.lineWidth", "value": 1 }, + { "id": "custom.axisColorMode", "value": "series" }, + { "id": "custom.lineStyle", "value": { "dash": [10, 10], "fill": "dash" } }, + { "id": "custom.axisPlacement", "value": "right" } + ] + } + ] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 }, + "id": 66, + "options": { + "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": true }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ { - "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, - "description": "", - "gridPos": { "h": 3, "w": 24, "x": 0, "y": 1 }, - "id": 3266, - "options": { - "content": "These are basic facts that can be useful for understanding what's happening on chain right now.\r\n\r\nFollow the sections below this to examine the three major focus areas of the storage system health.\r\n", - "mode": "markdown" - }, - "pluginVersion": "9.1.1", - "type": "text" + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "max(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": false, + "interval": "", + "legendFormat": "latest (synced) (max)", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "", - "fieldConfig": { - "defaults": { - "color": 
{ "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "version", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineStyle": { "fill": "solid" }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "never", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "decimals": 0, - "links": [], - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "epoch (max)" }, - "properties": [ - { "id": "custom.axisPlacement", "value": "hidden" }, - { "id": "custom.axisLabel", "value": "epoch" }, - { "id": "custom.lineStyle" }, - { "id": "color", "value": { "fixedColor": "blue", "mode": "fixed" } }, - { "id": "custom.lineWidth", "value": 1 }, - { "id": "custom.axisColorMode", "value": "series" }, - { "id": "custom.lineStyle", "value": { "dash": [10, 10], "fill": "dash" } }, - { "id": "custom.axisPlacement", "value": "right" } - ] - } - ] - }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 }, - "id": 66, - "options": { - "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": true }, - "tooltip": { "mode": "multi", "sort": "none" } - }, - "pluginVersion": "9.1.1", - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "max(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": false, - "interval": "", - "legendFormat": "latest (synced) (max)", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "max(aptos_storage_ledger_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": false, - "interval": "", - "legendFormat": "committed (max)", - "range": true, - "refId": "C" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(quantile by (pruner_name) (0.8, aptos_pruner_min_readable_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", pruner_name !=\"state_store\"}))", - "hide": false, - "interval": "", - "legendFormat": "{{kubernetes_pod_name}} {{pruner_name}} pruned-till (p80)", - "range": true, - "refId": "B" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "exemplar": false, - "expr": "max(aptos_storage_next_block_epoch{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": false, - "legendFormat": "epoch (max)", - "range": true, - "refId": "D" - } - ], - "title": "latest version, epoch, pruner versions", - "type": "timeseries" + "editorMode": 
"code", + "expr": "max(aptos_storage_ledger_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": false, + "interval": "", + "legendFormat": "committed (max)", + "range": true, + "refId": "C" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "never", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "links": [], - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "P50" }, - "properties": [ - { "id": "custom.lineWidth", "value": 5 }, - { "id": "color", "value": { "fixedColor": "yellow", "mode": "fixed" } }, - { "id": "custom.lineStyle", "value": { "fill": "solid" } } - ] - } - ] - }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 }, - "id": 257, - "options": { - "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "multi", "sort": "none" } + "editorMode": "code", + "expr": "sort_desc(quantile by (pruner_name, tag) (0.8, aptos_pruner_versions{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", tag=\"min_readable\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{kubernetes_pod_name}} {{pruner_name}} {{tag}} (p80)", + "range": true, + "refId": "B" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "exemplar": false, + "expr": "max(aptos_storage_next_block_epoch{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": false, + "legendFormat": "epoch (max)", + "range": true, + "refId": "D" + } + ], + "title": "latest version, epoch, pruner versions", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "pluginVersion": "9.1.1", - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": 
"sort_desc(irate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]) < 10000)", - "interval": "", - "legendFormat": "{{kubernetes_pod_name}}", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.5, (rate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])))", - "hide": false, - "legendFormat": "P50", - "range": true, - "refId": "B" - } - ], - "title": "Transactions per second", - "type": "timeseries" + "links": [], + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "none" }, + "overrides": [ + { + "matcher": { "id": "byName", "options": "P50" }, + "properties": [ + { "id": "custom.lineWidth", "value": 5 }, + { "id": "color", "value": { "fixedColor": "yellow", "mode": "fixed" } }, + { "id": "custom.lineStyle", "value": { "fill": "solid" } } + ] + } + ] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 }, + "id": 257, + "options": { + "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "never", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "decimals": 2, - "links": [], - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "P50" }, - "properties": [ - { "id": "custom.lineWidth", "value": 5 }, - { "id": "color", "value": { "fixedColor": "yellow", "mode": "fixed" } } - ] - } + "editorMode": "code", + "expr": "sort_desc(irate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]) < 10000)", + "interval": "", + "legendFormat": "{{kubernetes_pod_name}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.5, (rate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])))", + "hide": false, + "legendFormat": "P50", + "range": true, + "refId": "B" + } + ], + "title": "Transactions per second", + "type": 
"timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "decimals": 2, + "links": [], + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { "id": "byName", "options": "P50" }, + "properties": [ + { "id": "custom.lineWidth", "value": 5 }, + { "id": "color", "value": { "fixedColor": "yellow", "mode": "fixed" } } ] - }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 12 }, - "id": 258, - "options": { - "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "multi", "sort": "none" } - }, - "pluginVersion": "9.1.1", - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(rate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]) / on (kubernetes_pod_name, run_uuid) rate(aptos_storage_api_latency_seconds_count{api_name=\"save_transactions\", result=\"Ok\", role=~\"$role\"}[1m]) < 10000)", - "hide": false, - "interval": "", - "legendFormat": "{{kubernetes_pod_name}} ", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.5, rate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]) / on (kubernetes_pod_name, run_uuid) rate(aptos_storage_api_latency_seconds_count{api_name=\"save_transactions\", role=~\"$role\", result=\"Ok\"}[1m]) < 10000)", - "hide": false, - "legendFormat": "P50", - "range": true, - "refId": "B" - } - ], - "title": "Transactions per save", - "type": "timeseries" + } + ] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 12 }, + "id": 258, + "options": { + "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sort_desc(rate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]) / on (kubernetes_pod_name, run_uuid) rate(aptos_storage_api_latency_seconds_count{api_name=\"save_transactions\", result=\"Ok\", role=~\"$role\"}[1m]) < 10000)", + "hide": false, + "interval": "", + "legendFormat": "{{kubernetes_pod_name}} ", + "range": true, + 
"refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.5, rate(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]) / on (kubernetes_pod_name, run_uuid) rate(aptos_storage_api_latency_seconds_count{api_name=\"save_transactions\", role=~\"$role\", result=\"Ok\"}[1m]) < 10000)", + "hide": false, + "legendFormat": "P50", + "range": true, + "refId": "B" } ], - "title": "Overview", - "type": "row" + "title": "Transactions per save", + "type": "timeseries" }, { "collapsed": true, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 1 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 20 }, "id": 3288, "panels": [ { @@ -330,7 +317,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { "color": "green", "value": null }, + { "color": "green" }, { "color": "#EAB839", "value": 100000 }, { "color": "red", "value": 1000000 } ] @@ -403,7 +390,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, "unit": "none" }, "overrides": [ @@ -481,13 +468,7 @@ }, "decimals": 0, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "bytes" }, "overrides": [ @@ -575,7 +556,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, "unit": "short" }, "overrides": [ @@ -646,7 +627,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, "unit": "bytes" }, "overrides": [ @@ -717,13 +698,7 @@ }, "links": [], "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "decbytes" }, "overrides": [ @@ -797,7 +772,7 @@ "decimals": 0, "links": [], "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, "unit": "decbytes" }, "overrides": [ @@ -902,7 +877,7 @@ "decimals": 0, "links": [], "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, "unit": "decbytes" }, "overrides": [ @@ -1006,7 +981,7 @@ "decimals": 0, "links": [], "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, "unit": "decbytes" }, "overrides": [ @@ -1126,13 +1101,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + 
"thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "bytes" }, "overrides": [] @@ -1174,7 +1143,7 @@ }, { "collapsed": true, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 2 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 21 }, "id": 3292, "panels": [ { @@ -2155,7 +2124,7 @@ }, { "collapsed": true, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 3 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 22 }, "id": 3294, "panels": [ { @@ -2327,8 +2296,8 @@ "type": "row" } ], - "refresh": false, - "schemaVersion": 37, + "refresh": "", + "schemaVersion": 38, "style": "dark", "tags": ["aptos-core"], "templating": { @@ -2611,6 +2580,6 @@ "timezone": "", "title": "storage-overview", "uid": "ptUp6Vn4k", - "version": 5, + "version": 6, "weekStart": "" } diff --git a/dashboards/storage-overview.json.gz b/dashboards/storage-overview.json.gz index cba94495d6f26..95b9845ebb2d5 100644 Binary files a/dashboards/storage-overview.json.gz and b/dashboards/storage-overview.json.gz differ diff --git a/dashboards/system.json b/dashboards/system.json index 821e18d73b35a..c3c332050b6f2 100644 --- a/dashboards/system.json +++ b/dashboards/system.json @@ -48,47 +48,49 @@ "liveNow": false, "panels": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, "gridPos": { "h": 11, "w": 8, "x": 0, "y": 0 }, - "hiddenSeries": false, "id": 6, - "isNew": false, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "9.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "targets": [ { "expr": "rate(container_cpu_usage_seconds_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", container=~\"$container\"}[$interval])", @@ -98,17 +100,8 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "CPU Usage", - "tooltip": { "shared": true, "sort": 
0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "percentunit", "logBase": 1, "min": 0, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } + "type": "timeseries" }, { "aliasColors": {}, @@ -124,7 +117,6 @@ "gridPos": { "h": 11, "w": 8, "x": 8, "y": 0 }, "hiddenSeries": false, "id": 2, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -143,7 +135,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "pointradius": 2, "points": false, "renderer": "flot", @@ -187,7 +179,6 @@ "gridPos": { "h": 11, "w": 8, "x": 16, "y": 0 }, "hiddenSeries": false, "id": 25, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -206,7 +197,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "pointradius": 2, "points": false, "renderer": "flot", @@ -253,7 +244,6 @@ "gridPos": { "h": 11, "w": 8, "x": 0, "y": 11 }, "hiddenSeries": false, "id": 13, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -272,7 +262,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "pointradius": 2, "points": false, "renderer": "flot", @@ -319,7 +309,6 @@ "gridPos": { "h": 11, "w": 8, "x": 8, "y": 11 }, "hiddenSeries": false, "id": 23, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -338,7 +327,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "pointradius": 2, "points": false, "renderer": "flot", @@ -384,7 +373,6 @@ "gridPos": { "h": 11, "w": 8, "x": 16, "y": 11 }, "hiddenSeries": false, "id": 24, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -403,7 +391,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "pointradius": 2, "points": false, "renderer": "flot", @@ -447,7 +435,6 @@ "gridPos": { "h": 11, "w": 8, "x": 0, "y": 22 }, "hiddenSeries": false, "id": 22, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -466,7 +453,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "pointradius": 2, "points": false, "renderer": "flot", @@ -510,7 +497,6 @@ "gridPos": { "h": 11, "w": 8, "x": 16, "y": 22 }, "hiddenSeries": false, "id": 16, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -529,7 +515,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.0.1-cloud.2.a7a20fbf", "pointradius": 2, "points": false, "renderer": "flot", @@ -563,8 +549,8 @@ "yaxis": { "align": false } } ], - "refresh": false, - "schemaVersion": 37, + "refresh": "", + "schemaVersion": 38, "style": "dark", "tags": ["aptos-core"], "templating": { @@ -717,9 +703,9 @@ "multiFormat": "", "name": "pod", "options": [], - "query": { "query": "up{namespace=~\"$namespace\"}", "refId": "StandardVariableQuery" }, + "query": { "query": 
"up{namespace=~\"$namespace\"}", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 1, - "regex": ".*pod=\"(.*?)\".*", + "regex": ".*kubernetes_pod_name=\"(.*?)\".*", "skipUrlSync": false, "sort": 1, "type": "query" @@ -768,6 +754,6 @@ "timezone": "", "title": "system", "uid": "system", - "version": 1, + "version": 3, "weekStart": "" } diff --git a/dashboards/system.json.gz b/dashboards/system.json.gz index ea26d8a7b9b66..c0fabd4771106 100644 Binary files a/dashboards/system.json.gz and b/dashboards/system.json.gz differ diff --git a/developer-docs-site/docs/community/external-resources.md b/developer-docs-site/docs/community/external-resources.md index cab2dbe1cbb2c..98b33008b8e43 100644 --- a/developer-docs-site/docs/community/external-resources.md +++ b/developer-docs-site/docs/community/external-resources.md @@ -26,6 +26,10 @@ To add your own resource, click **Edit this page** at the bottom, add your resou | Contribution | Description | Author | Date added/updated | | --- | --- | --- | --- | +| [Alerts integration on your validator/full node](https://forum.aptoslabs.com/t/alerts-integration-on-your-validator-full-node/196210) | Explains how to integrate alerts on your validator (fullnode). | [cryptomolot](https://forum.aptoslabs.com/u/unlimitedmolot) | 2023-06-11 | +| [Tools to monitor your validator](https://forum.aptoslabs.com/t/tools-to-monitore-your-validator/197163) | Explains what tools to use to monitor your validator (fullnode). | [cryptomolot](https://forum.aptoslabs.com/u/unlimitedmolot) and [p1xel32](https://forum.aptoslabs.com/u/p1xel32) | 2023-06-11 | +| [How to join validator set via snapshot](https://forum.aptoslabs.com/t/how-to-join-validator-set-via-snapshot/207568) | Demonstrates a method to join a validator set with a snapshot. | [cryptomolot](https://forum.aptoslabs.com/u/unlimitedmolot) | 2023-06-11 | +| [Alerts for your validator via Telegram public](https://forum.aptoslabs.com/t/alerts-for-your-validator-via-telegram-public/201959) | Demonstrates a useful method for receiving alerts. | [cryptomolot](https://forum.aptoslabs.com/u/unlimitedmolot) | 2023-06-11 | | [Ansible playbook for Node Management (Bare Metal)](https://github.com/RhinoStake/ansible-aptos) | This Ansible Playbook is for the initialization, configuration, planned and hotfix upgrades of Aptos Validators, VFNs and PFNs on bare metal servers. | [RHINO](https://rhinostake.com) | 2023-03-14 | | [Ansible playbook for Node Management (Docker)](https://github.com/LavenderFive/aptos-ansible) | This Ansible Playbook is intended for node management, including initial launch and handling upgrades of nodes. | [Lavender.Five Nodes](https://github.com/LavenderFive) | 2023-03-13 | | [Write Your First Smart Contract On Aptos](https://medium.com/mokshyaprotocol/write-your-first-smart-contract-on-aptos-a-step-by-step-guide-e16a6f5c2be6) | This blog is created to help you start writing smart contracts in Aptos Blockchain. | [Samundra Karki](https://medium.com/@samundrakarki56), [MokshyaProtocol](https://mokshya.io/) | 2023-02-27 | diff --git a/developer-docs-site/docs/guides/data-pruning.md b/developer-docs-site/docs/guides/data-pruning.md index 81d288304d918..95a5eb4825afc 100644 --- a/developer-docs-site/docs/guides/data-pruning.md +++ b/developer-docs-site/docs/guides/data-pruning.md @@ -15,7 +15,7 @@ nodes with a pruning window that can be configured. This document describes how you can configure the pruning behavior. :::note -By default the ledger pruner keeps 150 million recent transactions. 
Unless +By default the ledger pruner keeps 150 million recent transactions. The approximate amount of disk space required for every 150M transactions is 200G. Unless bootstrapped from the genesis and configured to disable the pruner or a long prune window, the node doesn't carry the entirety of the ledger history. Majority of the nodes on both the testnet and mainnet have a partial diff --git a/developer-docs-site/docs/guides/state-sync.md b/developer-docs-site/docs/guides/state-sync.md index 17bd706f753db..dde8c3ef04286 100644 --- a/developer-docs-site/docs/guides/state-sync.md +++ b/developer-docs-site/docs/guides/state-sync.md @@ -8,14 +8,14 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; # State Synchronization -Nodes in an Aptos network, both the validator nodes and the fullnodes, must always be synchronized to the latest Aptos blockchain state. The [state synchronization](https://medium.com/aptoslabs/the-evolution-of-state-sync-the-path-to-100k-transactions-per-second-with-sub-second-latency-at-52e25a2c6f10) (state sync) component that runs on each node is responsible for this synchronization. To achieve this synchronization, state sync identifies and fetches new blockchain data from the peers, validates the data and persists it to the local storage. +Nodes in an Aptos network (e.g., validator nodes and fullnodes) must always be synchronized to the latest Aptos blockchain state. The [state synchronization](https://medium.com/aptoslabs/the-evolution-of-state-sync-the-path-to-100k-transactions-per-second-with-sub-second-latency-at-52e25a2c6f10) (state sync) component that runs on each node is responsible for this. State sync identifies and fetches new blockchain data from the peers, validates the data and persists it to the local storage. :::tip Need to start a node quickly? If you need to start a node quickly, here's what we recommend by use case: - - **Devnet public fullnode**: To sync the entire blockchain history, download [a snapshot](../nodes/full-node/bootstrap-fullnode.md). Otherwise, use [fast sync](state-sync.md#fast-syncing). - - **Testnet public fullnode**: To sync the entire blockchain history, download [a snapshot](../nodes/full-node/bootstrap-fullnode.md). Otherwise, use [fast sync](state-sync.md#fast-syncing). - - **Mainnet public fullnode**: To sync the entire blockchain history, use [output syncing](state-sync.md#applying-all-transaction-outputs). Otherwise, use [fast sync](state-sync.md#fast-syncing). - - **Mainnet validator or validator fullnode**: Use [output syncing](state-sync.md#applying-all-transaction-outputs). Note: [fast sync](state-sync.md#fast-syncing) is not recommended. + - **Devnet public fullnode**: To sync the entire blockchain history, use [output syncing](state-sync.md#applying-all-transaction-outputs). Otherwise, use [fast sync](state-sync.md#fast-syncing). + - **Testnet public fullnode**: To sync the entire blockchain history, restore from a [backup](../nodes/full-node/aptos-db-restore.md). Otherwise, download [a snapshot](../nodes/full-node/bootstrap-fullnode.md) or use [fast sync](state-sync.md#fast-syncing). + - **Mainnet public fullnode**: To sync the entire blockchain history, restore from a [backup](../nodes/full-node/aptos-db-restore.md). Otherwise, use [fast sync](state-sync.md#fast-syncing). + - **Mainnet validator or validator fullnode**: To sync the entire blockchain history, restore from a [backup](../nodes/full-node/aptos-db-restore.md). Otherwise, use [fast sync](state-sync.md#fast-syncing). 
::: ## State sync modes @@ -116,15 +116,6 @@ increase then do the following: 1. Double-check the node configuration file has correctly been updated. 2. Make sure that the node is starting up with an empty storage database (i.e., that it has not synced any state previously). -3. Add the following to your node configuration to account for any potential -network delays that may occur when initializing slow network connections: - -```yaml - state_sync: - state_sync_driver: - ... - max_connection_deadline_secs: 1000000 # Tolerate slow peer discovery & connections -``` ## Running archival nodes @@ -166,43 +157,3 @@ is still manually re-verified, e.g., epoch changes and the resulting blockchain All of the syncing modes get their root of trust from the validator set and cryptographic signatures from those validators over the blockchain data. For more information about how this works, see the [state synchronization blogpost](https://medium.com/aptoslabs/the-evolution-of-state-sync-the-path-to-100k-transactions-per-second-with-sub-second-latency-at-52e25a2c6f10). - -# State sync architecture - -The state synchronization component is comprised of four sub-components, each with a specific purpose: - -1. **Driver**: The driver “drives” the synchronization progress of the node. -It is responsible for verifying all data that the node receives from peers. Data -is forwarded from the peers via the data streaming service. After data -verification, the driver persists the data to storage. -2. **Data Streaming Service**: The streaming service creates data streams for -clients (one of which is the state sync driver). It allows the client to stream -new data chunks from peers, without having to worry about which peers have the -data or how to manage data requests. For example, the client can request all -transactions since version `5` and the data streaming service will provide -this. -3. **Aptos Data Client**: The data client is responsible for handling data -requests from the data streaming service. For the data streaming service to -stream all transactions, it must make multiple requests (each request for a -batch of transactions) and send those requests to peers (e.g., transactions -`1→5`, `6→10`, `11→15`, and so on). The data client takes the request, -identifies which peer can handle the request and sends the request to them. -4. **Storage Service**: The storage service is a simple storage API offered by -each node which allows peers to fetch data. For example, the data client on -peer `X` can send the data request to the storage service on peer `Y` to fetch -a batch of transactions. 
- -## State sync code structure - -Below are the links to the state sync code showing the structure that matches the architecture outlined above: -- **Driver:** [https://github.com/aptos-labs/aptos-core/tree/main/state-sync/state-sync-v2/state-sync-driver](https://github.com/aptos-labs/aptos-core/tree/main/state-sync/state-sync-v2/state-sync-driver) -- **Data Streaming Service:** [https://github.com/aptos-labs/aptos-core/tree/main/state-sync/state-sync-v2/data-streaming-service](https://github.com/aptos-labs/aptos-core/tree/main/state-sync/state-sync-v2/data-streaming-service) -- **Aptos Data Client**: [https://github.com/aptos-labs/aptos-core/tree/main/state-sync/aptos-data-client](https://github.com/aptos-labs/aptos-core/tree/main/state-sync/aptos-data-client) -- **Storage Service:** [https://github.com/aptos-labs/aptos-core/tree/main/state-sync/storage-service](https://github.com/aptos-labs/aptos-core/tree/main/state-sync/storage-service) - -In addition, see also a directory containing the code for -**inter-component** communication: [https://github.com/aptos-labs/aptos-core/tree/main/state-sync/inter-component](https://github.com/aptos-labs/aptos-core/tree/main/state-sync/inter-component). -This is required so that: - - State sync can handle notifications from consensus (e.g., to catch up after falling behind). - - State sync can notify mempool when transactions are committed (i.e., so they can be removed from mempool). - - State sync can update the event subscription service to notify listeners (e.g., other system components for reconfiguration events). diff --git a/developer-docs-site/docs/guides/transaction-management.md b/developer-docs-site/docs/guides/transaction-management.md index 172bcece2b0ef..cf4b882590d0f 100644 --- a/developer-docs-site/docs/guides/transaction-management.md +++ b/developer-docs-site/docs/guides/transaction-management.md @@ -38,7 +38,12 @@ Each transaction requires a distinct sequence number that is sequential to previ In parallel, monitor new transactions submitted. Once the earliest transaction expiration time has expired synchronize up to that transaction. Then repeat the process for the next transaction. -If there is any failure, wait until all outstanding transactions have timed out and leave it to the application to decide how to proceed, e.g., replay failed transactions. +If there is any failure, wait until all outstanding transactions have timed out and leave it to the application to decide how to proceed, e.g., replay failed transactions. The best method for waiting for outstanding transactions is first to query the ledger timestamp and ensure that at least the maximum timeout has elapsed since the last transaction's submit time. From there, validate with mempool that all transactions since the last known committed transaction are either committed or no longer exist within the mempool. This can be done by querying the REST API for transactions of a specific account, specifying the sequence number currently being evaluated and setting a limit of 1. Once these checks are complete, the local transaction number can be resynchronized. A minimal sketch of these checks is shown after the list below. + +These failure handling steps are critical for the following reasons: +* Mempool does not immediately evict expired transactions. +* A new transaction cannot overwrite an existing transaction, even if it is expired. +* Consensus, i.e., the ledger timestamp, dictates expirations; the local node will only expire a transaction after it sees a committed timestamp past the transaction's expiration time and a garbage collection has happened.
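+
+For illustration only, the following minimal Python sketch performs the checks described above. It is not part of any Aptos SDK; the node URL, helper names, and the 30-second maximum timeout are placeholders, and it uses only the public `GET /` (ledger info) and `GET /accounts/{address}/transactions` REST endpoints:
+
+```python
+import requests
+
+NODE = "https://fullnode.mainnet.aptoslabs.com/v1"  # assumed endpoint
+MAX_TIMEOUT_SECS = 30  # illustrative: the largest expiration window used when submitting
+
+def ledger_timestamp_usecs() -> int:
+    # GET / returns the ledger info, including the current ledger (consensus) timestamp in microseconds.
+    return int(requests.get(NODE).json()["ledger_timestamp"])
+
+def safe_to_resync(account: str, last_committed_seq: int, last_submit_usecs: int) -> bool:
+    # 1. Ensure the on-chain (consensus) time has passed the last submit time plus the maximum timeout.
+    if ledger_timestamp_usecs() < last_submit_usecs + MAX_TIMEOUT_SECS * 1_000_000:
+        return False
+    # 2. Walk the sequence numbers after the last known committed transaction, one at a time.
+    #    An empty result means the transaction at that sequence number never committed and,
+    #    because its expiration has passed, it will be garbage collected from mempool.
+    seq = last_committed_seq + 1
+    while True:
+        resp = requests.get(
+            f"{NODE}/accounts/{account}/transactions",
+            params={"start": seq, "limit": 1},
+        )
+        resp.raise_for_status()
+        if not resp.json():
+            return True  # safe to resynchronize the local sequence number
+        seq += 1
+
+# Hypothetical usage:
+# if safe_to_resync("0xcafe", last_committed_seq=41, last_submit_usecs=1_686_000_000_000_000):
+#     print("resynchronize the local sequence number")
+```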
### Managing Transactions diff --git a/developer-docs-site/docs/integration/aptos-apis.md b/developer-docs-site/docs/integration/aptos-apis.md index 97224285a0e21..3280a3e4fd187 100644 --- a/developer-docs-site/docs/integration/aptos-apis.md +++ b/developer-docs-site/docs/integration/aptos-apis.md @@ -13,7 +13,7 @@ Also see the [System Integrators Guide](../guides/system-integrators-guide.md) f ## Understanding rate limits -As with the [Aptos Indexer](./indexing.md#rate-limits), the Aptos REST API has a rate limit of 1000 requests per five minutes by IP address, whether submitting transactions or querying the API on Aptos-provided nodes. (As a node operator, you may raise those limits on your own node.) Note that this limit can change with or without prior notice. +As with the [Aptos Indexer](./indexing.md#rate-limits), the Aptos REST API has a rate limit of 5000 requests per five minutes by IP address, whether submitting transactions or querying the API on Aptos-provided nodes. (As a node operator, you may raise those limits on your own node.) Note that this limit can change with or without prior notice. ## Viewing current and historical state @@ -27,12 +27,12 @@ Most integrations into the Aptos blockchain benefit from a holistic and comprehe Ensure the [fullnode](../nodes/deployments.md) you're communicating with is up to date. The fullnode must reach the version containing your transaction to retrieve relevant data from it. There can be latency from the fullnodes retrieving state from [validator fullnodes](../concepts/fullnodes.md), which in turn rely upon [validator nodes](../concepts/validator-nodes.md) as the source of truth. ::: -The storage service on a node employs two forms of pruning that erase data from nodes: +The storage service on a node employs two forms of pruning that erase data from nodes: * state * events, transactions, and everything else -While either of these may be disabled, storing the state versions is not particularly sustainable. +While either of these may be disabled, storing the state versions is not particularly sustainable. Events and transactions pruning can be disabled via setting the [`enable_ledger_pruner`](https://github.com/aptos-labs/aptos-core/blob/cf0bc2e4031a843cdc0c04e70b3f7cd92666afcf/config/src/config/storage_config.rs#L141) to `false` in `storage_config.rs`. This is default behavior in Mainnet. In the near future, Aptos will provide indexers that mitigate the need to directly query from a node. @@ -44,17 +44,28 @@ The REST API offers querying transactions and events in these ways: ## Reading state with the View function -View functions do not modify blockchain state. A [View](https://github.com/aptos-labs/aptos-core/blob/main/api/src/view_function.rs) function and its [input](https://github.com/aptos-labs/aptos-core/blob/main/api/types/src/view.rs) can be used to read potentially complex on-chain state using Move. For example, you can evaluate who has the highest bid in an auction contract. Here are related files: +View functions do not modify blockchain state when called from the API. A [View](https://github.com/aptos-labs/aptos-core/blob/main/api/src/view_function.rs) function and its [input](https://github.com/aptos-labs/aptos-core/blob/main/api/types/src/view.rs) can be used to read potentially complex on-chain state using Move. For example, you can evaluate who has the highest bid in an auction contract. 
Here are related files: * [`view_function.rs`](https://github.com/aptos-labs/aptos-core/blob/main/api/src/tests/view_function.rs) for an example * related [Move](https://github.com/aptos-labs/aptos-core/blob/90c33dc7a18662839cd50f3b70baece0e2dbfc71/aptos-move/framework/aptos-framework/sources/coin.move#L226) code * [specification](https://github.com/aptos-labs/aptos-core/blob/90c33dc7a18662839cd50f3b70baece0e2dbfc71/api/doc/spec.yaml#L8513). -The View function operates like the [Aptos Simulation API](../guides/system-integrators-guide.md#testing-transactions-or-transaction-pre-execution), though with no side effects and a accessible output path. The function is immutable if tagged as `#[view]`, the compiler will confirm it so and if fail otherwise. View functions can be called via the `/view` endpoint. Calls to view functions require the module and function names along with input type parameters and values. +The view function operates like the [Aptos Simulation API](../guides/system-integrators-guide.md#testing-transactions-or-transaction-pre-execution), though with no side effects and an accessible output path. View functions can be called via the `/view` endpoint. Calls to view functions require the module and function names along with input type parameters and values. -In order to use the View functions, you need to pass `--bytecode-version 6` to the [Aptos CLI](../tools/install-cli/index.md) when publishing the module. +A function does not have to be immutable to be tagged as `#[view]`, but if the function is mutable it will not result in state mutation when called from the API. +If you want to tag a mutable function as `#[view]`, consider making it private so that it cannot be maliciously called during runtime. -> Note: Calling View functions is not yet supported by the Aptos CLI. +In order to use the View functions, you need to [publish the module](../move/move-on-aptos/cli.md#publishing-a-move-package-with-a-named-address) through the [Aptos CLI](../tools/install-cli/index.md). + +In the Aptos CLI, a view function request would look like this: +``` +aptos move view --function-id devnet::message::get_message --profile devnet --args address:devnet +{ + "Result": [ + "View functions rock!" + ] +} +``` In the TypeScript SDK, a view function request would look like this: ``` @@ -73,13 +84,13 @@ The view function returns a list of values as a vector. By default, the results ## Exchanging and tracking coins -Aptos has a standard *Coin type* define in [`coin.move`](https://github.com/aptos-labs/aptos-core/blob/main/aptos-move/framework/aptos-framework/sources/coin.move). Different types of coins can be represented in this type through the use of distinct structs that symbolize the type parameter or use generic for `Coin`. +Aptos has a standard *Coin type* defined in [`coin.move`](https://github.com/aptos-labs/aptos-core/blob/main/aptos-move/framework/aptos-framework/sources/coin.move). Different types of coins can be represented in this type through the use of distinct structs that symbolize the type parameter or use generic for `Coin`. Coins are stored within an account under the resource `CoinStore`. At account creation, each user has the resource `CoinStore<0x1::aptos_coin::AptosCoin>` or `CoinStore<AptosCoin>`, for short. Within this resource is the Aptos coin: `Coin<AptosCoin>`.
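+
+For illustration, a minimal Python sketch of reading an account's `CoinStore<AptosCoin>` resource (and therefore its APT balance) through the REST API follows; the node URL and account address are placeholders, not recommendations:
+
+```python
+import requests
+
+NODE = "https://fullnode.mainnet.aptoslabs.com/v1"  # assumed endpoint
+ACCOUNT = "0x1"  # placeholder address
+COIN_STORE = "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>"
+
+# GET /accounts/{address}/resource/{resource_type} returns a single resource, if it exists.
+resp = requests.get(f"{NODE}/accounts/{ACCOUNT}/resource/{COIN_STORE}")
+resp.raise_for_status()
+store = resp.json()["data"]
+
+# The balance is recorded as a string of Octas (1 APT = 10^8 Octas).
+print("balance (Octas):", store["coin"]["value"])
+```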
### Transferring coins between users -Coins can be transferred between users via the [`coin::transfer`](https://github.com/aptos-labs/aptos-core/blob/36a7c00b29a457469264187d8e44070b2d5391fe/aptos-move/framework/aptos-framework/sources/coin.move#L307) function for all coins and [`aptos_account::transfer`](https://github.com/aptos-labs/aptos-core/blob/88c9aab3982c246f8aa75eb2caf8c8ab1dcab491/aptos-move/framework/aptos-framework/sources/aptos_account.move#L18) for Aptos coins. The advantage of the latter function is that it creates the destination account if it does not exist. +Coins can be transferred between users via the [`coin::transfer`](https://github.com/aptos-labs/aptos-core/blob/36a7c00b29a457469264187d8e44070b2d5391fe/aptos-move/framework/aptos-framework/sources/coin.move#L307) function for all coins and [`aptos_account::transfer`](https://github.com/aptos-labs/aptos-core/blob/88c9aab3982c246f8aa75eb2caf8c8ab1dcab491/aptos-move/framework/aptos-framework/sources/aptos_account.move#L18) for Aptos coins. The advantage of the latter function is that it creates the destination account if it does not exist. :::caution It is important to note that if an account has not registered a `CoinStore` for a given `T`, then any transfer of type `T` to that account will fail. diff --git a/developer-docs-site/docs/integration/indexing.md b/developer-docs-site/docs/integration/indexing.md index c12d0445da832..4a1e83ce059f4 100644 --- a/developer-docs-site/docs/integration/indexing.md +++ b/developer-docs-site/docs/integration/indexing.md @@ -251,7 +251,7 @@ query UserTransactions($limit: Int) { The following rate limit applies for this Aptos-provided indexing service: -- For a web application that calls this Aptos-provided indexer API directly from the client (for example, wallet or explorer), the rate limit is currently 1000 requests per five minutes by IP address. **Note that this limit can change with or without prior notice.** +- For a web application that calls this Aptos-provided indexer API directly from the client (for example, wallet or explorer), the rate limit is currently 5000 requests per five minutes by IP address. **Note that this limit can change with or without prior notice.** If you are running a backend (server-side) application and want to call the indexer programmatically then you should run an indexer-enabled fullnode. diff --git a/developer-docs-site/docs/move/book/coding-conventions.md b/developer-docs-site/docs/move/book/coding-conventions.md index caea845e411c2..654557f22c896 100644 --- a/developer-docs-site/docs/move/book/coding-conventions.md +++ b/developer-docs-site/docs/move/book/coding-conventions.md @@ -10,7 +10,7 @@ This section lays out some basic coding conventions for Move that the Move team - **Constant names**: should be upper camel case and begin with an `E` if they represent error codes (e.g., `EIndexOutOfBounds`) and upper snake case if they represent a non-error value (e.g., `MIN_STAKE`). - - **Generic type names**: should be descriptive, or anti-descriptive where appropriate, e.g., `T` or `Element` for the Vector generic type parameter. Most of the time the "main" type in a module should be the same name as the module e.g., `option::Option`, `fixed_point32::FixedPoint32`. -- **Module file names**: should be the same as the module name e.g., `Option.move`. +- **Module file names**: should be the same as the module name e.g., `option.move`. - **Script file names**: should be lower snake case and should match the name of the “main” function in the script. 
- **Mixed file names**: If the file contains multiple modules and/or scripts, the file name should be lower snake case, where the name does not match any particular module/script inside. diff --git a/developer-docs-site/docs/move/book/functions.md b/developer-docs-site/docs/move/book/functions.md index e95d533fff145..2e19ffd90b78c 100644 --- a/developer-docs-site/docs/move/book/functions.md +++ b/developer-docs-site/docs/move/book/functions.md @@ -327,6 +327,10 @@ fun zero(): u64 { 0 } Here `: u64` indicates that the function's return type is `u64`. +:::tip +A function can return an immutable `&` or mutable `&mut` [reference](./references.md) if derived from an input reference. Keep in mind, this means that a function [cannot return a reference to global storage](./references.md#references-cannot-be-stored) unless it is an [inline function](#inline-functions). +::: + Using tuples, a function can return multiple values: ```move diff --git a/developer-docs-site/docs/move/book/generics.md b/developer-docs-site/docs/move/book/generics.md index d7f0d437a5f78..1e5364cd4a3e7 100644 --- a/developer-docs-site/docs/move/book/generics.md +++ b/developer-docs-site/docs/move/book/generics.md @@ -435,7 +435,7 @@ module m { // error! // foo -> foo> -> foo>> -> ... fun foo() { - foo>(); + foo>(); } } } diff --git a/developer-docs-site/docs/nodes/full-node/aptos-db-restore.md b/developer-docs-site/docs/nodes/full-node/aptos-db-restore.md new file mode 100644 index 0000000000000..ae76d2c7a5422 --- /dev/null +++ b/developer-docs-site/docs/nodes/full-node/aptos-db-restore.md @@ -0,0 +1,79 @@ +--- +title: "Bootstrap Fullnode from Backup" +--- + +# Bootstrap Fullnode from Backup + +Since the Aptos mainnet launch in October 2022, the Aptos community has grown rapidly. As of May 2023, Aptos has 743GB and 159GB of data in testnet and mainnet, respectively. We expect the data to increase greatly as more transactions are submitted to the blockchain. Facing such a large amount of data, we want to provide users with a way to achieve two goals: + +- Quickly bootstrap a database to start a new or failed node +- Efficiently recover data from any specific period + +Our database restore tool lets you use the existing [public backup files](#public-backup-files) to restore the database (i.e., the transaction history containing events, write sets, key-value pairs, etc.) on your local machine to any historical range or to the latest version. The public backup files are backed by cryptographic proof and stored on both AWS and Google Cloud for an easy download. + +## Public backup files + +Aptos Labs maintains a few publicly accessible database backups by continuously querying a local fullnode and storing the backup data in remote storage, such as Amazon S3 or Google Cloud Storage. + +| | AWS Backup Data | Google Cloud Backup Data | +| --- | --- | --- | +| Testnet | https://github.com/aptos-labs/aptos-networks/blob/main/testnet/backups/s3-public.yaml | https://github.com/aptos-labs/aptos-networks/blob/main/testnet/backups/gcs.yaml | +| Mainnet | https://github.com/aptos-labs/aptos-networks/blob/main/mainnet/backups/s3-public.yaml | https://github.com/aptos-labs/aptos-networks/blob/main/mainnet/backups/gcs.yaml | + +The backup files consist of three types of data that can be used to reconstruct the blockchain DB: + +- `epoch_ending` – It contains the ledger_info at the ending block of each epoch since the genesis. This data can be used to prove the epoch's provenance from the genesis and validator set of each epoch. 
+- `state_snapshot` – It contains a snapshot of the blockchain's state Merkle tree (SMT) and key values at a certain version. +- `transaction` – It contains the raw transaction metadata, payload, the executed outputs of the transaction after VM, and the cryptographic proof of the transaction in the ledger history. + +Each type of data in the backup storage is organized as follows: +- The metadata file in the metadata folder contains the range of each backup and the relative path to the backup folder. +- The backup contains a manifest file and all the actual chunked data files. + +![aptos-db-restore.png](../../../static/img/docs/aptos-db-restore.png) + +## Restore a DB using the public backup files + +The [Aptos CLI](../../tools/aptos-cli-tool/use-aptos-cli.md) supports two kinds of restore operations by reading from the public backup files: +1. Recreating a database with minimal transaction history at a user-specified transaction version (or the latest version the backup has) +2. Restoring the database over a specific period. In addition to the above, this option ensures that the recreated database carries the ledger history of the user-designated version range. + +Aptos CLI 1.0.14 or newer is needed to perform these operations. Additionally, depending on whether you use AWS or Google Cloud, install [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) or [gsutil](https://cloud.google.com/storage/docs/gsutil_install). + +### Bootstrap a DB + +The `aptos node bootstrap-db` command can quickly restore a database from the closest snapshot to a target version, but it does not restore the transaction history prior to the target version. + +Use the following options: +- `target-version` – The sync will begin from this period onwards in the transaction history. +- `command-adapter-config` – The path to one of the [YAML configuration files](#public-backup-files) that specifies the location of the public backup files and commands used by our backup and restore tool to interact with the remote storage. +- `target-db-dir` – The target DB path. + +Example command: + +```bash +aptos node bootstrap-db \ + --target-version 500000000 \ + --command-adapter-config /path/to/s3-public.yaml \ + --target-db-dir /path/to/local/db +``` + +### Restore a DB over a specific time period + +The `aptos node bootstrap-db` command can restore the transaction history within a specified period, along with the state Merkle tree at the target version. + +Use the following options: +- `ledger-history-start-version` – The version to which the DB will sync. +- `target-version` – The sync will begin from this period onwards in the transaction history. +- `command-adapter-config` – The path to one of the [YAML configuration files](#public-backup-files) that specifies the location of the public backup files and commands used by our backup and restore tool to interact with the remote storage. +- `target-db-dir` – The target DB path. 
+ +Example command: + +```bash +aptos node bootstrap-db \ + --ledger-history-start-version 150000000 \ + --target-version 155000000 \ + --command-adapter-config /path/to/s3-public.yaml \ + --target-db-dir /path/to/local/db +``` diff --git a/developer-docs-site/docs/reference/glossary.md b/developer-docs-site/docs/reference/glossary.md index 12ff909ce8a9f..d70b6e14a6ce3 100755 --- a/developer-docs-site/docs/reference/glossary.md +++ b/developer-docs-site/docs/reference/glossary.md @@ -187,6 +187,21 @@ then there is a guarantee that T_N will never be included in the blockchain. - **Fullnodes** are clients that ensure data are stored up-to-date on the network. They replicate blockchain state and transactions from other fullnodes and validator nodes. +### Fungible Asset + +- A **fungible asset** is an asset, such as a currency, share, in-game resource, etc., that is interchangeable with another identical asset without any loss in its value. For example, APT is a fungible asset because you can exchange one APT for another. +- Follow the [Digital Asset Standards](../standards/index.md#digital-asset-standards) to create fungible assets on the Aptos blockchain. +- The fungible asset standard is the next generation of the Coin standard; it addresses shortcomings of `aptos_framework::coin`, such as the lack of guaranteed enforcement of freeze and burn, and adds advanced functionality such as programmable transfers (e.g., approve in ERC-20). + +### Fungible Token + +- For TokenV1 (aptos_token::token), a **fungible token** is a token that is interchangeable with other identical tokens (i.e., tokens that share the same `TokenId`). This means the tokens have the same `creator address`, `collection name`, `token name`, and `property version`. +- For TokenV2 (aptos_token_objects::token), a **fungible token** is a fungible asset whose metadata object includes a TokenV2 resource. + +### Fungible Unit + +- A **fungible unit** is an individual unit of a fungible asset. These units are identical and interchangeable without any loss in value. For example, each Octa (the smallest unit of APT) is a fungible unit. + ## G ### Gas diff --git a/developer-docs-site/docs/standards/aptos-token-v2.md b/developer-docs-site/docs/standards/aptos-token-v2.md index 3f90207d0ea6d..8442b90f8f4a1 100644 --- a/developer-docs-site/docs/standards/aptos-token-v2.md +++ b/developer-docs-site/docs/standards/aptos-token-v2.md @@ -50,12 +50,43 @@ flexibility in this way. * NFTs can own other NFTs adding easy composability * Soul bound tokens can be easily supported +## Collections and tokens as objects +In this Token standard, both collections and tokens will be separate [objects](./aptos-object.md). They have their own +distinct addresses and can be referenced both on and off chain by address. Each object can contain multiple resources +so collections and tokens are extensible by default, allowing the creator to add custom data and functionalities without +having to modify the framework. + +On chain, another struct can include a reference to the collection or token objects like below: +```rust +struct ReferenceExample has key { + my_collection: Object<Collection>, + my_token: Object<Token>, +} +``` +where both `my_collection` and `my_token` are addresses (with `Object<>` wrapper). + +Off-chain, the address of the object can be passed in as an argument to entry functions, in place of the object parameter, when constructing a transaction.
For example: +```rust +public entry fun my_function(my_collection: Object) { + // Do something with the collection +} +``` + +Collection and token addresses will also be used to query data such as fetching all resources via fullnode API or against +an indexing service. + ### Royalties -Royalties are simply [another module](https://github.com/aptos-labs/aptos-core/blob/main/aptos-move/framework/aptos-token-objects/sources/royalty.move) -that is attached to a collection. They're allowed to be updated as long as a `MutatorRef` is generated at creation time. -The royalty can also be set directly on a token if it has a different royalty config than the collection's. +Following the object extensibility pattern, royalties are added to collections or tokens as a resource with associated +functionality provided by [the royalty module](https://github.com/aptos-labs/aptos-core/blob/main/aptos-move/framework/aptos-token-objects/sources/royalty.move) +Royalty can be updated as long as a `MutatorRef`, a storable struct that grants permissions, is generated at creation +time and stored. + +See [Aptos Token](#aptos-token) for examples on how Royalty's `MutatorRef` can be stored and used. +Royalty can also be set directly on a token if it has a different royalty config than the collection's. ## Token lifecycle +All token v2 modules are deployed at `0x4`. ### Collection creation Every token belongs to a collection. The developer first needs to create a collection with: @@ -98,8 +129,8 @@ collection with the same name. * Royalty - specifies how many % of the sale price goes to the creator of the collection. This can be changed with a `MutatorRef` generated by the Royalty module. -A `MutatorRef` can be generated only during creation of the collection. If created, the holder of the `MutatorRef` can -change the `description` and the `URI length` of the collection. +A `MutatorRef`, a storable struct that grants permissions to mutate, can be generated only during creation of the collection. +If created, the holder of the `MutatorRef` can change the `description` and the `URI length` of the collection. ```rust public entry fun create_collection(creator: &signer) { let collection_constructor_ref = &collection::create_unlimited_collection( @@ -123,6 +154,8 @@ struct MyCollectionMetadata has key { } public entry fun create_collection(creator: &signer) { + // Constructor ref is a non-storable struct returned when creating a new object. + // It can generate an object signer to add resources to the collection object. let collection_constructor_ref = &collection::create_unlimited_collection( creator, "My Collection Description", @@ -140,7 +173,7 @@ public entry fun create_collection(creator: &signer) { Creators can mint tokens, which are separate objects from the collection. This allows for greater customization. Tokens can be created in two ways: 1. Named tokens. These tokens have deterministic addresses that are sha256 hash of the creator address, collection name, -and a custom seed value, concatenated. This allows for predictable addresses and easier querying of tokens. However, +and token name, concatenated. This allows for predictable addresses and easier querying of tokens. However, named tokens are fully deletable and thus burning them will only delete the token data and not fully delete the underlying object ```rust @@ -159,7 +192,7 @@ public entry fun mint_token(creator: &signer) { ``` 2. (Unnamed) tokens based on the creator account's guid. 
These tokens have addresses are generated based on the creator account's incrementing guid. The addresses of unnamed tokens are not deterministic as the account's guid can change outside -of minting. Thus, querying for unnamed tokens is more difficult and requires indexing. +minting. Thus, querying for unnamed tokens is more difficult and requires indexing. ```rust use aptos_token_objects::token; @@ -175,6 +208,12 @@ public entry fun mint_token(creator: &signer) { } ``` +Creators should cautiously consider whether they should use `create_named_token` or `create_from_account` when building +their custom collection/token. In general `create_from_account` is recommended as it allows for clean deletion if the +tokens are burnt and generally, deterministic addresses for tokens are not always necessary thanks to indexing services. +One example that would prefer deterministic addresses and thus `create_named_token` is a collection of soul bound tokens +where each token's address is created from the holder's name. + ### Token properties Tokens by default have the following properties: * Token name - unique within each collection. A collection cannot have more than one token with the same name. @@ -186,6 +225,8 @@ a different royalty setting than the collection's. A `MutatorRef` can be generated only during creation of the token. ```rust public entry fun mint_token(creator: &signer) { + // Constructor ref is a non-storable struct returned when creating a new object. + // It can be exchanged for signer to add resources to the token object. let token_constructor_ref = &token::create_from_account( creator, "My Collection", @@ -268,15 +309,15 @@ To mint a soul bound token, the creator can call `aptos_token::mint_soul_bound` the holder cannot transfer. ```rust public entry fun mint_soul_bound( -creator: &signer, -collection: String, -description: String, -name: String, -uri: String, -property_keys: vector, -property_types: vector, -property_values: vector>, -soul_bound_to: address, + creator: &signer, + collection: String, + description: String, + name: String, + uri: String, + property_keys: vector, + property_types: vector, + property_values: vector>, + soul_bound_to: address, ) acquires AptosCollection ``` @@ -297,3 +338,53 @@ the refs obtained from creating the collection and token objects and do not expo If a creator wants more custom functionalities such as being able to forcefully transfer a soul bound token, they would need to write their own custom module that builds on top of the base token v2 standard. They can of course borrow inspiration and code from the Aptos Token module. + +## Fungible Token +Similar to [EIP-1155](https://eips.ethereum.org/EIPS/eip-1155), the Token v2 standard also supports fungible tokens +(also known as semi-fungible tokens). An example of this would be armor tokens in a game. Each armor token represents a +type of armor and is a token in a collection with metadata (e.g. durability, defense, etc.) and can be minted and burned. +However, there are multiple instances of the same armor type. For example, a player can have 3 wooden armors, where wooden armor +is a token in the Armor collection. + +This can be easily built by combining Token v2 and Fungible Assets. 
After the creator creates the Armor collection and the +Wooden Armor token, they can make the Wooden Armor token "fungible": + +```rust +use aptos_framework::primary_fungible_store; + +public entry fun create_armor_collection(creator: &signer) { + collection::create_unlimited_collection( + creator, + "Collection containing different types of armors. Each armor type is a separate token", + "Armor", + royalty, + "https://myarmor.com", + ); +} + +public entry fun create_armor_type(creator: &signer, armor_type: String) { + let new_armor_type_constructor_ref = &token::create_from_account( + creator, + "Armor", + "Armor description", + armor_type, + royalty, + "https://myarmor.com/my-named-token.jpeg", + ); + // Make this armor token fungible so there can multiple instances of it. + primary_fungible_store::create_primary_store_enabled_fungible_asset( + new_armor_type_constructor_ref, + maximum_number_of_armors, + armor_type, + "ARMOR", + 0, // Armor cannot be divided so decimals is 0, + "https://mycollection.com/armor-icon.jpeg", + "https://myarmor.com", + ); + + // Add properties such as durability, defence, etc. to this armor token +} +``` + +Now the creator can mint multiple instances of the same armor type and transfer them to players. The players can freely +transfer the armor tokens to each other the same way they would transfer a fungible asset. diff --git a/developer-docs-site/docs/tools/aptos-cli-tool/use-aptos-cli.md b/developer-docs-site/docs/tools/aptos-cli-tool/use-aptos-cli.md index 948247c485cf1..3af3a1d97c6ff 100644 --- a/developer-docs-site/docs/tools/aptos-cli-tool/use-aptos-cli.md +++ b/developer-docs-site/docs/tools/aptos-cli-tool/use-aptos-cli.md @@ -49,10 +49,22 @@ OPTIONS: -V, --version Print version information SUBCOMMANDS: + build-publish-payload + Build a publication transaction payload and store it in a JSON output file clean Cleans derived artifacts of a package compile - Compiles a package and returns the [`ModuleId`]s + Compiles a package and returns the associated ModuleIds + compile-script + Compiles a Move script into bytecode + coverage + Computes coverage for a package + create-resource-account-and-publish-package + Publishes the modules in a Move package to the Aptos blockchain under a resource account + disassemble + Disassemble the Move bytecode pointed to + document + Documents a Move package download Downloads a package and stores it in a directory named after the package help @@ -60,17 +72,23 @@ SUBCOMMANDS: init Creates a new Move package at the given location list - Lists information about packages and modules on-chain + Lists information about packages and modules on-chain for an account prove - Proves the Move package + Proves a Move package publish Publishes the modules in a Move package to the Aptos blockchain run Run a Move function + run-script + Run a Move script test Runs Move unit tests for a package transactional-test Run Move transactional tests + verify-package + Downloads a package and verifies the bytecode + view + Run a view function ``` ### Sub-command help @@ -82,15 +100,30 @@ USAGE: aptos move compile [OPTIONS] OPTIONS: + --bytecode-version + Specify the version of the bytecode the compiler is going to emit + -h, --help Print help information + --included-artifacts + Artifacts to be generated when building the package + + Which artifacts to include in the package. This can be one of `none`, `sparse`, and + `all`. 
`none` is the most compact form and does not allow to reconstruct a source + package from chain; `sparse` is the minimal set of artifacts needed to reconstruct a + source package; `all` includes all available artifacts. The choice of included artifacts + heavily influences the size and therefore gas cost of publishing: `none` is the size of + bytecode alone; `sparse` is roughly 2 times as much; and `all` 3-4 as much. + + [default: sparse] + --named-addresses Named addresses for the move binary - Example: alice=0x1234,bob=0x5678 + Example: alice=0x1234, bob=0x5678 - Note: This will fail if there are duplicates in the Move.toml file remove those first. Also make sure there's no space in between named addresses if more than one is provided. + Note: This will fail if there are duplicates in the Move.toml file remove those first. [default: ] @@ -102,6 +135,18 @@ OPTIONS: --package-dir Path to a move package (the folder with a Move.toml file) + --save-metadata + Save the package metadata in the package's build directory + + If set, package metadata should be generated and stored in the package's build + directory. This metadata can be used to construct a transaction to publish a package. + + --skip-fetch-latest-git-deps + Skip pulling the latest git dependencies + + If you don't have a network connection, the compiler may fail due to no ability to pull + git dependencies. This will allow overriding this for local development. + -V, --version Print version information ``` diff --git a/developer-docs-site/scripts/additional_dict.txt b/developer-docs-site/scripts/additional_dict.txt index 8da06afc2b47a..a7f5089cde741 100644 --- a/developer-docs-site/scripts/additional_dict.txt +++ b/developer-docs-site/scripts/additional_dict.txt @@ -72,6 +72,7 @@ DoS DocGen delegator delegators +deletable dropdown EACCOUNT EALREADY @@ -245,6 +246,7 @@ TokenClient TokenData TokenStore TokenTransfers +TokenV TransactionBuilder TransactionBuilderABI TransactionFee @@ -567,4 +569,4 @@ AptosClient ANSClient FaucetClient CoinClient -RESTful \ No newline at end of file +RESTful diff --git a/developer-docs-site/sidebars.js b/developer-docs-site/sidebars.js index 08b3e02c0d264..4532fccd102b7 100644 --- a/developer-docs-site/sidebars.js +++ b/developer-docs-site/sidebars.js @@ -410,6 +410,7 @@ const sidebars = { items: [ "nodes/full-node/fullnode-source-code-or-docker", "nodes/full-node/bootstrap-fullnode", + "nodes/full-node/aptos-db-restore", "nodes/full-node/update-fullnode-with-new-releases", "nodes/full-node/network-identity-fullnode", "nodes/full-node/fullnode-network-connections", diff --git a/developer-docs-site/src/components/MoveReference/index.tsx b/developer-docs-site/src/components/MoveReference/index.tsx index 69664cd406675..a7c393e1cd158 100644 --- a/developer-docs-site/src/components/MoveReference/index.tsx +++ b/developer-docs-site/src/components/MoveReference/index.tsx @@ -14,7 +14,7 @@ const branches = ["mainnet", "testnet", "devnet", "main"]; const branch_titles = ["Mainnet", "Testnet", "Devnet", "Main"]; -const frameworks = ["move-stdlib", "aptos-framework", "aptos-token", "aptos-token-objects"]; +const frameworks = ["move-stdlib", "aptos-stdlib", "aptos-framework", "aptos-token", "aptos-token-objects"]; const TopNav = ({ branch }: TopNavProps) => { const adjustBranch = (event) => { const params = new URLSearchParams(window.location.search); diff --git a/developer-docs-site/static/img/docs/aptos-db-restore.png b/developer-docs-site/static/img/docs/aptos-db-restore.png new file mode 100644 index 
0000000000000..4bb10a3abaccc Binary files /dev/null and b/developer-docs-site/static/img/docs/aptos-db-restore.png differ diff --git a/developer-docs-site/static/img/docs/stake-state-dark.svg b/developer-docs-site/static/img/docs/stake-state-dark.svg index 6bdd4443997d4..4259015529111 100644 --- a/developer-docs-site/static/img/docs/stake-state-dark.svg +++ b/developer-docs-site/static/img/docs/stake-state-dark.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/developer-docs-site/static/img/docs/stake-state.svg b/developer-docs-site/static/img/docs/stake-state.svg index bb9691f3aa089..5706b931c2d24 100644 --- a/developer-docs-site/static/img/docs/stake-state.svg +++ b/developer-docs-site/static/img/docs/stake-state.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/developer-docs-site/static/scripts/install_cli.py b/developer-docs-site/static/scripts/install_cli.py index 633926ae24713..3bce28e39a1b2 100644 --- a/developer-docs-site/static/scripts/install_cli.py +++ b/developer-docs-site/static/scripts/install_cli.py @@ -215,13 +215,13 @@ def __init__( version: Optional[str] = None, force: bool = False, accept_all: bool = False, - path: Optional[str] = None, + bin_dir: Optional[str] = None, ) -> None: self._version = version self._force = force self._accept_all = accept_all + self._bin_dir = Path(bin_dir).expanduser() if bin_dir else None - self._bin_dir = None self._release_info = None self._latest_release_info = None @@ -514,12 +514,17 @@ def main(): action="store_true", default=False, ) + parser.add_argument( + "--bin-dir", + help="If given, the CLI binary will be downloaded here instead", + ) args = parser.parse_args() installer = Installer( force=args.force, accept_all=args.accept_all or not is_interactive(), + bin_dir=args.bin_dir, ) try: diff --git a/docker/builder/docker-bake-rust-all.hcl b/docker/builder/docker-bake-rust-all.hcl index faa47cd481eb5..651d8053aad4b 100644 --- a/docker/builder/docker-bake-rust-all.hcl +++ b/docker/builder/docker-bake-rust-all.hcl @@ -27,7 +27,7 @@ variable "GCP_DOCKER_ARTIFACT_REPO_US" {} variable "AWS_ECR_ACCOUNT_NUM" {} variable "TARGET_REGISTRY" { - // must be "aws" | "remote" | "local", informs which docker tags are being generated + // must be "gcp" | "local" | "remote-all" | "remote" (deprecated, but kept for backwards compatibility. Same as "gcp"), informs which docker tags are being generated default = CI == "true" ? "remote" : "local" } @@ -68,14 +68,14 @@ group "forge-images" { target "debian-base" { dockerfile = "docker/builder/debian-base.Dockerfile" contexts = { - debian = "docker-image://debian:bullseye-20230502@sha256:32888a3c745e38e72a5f49161afc7bb52a263b8f5ea1b3b4a6af537678f29491" + debian = "docker-image://debian:bullseye@sha256:1bf0e24813ee8306c3fba1fe074793eb91c15ee580b61fff7f3f41662bc0031d" } } target "builder-base" { dockerfile = "docker/builder/builder.Dockerfile" - target = "builder-base" - context = "." + target = "builder-base" + context = "." 
contexts = { rust = "docker-image://rust:1.66.1-bullseye@sha256:f72949bcf1daf8954c0e0ed8b7e10ac4c641608f6aa5f0ef7c172c49f35bd9b5" } @@ -92,7 +92,7 @@ target "builder-base" { target "aptos-node-builder" { dockerfile = "docker/builder/builder.Dockerfile" - target = "aptos-node-builder" + target = "aptos-node-builder" contexts = { builder-base = "target:builder-base" } @@ -103,9 +103,9 @@ target "aptos-node-builder" { target "tools-builder" { dockerfile = "docker/builder/builder.Dockerfile" - target = "tools-builder" + target = "tools-builder" contexts = { - builder-base = "target:builder-base" + builder-base = "target:builder-base" } secret = [ "id=GIT_CREDENTIALS" @@ -114,8 +114,8 @@ target "tools-builder" { target "_common" { contexts = { - debian-base = "target:debian-base" - node-builder = "target:aptos-node-builder" + debian-base = "target:debian-base" + node-builder = "target:aptos-node-builder" tools-builder = "target:tools-builder" } labels = { @@ -124,12 +124,12 @@ target "_common" { "org.label-schema.git-sha" = "${GIT_SHA}" } args = { - PROFILE = "${PROFILE}" - FEATURES = "${FEATURES}" - GIT_SHA = "${GIT_SHA}" - GIT_BRANCH = "${GIT_BRANCH}" - GIT_TAG = "${GIT_TAG}" - BUILD_DATE = "${BUILD_DATE}" + PROFILE = "${PROFILE}" + FEATURES = "${FEATURES}" + GIT_SHA = "${GIT_SHA}" + GIT_BRANCH = "${GIT_BRANCH}" + GIT_TAG = "${GIT_TAG}" + BUILD_DATE = "${BUILD_DATE}" } } @@ -137,7 +137,7 @@ target "validator-testing" { inherits = ["_common"] dockerfile = "docker/builder/validator-testing.Dockerfile" target = "validator-testing" - cache-from = generate_cache_from("validator-testing") + cache-from = generate_cache_from("validator-testing") cache-to = generate_cache_to("validator-testing") tags = generate_tags("validator-testing") } @@ -146,7 +146,7 @@ target "tools" { inherits = ["_common"] dockerfile = "docker/builder/tools.Dockerfile" target = "tools" - cache-from = generate_cache_from("tools") + cache-from = generate_cache_from("tools") cache-to = generate_cache_to("tools") tags = generate_tags("tools") } @@ -155,7 +155,7 @@ target "forge" { inherits = ["_common"] dockerfile = "docker/builder/forge.Dockerfile" target = "forge" - cache-from = generate_cache_from("forge") + cache-from = generate_cache_from("forge") cache-to = generate_cache_to("forge") tags = generate_tags("forge") } @@ -164,7 +164,7 @@ target "validator" { inherits = ["_common"] dockerfile = "docker/builder/validator.Dockerfile" target = "validator" - cache-from = generate_cache_from("validator") + cache-from = generate_cache_from("validator") cache-to = generate_cache_to("validator") tags = generate_tags("validator") } @@ -173,7 +173,7 @@ target "tools" { inherits = ["_common"] dockerfile = "docker/builder/tools.Dockerfile" target = "tools" - cache-from = generate_cache_from("tools") + cache-from = generate_cache_from("tools") cache-to = generate_cache_to("tools") tags = generate_tags("tools") } @@ -182,7 +182,7 @@ target "node-checker" { inherits = ["_common"] dockerfile = "docker/builder/node-checker.Dockerfile" target = "node-checker" - cache-from = generate_cache_from("node-checker") + cache-from = generate_cache_from("node-checker") cache-to = generate_cache_to("node-checker") tags = generate_tags("node-checker") } @@ -191,8 +191,8 @@ target "faucet" { inherits = ["_common"] dockerfile = "docker/builder/faucet.Dockerfile" target = "faucet" - cache-from = generate_cache_from("faucet") - cache-to = generate_cache_to("faucet") + cache-from = generate_cache_from("faucet") + cache-to = generate_cache_to("faucet") tags = 
generate_tags("faucet") } @@ -200,17 +200,17 @@ target "telemetry-service" { inherits = ["_common"] dockerfile = "docker/builder/telemetry-service.Dockerfile" target = "telemetry-service" - cache-from = generate_cache_from("telemetry-service") - cache-to = generate_cache_to("telemetry-service") - tags = generate_tags("telemetry-service") + cache-from = generate_cache_from("telemetry-service") + cache-to = generate_cache_to("telemetry-service") + tags = generate_tags("telemetry-service") } target "indexer-grpc" { - inherits = ["_common"] + inherits = ["_common"] dockerfile = "docker/builder/indexer-grpc.Dockerfile" - target = "indexer-grpc" - cache-to = generate_cache_to("indexer-grpc") - tags = generate_tags("indexer-grpc") + target = "indexer-grpc" + cache-to = generate_cache_to("indexer-grpc") + tags = generate_tags("indexer-grpc") } function "generate_cache_from" { @@ -224,7 +224,7 @@ function "generate_cache_from" { function "generate_cache_to" { params = [target] - result = TARGET_REGISTRY == "remote" ? [ + result = TARGET_REGISTRY != "local" ? [ "type=registry,ref=${GCP_DOCKER_ARTIFACT_REPO}/${target}:cache-${IMAGE_TAG_PREFIX}${NORMALIZED_GIT_BRANCH_OR_PR}", "type=registry,ref=${GCP_DOCKER_ARTIFACT_REPO}/${target}:cache-${IMAGE_TAG_PREFIX}${GIT_SHA}" ] : [] @@ -232,15 +232,22 @@ function "generate_cache_to" { function "generate_tags" { params = [target] - result = TARGET_REGISTRY == "remote" ? [ + result = TARGET_REGISTRY == "remote-all" ? [ + "${GCP_DOCKER_ARTIFACT_REPO}/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}", + "${GCP_DOCKER_ARTIFACT_REPO}/${target}:${IMAGE_TAG_PREFIX}${NORMALIZED_GIT_BRANCH_OR_PR}", + "${GCP_DOCKER_ARTIFACT_REPO_US}/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}", + "${GCP_DOCKER_ARTIFACT_REPO_US}/${target}:${IMAGE_TAG_PREFIX}${NORMALIZED_GIT_BRANCH_OR_PR}", + "${ecr_base}/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}", + "${ecr_base}/${target}:${IMAGE_TAG_PREFIX}${NORMALIZED_GIT_BRANCH_OR_PR}", + ] : ( + TARGET_REGISTRY == "gcp" || TARGET_REGISTRY == "remote" ? [ "${GCP_DOCKER_ARTIFACT_REPO}/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}", "${GCP_DOCKER_ARTIFACT_REPO}/${target}:${IMAGE_TAG_PREFIX}${NORMALIZED_GIT_BRANCH_OR_PR}", "${GCP_DOCKER_ARTIFACT_REPO_US}/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}", "${GCP_DOCKER_ARTIFACT_REPO_US}/${target}:${IMAGE_TAG_PREFIX}${NORMALIZED_GIT_BRANCH_OR_PR}", - "${ecr_base}/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}", - "${ecr_base}/${target}:${IMAGE_TAG_PREFIX}${NORMALIZED_GIT_BRANCH_OR_PR}", - ] : [ - "aptos-core/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}-from-local", - "aptos-core/${target}:${IMAGE_TAG_PREFIX}from-local", - ] + ] : [ // "local" or any other value + "aptos-core/${target}:${IMAGE_TAG_PREFIX}${GIT_SHA}-from-local", + "aptos-core/${target}:${IMAGE_TAG_PREFIX}from-local", + ] + ) } diff --git a/docker/builder/docker-bake-rust-all.sh b/docker/builder/docker-bake-rust-all.sh index 4589fd1fa20ad..dc62f1f02ab28 100755 --- a/docker/builder/docker-bake-rust-all.sh +++ b/docker/builder/docker-bake-rust-all.sh @@ -51,9 +51,9 @@ echo "To build only a specific target, run: docker/builder/docker-bake-rust-all. echo "E.g. 
docker/builder/docker-bake-rust-all.sh forge-images" if [ "$CI" == "true" ]; then - TARGET_REGISTRY=remote docker buildx bake --progress=plain --file docker/builder/docker-bake-rust-all.hcl --push $BUILD_TARGET + docker buildx bake --progress=plain --file docker/builder/docker-bake-rust-all.hcl --push $BUILD_TARGET else - TARGET_REGISTRY=local docker buildx bake --file docker/builder/docker-bake-rust-all.hcl $BUILD_TARGET + docker buildx bake --file docker/builder/docker-bake-rust-all.hcl $BUILD_TARGET fi echo "Build complete. Docker buildx cache usage:" diff --git a/docker/compose/indexer-grpc/cache-worker-config.yaml b/docker/compose/indexer-grpc/cache-worker-config.yaml index 67d6f8f82acf6..df7748a589529 100644 --- a/docker/compose/indexer-grpc/cache-worker-config.yaml +++ b/docker/compose/indexer-grpc/cache-worker-config.yaml @@ -5,4 +5,4 @@ server_config: file_store_config: file_store_type: LocalFileStore local_file_store_path: /opt/aptos/file-store - redis_main_instance_address: 172.16.1.12:6379 + redis_main_instance_address: 172.16.1.12:6379 # use the primary diff --git a/docker/compose/indexer-grpc/data-service-config.yaml b/docker/compose/indexer-grpc/data-service-config.yaml index 3c266cbb491d4..9212ff6abdfb6 100644 --- a/docker/compose/indexer-grpc/data-service-config.yaml +++ b/docker/compose/indexer-grpc/data-service-config.yaml @@ -6,4 +6,4 @@ server_config: file_store_config: file_store_type: LocalFileStore local_file_store_path: /opt/aptos/file-store - redis_read_replica_address: 172.16.1.12:6379 + redis_read_replica_address: 172.16.1.22:6379 # use a the read replica diff --git a/docker/compose/indexer-grpc/docker-compose.yaml b/docker/compose/indexer-grpc/docker-compose.yaml index 1714d4ab3ee70..9c60f53cbbfc4 100644 --- a/docker/compose/indexer-grpc/docker-compose.yaml +++ b/docker/compose/indexer-grpc/docker-compose.yaml @@ -14,7 +14,7 @@ version: "3.8" services: redis: - image: redis:6.2 + image: ${REDIS_IMAGE_REPO:-redis}:6.2 networks: shared: ipv4_address: 172.16.1.12 @@ -24,6 +24,18 @@ services: ports: - 6379:6379 + redis-replica: + image: ${REDIS_IMAGE_REPO:-redis}:6.2 + command: redis-server --replicaof redis 6379 + networks: + shared: + ipv4_address: 172.16.1.22 + restart: unless-stopped + expose: + - 6379 + depends_on: + - redis + indexer-grpc-cache-worker: image: "${INDEXER_GRPC_IMAGE_REPO:-aptoslabs/indexer-grpc}:${IMAGE_TAG:-devnet}" networks: @@ -83,8 +95,10 @@ services: - '/opt/aptos/data-service-config.yaml' ports: - "50052:50052" # GRPC + - "18084:8084" # health depends_on: - indexer-grpc-cache-worker + - redis-replica # This joins the indexer-grpc compose with the validator-testnet compose using a shared docker network networks: diff --git a/docker/compose/indexer-grpc/reset_indexer_grpc_testnet.sh b/docker/compose/indexer-grpc/reset_indexer_grpc_testnet.sh deleted file mode 100755 index 044fe1bd03cc0..0000000000000 --- a/docker/compose/indexer-grpc/reset_indexer_grpc_testnet.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# kill everything -docker ps -a | grep -E "validator|faucet|indexer|redis" | awk '{ print $1 }' | xargs -I{} docker kill {} -docker ps -a | grep -E "validator|faucet|indexer|redis" | awk '{ print $1 }' | xargs -I{} docker rm {} - -# delete volume -docker volume rm aptos-shared indexer-grpc-file-store diff --git a/docker/compose/indexer-grpc/test_indexer_grpc_docker_compose.sh b/docker/compose/indexer-grpc/test_indexer_grpc_docker_compose.sh deleted file mode 100755 index 910c40fc11fae..0000000000000 --- 
a/docker/compose/indexer-grpc/test_indexer_grpc_docker_compose.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -# A quick script that checks the e2e setup for the indexer-grpc service on docker-compose - -if ! command -v grpcurl &>/dev/null; then - wget https://github.com/fullstorydev/grpcurl/releases/download/v1.8.7/grpcurl_1.8.7_linux_x86_64.tar.gz - sha=$(shasum -a 256 grpcurl_1.8.7_linux_x86_64.tar.gz | awk '{ print $1 }') - [ "$sha" != "b50a9c9cdbabab03c0460a7218eab4a954913d696b4d69ffb720f42d869dbdd5" ] && echo "shasum mismatch" && exit 1 - tar -xvf grpcurl_1.8.7_linux_x86_64.tar.gz - chmod +x grpcurl - mv grpcurl /usr/local/bin/grpcurl - grpcurl -version -fi - -# Try hitting the indexer-grpc setup in a number of ways -# - -# try getting the internal grpc on the fullnode itself -stream_time_seconds=30 -start_time=$(date +%s) -timeout "${stream_time_seconds}s" grpcurl -max-msg-sz 10000000 -d '{ "starting_version": 0 }' -import-path crates/aptos-protos/proto -proto aptos/internal/fullnode/v1/fullnode_data.proto -plaintext 127.0.0.1:50051 aptos.internal.fullnode.v1.FullnodeData/GetTransactionsFromNode > fullnode_grpc_response.txt -end_time=$(date +%s) -total_time=$((end_time - start_time)) -echo "grpcurl took ${total_time} seconds to run" - -if [ $total_time -lt "${stream_time_seconds}" ]; then - echo "grpcurl exited early, which indicates failure" - echo "FullnodeData/GetTransactionsFromNode on the aptos-node should be an endless stream" - exit 1 -fi - -# try hitting the data service -stream_time_seconds=30 -start_time=$(date +%s) -timeout "${stream_time_seconds}s" grpcurl -max-msg-sz 10000000 -d '{ "starting_version": 0 }' -H "x-aptos-data-authorization:dummy_token" -import-path crates/aptos-protos/proto -proto aptos/indexer/v1/raw_data.proto -plaintext 127.0.0.1:50052 aptos.indexer.v1.RawData/GetTransactions > data_service_grpc_response.txt -end_time=$(date +%s) -total_time=$((end_time - start_time)) -echo "grpcurl took ${total_time} seconds to run" - -if [ $total_time -lt "${stream_time_seconds}" ]; then - echo "grpcurl exited early, which indicates failure" - echo "RawData/GetTransactions on the data service should be an endless stream" - exit 1 -fi - -echo "All tests passed!" diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml index 4915b396c1c8a..2b3ca437063b8 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml @@ -35,3 +35,11 @@ serde_yaml = { workspace = true } tokio = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } + +[dev-dependencies] +aptos-config = { workspace = true } +reqwest = { workspace = true } +tempfile = { workspace = true } + +[features] +integration-tests = [] diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/README.md b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/README.md index c8478b53b2978..0299c39364a72 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/README.md +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/README.md @@ -12,10 +12,11 @@ Cache worker fetches data from fullnode GRPC and push data to Cache. 
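The updated configuration in the YAML example that follows is nested under `server_config` because the worker is loaded through the server framework's generic wrapper: framework-owned fields such as `health_check_port` stay at the top level of the file, while everything the cache worker itself needs sits underneath `server_config`. A rough sketch of that wrapper shape, using the type and field names that appear in the server-framework and integration-test changes later in this diff (derives, trait bounds, and exact field types are assumed):

```rust
use serde::{Deserialize, Serialize};

// Sketch only: top-level layout of an indexer-grpc component config file.
// Framework fields live at the root; the component config lives under `server_config`.
#[derive(Debug, Deserialize, Serialize)]
pub struct GenericConfig<T> {
    pub health_check_port: u16,
    pub server_config: T,
}
```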
* Yaml Example ```yaml -fullnode_grpc_address: 127.0.0.1:50051 -redis_address: 127.0.0.1:6379 -health_check_port: 8081 -file_store: - file_store_type: GcsFileStore - gcs_file_store_bucket_name: indexer-grpc-file-store-bucketname +health_check_port: 8083 +server_config: + fullnode_grpc_address: 0.0.0.0:50052 + file_store_config: + file_store_type: GcsFileStore + gcs_file_store_bucket_name: indexer-grpc-file-store-bucketname + redis_main_instance_address: 127.0.0.1:6379 ``` diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs index a6cf53d8e3eb6..55820d580a76d 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs @@ -3,3 +3,35 @@ pub mod metrics; pub mod worker; + +use anyhow::{Ok, Result}; +use aptos_indexer_grpc_server_framework::RunnableConfig; +use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig; +use serde::{Deserialize, Serialize}; +use worker::Worker; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IndexerGrpcCacheWorkerConfig { + pub fullnode_grpc_address: String, + pub file_store_config: IndexerGrpcFileStoreConfig, + pub redis_main_instance_address: String, +} + +#[async_trait::async_trait] +impl RunnableConfig for IndexerGrpcCacheWorkerConfig { + async fn run(&self) -> Result<()> { + let mut worker = Worker::new( + self.fullnode_grpc_address.clone(), + self.redis_main_instance_address.clone(), + self.file_store_config.clone(), + ) + .await; + worker.run().await; + Ok(()) + } + + fn get_server_name(&self) -> String { + "idxcache".to_string() + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/main.rs index 8556c4ed2bf07..20820f529efab 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/main.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/main.rs @@ -1,38 +1,10 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use anyhow::{Ok, Result}; -use aptos_indexer_grpc_cache_worker::worker::Worker; -use aptos_indexer_grpc_server_framework::{RunnableConfig, ServerArgs}; -use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig; +use anyhow::Result; +use aptos_indexer_grpc_cache_worker::IndexerGrpcCacheWorkerConfig; +use aptos_indexer_grpc_server_framework::ServerArgs; use clap::Parser; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct IndexerGrpcCacheWorkerConfig { - pub fullnode_grpc_address: String, - pub file_store_config: IndexerGrpcFileStoreConfig, - pub redis_main_instance_address: String, -} - -#[async_trait::async_trait] -impl RunnableConfig for IndexerGrpcCacheWorkerConfig { - async fn run(&self) -> Result<()> { - let mut worker = Worker::new( - self.fullnode_grpc_address.clone(), - self.redis_main_instance_address.clone(), - self.file_store_config.clone(), - ) - .await; - worker.run().await; - Ok(()) - } - - fn get_server_name(&self) -> String { - "idxcache".to_string() - } -} #[tokio::main] async fn main() -> Result<()> { diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs index f9404e33347e1..2e78959acc1b8 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs @@ 
-26,8 +26,6 @@ use tracing::{error, info}; type ChainID = u32; type StartingVersion = u64; -const WORKER_RESTART_DELAY_IF_METADATA_NOT_FOUND_IN_SECS: u64 = 60; - pub struct Worker { /// Redis client. redis_client: redis::Client, @@ -103,22 +101,12 @@ impl Worker { }; file_store_operator.verify_storage_bucket_existence().await; - let mut starting_version = 0; - let file_store_metadata = file_store_operator.get_file_store_metadata().await; + let starting_version = file_store_operator + .get_starting_version() + .await + .unwrap_or(0); - if let Some(metadata) = file_store_metadata { - info!("[Indexer Cache] File store metadata: {:?}", metadata); - starting_version = metadata.version; - } else { - error!("[Indexer Cache] File store is empty. Exit after 1 minute."); - tokio::spawn(async move { - tokio::time::sleep(std::time::Duration::from_secs( - WORKER_RESTART_DELAY_IF_METADATA_NOT_FOUND_IN_SECS, - )) - .await; - std::process::exit(1); - }); - } + let file_store_metadata = file_store_operator.get_file_store_metadata().await; // 2. Start streaming RPC. let request = tonic::Request::new(GetTransactionsFromNodeRequest { diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/README.md b/ecosystem/indexer-grpc/indexer-grpc-data-service/README.md index de6b6fe025496..667452f932fc4 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/README.md +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/README.md @@ -22,6 +22,9 @@ server_config: file_store_config: file_store_type: GcsFileStore gcs_file_store_bucket_name: indexer-grpc-file-store-bucketname + data_service_grpc_tls_config: + cert_path: /path/to/cert.cert + key_path: /path/to/key.pem redis_read_replica_address: 127.0.0.1:6379 ``` diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs index 646aa1611cd0d..f86b31ba33ff4 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs @@ -23,8 +23,24 @@ use tonic::{ #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -pub struct IndexerGrpcDataServiceConfig { +pub struct TlsConfig { + // TLS config. 
pub data_service_grpc_listen_address: String, + pub cert_path: String, + pub key_path: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct NonTlsConfig { + pub data_service_grpc_listen_address: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IndexerGrpcDataServiceConfig { + pub data_service_grpc_tls_config: Option, + pub data_service_grpc_non_tls_config: Option, pub whitelisted_auth_tokens: Vec, pub file_store_config: IndexerGrpcFileStoreConfig, pub redis_read_replica_address: String, @@ -33,8 +49,6 @@ pub struct IndexerGrpcDataServiceConfig { #[async_trait::async_trait] impl RunnableConfig for IndexerGrpcDataServiceConfig { async fn run(&self) -> Result<()> { - let grpc_address = self.data_service_grpc_listen_address.clone(); - let token_set = build_auth_token_set(self.whitelisted_auth_tokens.clone()); let authentication_inceptor = move |req: Request<()>| -> std::result::Result, Status> { @@ -61,7 +75,7 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { .register_encoded_file_descriptor_set(TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET) .register_encoded_file_descriptor_set(UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET) .build() - .expect("Failed to build reflection service"); + .map_err(|e| anyhow::anyhow!("Failed to build reflection service: {}", e))?; // Add authentication interceptor. let server = RawDataServerWrapper::new( @@ -72,12 +86,57 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { .send_compressed(CompressionEncoding::Gzip) .accept_compressed(CompressionEncoding::Gzip); let svc_with_interceptor = InterceptedService::new(svc, authentication_inceptor); - Server::builder() - .add_service(reflection_service) - .add_service(svc_with_interceptor) - .serve(grpc_address.to_socket_addrs().unwrap().next().unwrap()) - .await - .map_err(|e| anyhow::anyhow!("Failed to serve: {}", e)) + + let svc_with_interceptor_clone = svc_with_interceptor.clone(); + let reflection_service_clone = reflection_service.clone(); + + let mut tasks = vec![]; + if self.data_service_grpc_non_tls_config.is_some() { + let config = self.data_service_grpc_non_tls_config.clone().unwrap(); + let grpc_address = config + .data_service_grpc_listen_address + .to_socket_addrs() + .map_err(|e| anyhow::anyhow!(e))? + .next() + .ok_or_else(|| anyhow::anyhow!("Failed to parse grpc address"))?; + tasks.push(tokio::spawn(async move { + Server::builder() + .add_service(svc_with_interceptor_clone) + .add_service(reflection_service_clone) + .serve(grpc_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + if self.data_service_grpc_tls_config.is_some() { + let config = self.data_service_grpc_tls_config.clone().unwrap(); + let grpc_address = config + .data_service_grpc_listen_address + .to_socket_addrs() + .map_err(|e| anyhow::anyhow!(e))? + .next() + .ok_or_else(|| anyhow::anyhow!("Failed to parse grpc address"))?; + + let cert = tokio::fs::read(config.cert_path.clone()).await?; + let key = tokio::fs::read(config.key_path.clone()).await?; + let identity = tonic::transport::Identity::from_pem(cert, key); + tasks.push(tokio::spawn(async move { + Server::builder() + .tls_config(tonic::transport::ServerTlsConfig::new().identity(identity))? 
+ .add_service(svc_with_interceptor) + .add_service(reflection_service) + .serve(grpc_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + + if tasks.is_empty() { + return Err(anyhow::anyhow!("No grpc config provided")); + } + + futures::future::try_join_all(tasks).await?; + Ok(()) } fn get_server_name(&self) -> String { diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs index 9968929d5e490..d5461a6bb570b 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs @@ -50,7 +50,7 @@ pub static ERROR_COUNT: Lazy = Lazy::new(|| { /// Data latency for data service based on latest processed transaction based on selected processor. pub static PROCESSED_LATENCY_IN_SECS: Lazy = Lazy::new(|| { register_gauge_vec!( - "indexer_grpc_data_service_data_latency_in_secs", + "indexer_grpc_data_service_latest_data_latency_in_secs", "Latency of data service based on latest processed transaction", &["request_token", "processor_name"], ) @@ -60,7 +60,7 @@ pub static PROCESSED_LATENCY_IN_SECS: Lazy = Lazy::new(|| { /// Data latency for data service based on latest processed transaction for all processors. pub static PROCESSED_LATENCY_IN_SECS_ALL: Lazy = Lazy::new(|| { register_histogram_vec!( - "indexer_grpc_data_service_data_latency_in_secs_all", + "indexer_grpc_data_service_latest_data_latency_in_secs_all", "Latency of data service based on latest processed transaction", &["request_token"] ) diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs index 22aa91a53fc46..db7e3e6cb2457 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs @@ -9,7 +9,7 @@ use aptos_indexer_grpc_utils::{ build_protobuf_encoded_transaction_wrappers, cache_operator::{CacheBatchGetStatus, CacheOperator}, config::IndexerGrpcFileStoreConfig, - constants::{GRPC_AUTH_TOKEN_HEADER, GRPC_REQUEST_NAME_HEADER}, + constants::{BLOB_STORAGE_SIZE, GRPC_AUTH_TOKEN_HEADER, GRPC_REQUEST_NAME_HEADER}, file_store_operator::{FileStoreOperator, GcsFileStoreOperator, LocalFileStoreOperator}, time_diff_since_pb_timestamp_in_secs, EncodedTransactionWithVersion, }; @@ -261,15 +261,19 @@ impl RawData for RawDataServerWrapper { ]) .inc_by(current_batch_size as u64); if let Some(data_latency_in_secs) = data_latency_in_secs { - PROCESSED_LATENCY_IN_SECS - .with_label_values(&[ - request_metadata.request_token.as_str(), - request_metadata.request_name.as_str(), - ]) - .set(data_latency_in_secs); - PROCESSED_LATENCY_IN_SECS_ALL - .with_label_values(&[request_metadata.request_source.as_str()]) - .observe(data_latency_in_secs); + // If it's a partial batch, we record the latency because it usually means + // the data is the latest. 
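The reason a partial batch implies fresh data: transactions are stored and streamed in fixed-size blobs of `BLOB_STORAGE_SIZE`, so a response that is not a whole number of blobs generally means the reader has caught up to the head of the chain, which is the only point at which this latency metric is meaningful. A hedged distillation of the check that follows (the constant's value here is illustrative, not necessarily the crate's):

```rust
// Illustrative value for this sketch; the real constant lives in indexer-grpc-utils.
const BLOB_STORAGE_SIZE: usize = 1_000;

/// True when a batch is not a whole number of blobs, i.e. likely the newest data.
fn is_partial_batch(current_batch_size: usize) -> bool {
    current_batch_size % BLOB_STORAGE_SIZE != 0
}

// is_partial_batch(1_000) == false (full historical blob)
// is_partial_batch(137)   == true  (partial batch at the head of the stream)
```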
+ if current_batch_size % BLOB_STORAGE_SIZE != 0 { + PROCESSED_LATENCY_IN_SECS + .with_label_values(&[ + request_metadata.request_token.as_str(), + request_metadata.request_name.as_str(), + ]) + .set(data_latency_in_secs); + PROCESSED_LATENCY_IN_SECS_ALL + .with_label_values(&[request_metadata.request_source.as_str()]) + .observe(data_latency_in_secs); + } } }, Err(SendTimeoutError::Timeout(_)) => { diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs index 1f0625b52964c..e44896e3f001d 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs @@ -3,3 +3,32 @@ pub mod metrics; pub mod processor; + +use anyhow::Result; +use aptos_indexer_grpc_server_framework::RunnableConfig; +use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig; +use processor::Processor; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IndexerGrpcFileStoreWorkerConfig { + pub file_store_config: IndexerGrpcFileStoreConfig, + pub redis_main_instance_address: String, +} + +#[async_trait::async_trait] +impl RunnableConfig for IndexerGrpcFileStoreWorkerConfig { + async fn run(&self) -> Result<()> { + let mut processor = Processor::new( + self.redis_main_instance_address.clone(), + self.file_store_config.clone(), + ); + processor.run().await; + Ok(()) + } + + fn get_server_name(&self) -> String { + "idxfile".to_string() + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs index db8a28bc0c318..ef3714cb9c6b3 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs @@ -2,34 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; -use aptos_indexer_grpc_file_store::processor::Processor; -use aptos_indexer_grpc_server_framework::{RunnableConfig, ServerArgs}; -use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig; +use aptos_indexer_grpc_file_store::IndexerGrpcFileStoreWorkerConfig; +use aptos_indexer_grpc_server_framework::ServerArgs; use clap::Parser; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct IndexerGrpcFileStoreWorkerConfig { - pub file_store_config: IndexerGrpcFileStoreConfig, - pub redis_main_instance_address: String, -} - -#[async_trait::async_trait] -impl RunnableConfig for IndexerGrpcFileStoreWorkerConfig { - async fn run(&self) -> Result<()> { - let mut processor = Processor::new( - self.redis_main_instance_address.clone(), - self.file_store_config.clone(), - ); - processor.run().await; - Ok(()) - } - - fn get_server_name(&self) -> String { - "idxfile".to_string() - } -} #[tokio::main] async fn main() -> Result<()> { diff --git a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/Cargo.toml new file mode 100644 index 0000000000000..e3829549a7239 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "aptos-indexer-grpc-integration-tests" +version = "0.1.0" +edition = "2021" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { workspace = true } +aptos-config = { workspace = true } 
+aptos-indexer-grpc-cache-worker = { workspace = true } +aptos-indexer-grpc-data-service = { workspace = true } +aptos-indexer-grpc-file-store = { workspace = true } +aptos-indexer-grpc-server-framework = { workspace = true } +aptos-indexer-grpc-utils = { workspace = true } +aptos-inspection-service = { workspace = true } +aptos-logger = { workspace = true } +aptos-protos = { workspace = true } +aptos-runtimes = { workspace = true } +aptos-transaction-emitter-lib = { workspace = true } +aptos-transaction-generator-lib = { workspace = true } +aptos-types = { workspace = true } +async-trait = { workspace = true } +backoff = { workspace = true } +base64 = { workspace = true } +clap = { workspace = true } +futures = { workspace = true } +futures-core = { workspace = true } +futures-util = { workspace = true } +itertools = { workspace = true } +prometheus = { workspace = true } +prost = { workspace = true } +redis = { workspace = true } +regex = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +tonic = { workspace = true } +tracing = { workspace = true } +url = { workspace = true } +warp = { workspace = true } + +[features] +integration-tests = [] diff --git a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/lib.rs new file mode 100644 index 0000000000000..41eeab05b7c79 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/lib.rs @@ -0,0 +1,8 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// We hide these tests behind a feature flag because these are not standard unit tests, +// these are integration tests that rely on a variety of outside pieces such as a local +// testnet and a running Redis instance. 
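In practice this means the suite only compiles and runs when the flag is switched on, e.g. with something like `cargo test -p aptos-indexer-grpc-integration-tests --features integration-tests` (an assumed invocation; the package and feature names come from the new Cargo.toml above), and only after the docker-compose testnet and Redis introduced in this change are running.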
+#[cfg(feature = "integration-tests")] +mod tests; diff --git a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/fullnode_tests.rs b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/fullnode_tests.rs new file mode 100644 index 0000000000000..48466edcf2bfd --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/fullnode_tests.rs @@ -0,0 +1,356 @@ +// Copyright © Aptos Foundation + +use anyhow::{bail, Context, Result}; +use aptos_indexer_grpc_cache_worker::IndexerGrpcCacheWorkerConfig; +use aptos_indexer_grpc_file_store::IndexerGrpcFileStoreWorkerConfig; +use aptos_indexer_grpc_server_framework::{ + run_server_with_config, setup_logging, setup_panic_handler, GenericConfig, RunnableConfig, +}; +use aptos_indexer_grpc_utils::{ + cache_operator::CacheOperator, + config::{IndexerGrpcFileStoreConfig, LocalFileStore}, + constants::BLOB_STORAGE_SIZE, + file_store_operator::{FileStoreOperator, LocalFileStoreOperator}, +}; +use aptos_transaction_emitter_lib::{emit_transactions, ClusterArgs, CoinSourceArgs, EmitArgs}; +use aptos_transaction_generator_lib::args::TransactionTypeArg; +use aptos_types::chain_id::ChainId; +use regex::Regex; +use std::{fs::File, io::Write, path::PathBuf}; +use tempfile::TempDir; +use tokio::task::JoinHandle; +use tracing::info; + +static TESTNET_REST_API_URL: &str = "http://localhost:8080"; +static TESTNET_FULLNODE_GRPC_URL: &str = "localhost:50051"; +static REDIS_PRIMARY_URL: &str = "localhost:6379"; + +static MINT_KEY_FILE_NAME: &str = "mint.key"; + +/// Get the name of docker containers that match the given regex +/// This works around different docker compose v1 and v2 naming conventions +fn get_container_by_name_regex(name_regex: Regex) -> Result> { + let containers = std::process::Command::new("docker") + .args(&["ps", "--format", "{{.Names}}"]) + .output()?; + let containers = String::from_utf8(containers.stdout)?; + let ret = containers + .split("\n") + .map(|x| x.to_string()) + .filter(|x| name_regex.is_match(x)) + .collect::>(); + Ok(ret) +} + +/// Connects to the local redis running in docker and resets it +async fn reset_redis() -> Result<()> { + let redis_containers = get_container_by_name_regex(Regex::new(r".*redis.*")?)?; + for container in redis_containers { + let _ = std::process::Command::new("docker") + .args(&["exec", &container, "redis-cli", "FLUSHALL"]) + .output()?; + } + + let conn = redis::Client::open(format!("redis://{}", REDIS_PRIMARY_URL)) + .expect("Create redis client failed.") + .get_async_connection() + .await + .expect("Create redis connection failed."); + let mut cache_operator = CacheOperator::new(conn); + match cache_operator.get_latest_version().await { + Ok(x) => { + bail!( + "Redis did not scale down properly. There's still stuff in the cache. 
Latest version: {}", + x + ); + }, + Err(_) => info!("Redis scaled down properly"), + } + Ok(()) +} + +/// Fetch the mint key from the running local testnet and dump it into the path specified +async fn dump_mint_key_to_file(path: &PathBuf) -> Result { + let validator_containers = + get_container_by_name_regex(Regex::new(r"validator-testnet.*validator.*")?)?; + if validator_containers.len() != 1 { + bail!( + "Expected 1 validator container, found {}", + validator_containers.len() + ); + } + let validator = &validator_containers[0]; + let output = std::process::Command::new("docker") + .args(&["exec", validator, "cat", "/opt/aptos/var/mint.key"]) + .output()?; + let output_stdout = output.stdout; + info!("Mint key: {:?}", output_stdout); + let mint_key_path = path.join(MINT_KEY_FILE_NAME); + let mint_key_path_string = mint_key_path.display().to_string(); + let mut file = File::create(mint_key_path).context("Could not create mint key in path")?; + file.write_all(&output_stdout) + .context("Could not write mint key to file")?; + Ok(mint_key_path_string) +} + +/// Emit transactions to the local testnet to invoke certain indexer actions, such as writing +/// to filestore +async fn emit_transactions_for_test() -> Result<()> { + // dump the key to a tempfile + let path_buf = TempDir::new() + .context("Could not create temp dir")? + .into_path(); + let mint_key_file_path = dump_mint_key_to_file(&path_buf) + .await + .expect("Failed to fetch mint key"); + info!("Mint key file path: {}", mint_key_file_path); + + // emit some transactions + let duration = 10; + let target_tps = BLOB_STORAGE_SIZE / duration; + let cluster_args = ClusterArgs { + targets: Some(vec![url::Url::parse(TESTNET_REST_API_URL) + .context("Cannot parse default fullnode url") + .unwrap()]), + targets_file: None, + reuse_accounts: false, + chain_id: ChainId::test(), + coin_source_args: CoinSourceArgs { + mint_file: Some(mint_key_file_path), + ..CoinSourceArgs::default() + }, + }; + let emit_args = EmitArgs { + // mempool_backlog: None, + target_tps: Some(target_tps), + txn_expiration_time_secs: 30, + duration: duration.try_into().unwrap(), + transaction_type: vec![TransactionTypeArg::default()], + ..EmitArgs::default() + }; + + info!( + "Emitting transactions: {} tps for {} seconds...", + target_tps, duration + ); + + let stats = emit_transactions(&cluster_args, &emit_args) + .await + .map_err(|e| panic!("Emit transactions failed {:?}", e)) + .unwrap(); + info!("Total stats: {}", stats); + info!("Average rate: {}", stats.rate()); + Ok(()) +} + +async fn start_server( + server_config: T, +) -> Result<(u16, JoinHandle>)> { + let health_check_port = aptos_config::utils::get_available_port(); + let config = GenericConfig { + health_check_port, + server_config, + }; + let server_name = config.server_config.get_server_name(); + info!( + "Starting server {} with healtheck port {}", + server_name, health_check_port + ); + let runtime_handle = tokio::runtime::Handle::current(); + + // runs the component's run, but we need the server run + let join_handle = runtime_handle.spawn(async move { run_server_with_config(config).await }); + let startup_timeout_secs = 30; + for i in 0..startup_timeout_secs { + match reqwest::get(format!("http://localhost:{}/metrics", health_check_port)).await { + Ok(_) => break, + Err(e) => { + if i == startup_timeout_secs - 1 { + let msg = if join_handle.is_finished() { + format!("Server failed on startup: {:#?}", join_handle.await) + } else { + "Server was still starting up".to_string() + }; + bail!( + "Server 
didn't come up within given timeout: {:#?} {}", + e, + msg + ); + } + }, + } + if join_handle.is_finished() { + bail!( + "Server returned error while starting up: {:#?}", + join_handle.await + ); + } + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + Ok((health_check_port, join_handle)) +} + +async fn setup_test() { + // we're going to run both cache worker and file store worker in the same process + // so we need to centrally set up logging and panic handler, whereas they are usually done in the same service + setup_logging(); + setup_panic_handler(); + reset_redis().await.expect("Failed to reset redis for test"); + + // aptos_logger too + aptos_logger::Logger::init_for_testing(); +} + +// These tests expect that the local environment has a running fullnode +// This can be done by using the docker-compose +// We will then simulate chaos by using (1) docker exec (2) docker-compose scale = +#[tokio::test] +pub async fn verify_docker_compose_setup() { + reqwest::get(&format!("{}/v1", TESTNET_REST_API_URL)) + .await + .unwrap() + .error_for_status() + .unwrap(); // we just want a good status code +} + +/// Test that the cache worker can start from scratch and make progress. +/// This is a cold start because there is no existing cache and also it is unable to read from the file store +/// about the latest state prior to starting. +#[tokio::test] +async fn test_cold_start_cache_worker_progress() { + setup_test().await; + + let tmp_dir = TempDir::new().expect("Could not create temp dir"); // start with a new file store each time + let cache_worker_config = IndexerGrpcCacheWorkerConfig { + fullnode_grpc_address: TESTNET_FULLNODE_GRPC_URL.to_string(), + file_store_config: IndexerGrpcFileStoreConfig::LocalFileStore(LocalFileStore { + local_file_store_path: tmp_dir.path().to_path_buf(), + }), + redis_main_instance_address: REDIS_PRIMARY_URL.to_string(), + }; + + let (_cache_worker_port, _cache_worker_handle) = + start_server::(cache_worker_config) + .await + .expect("Failed to start CacheWorker"); + + let conn = redis::Client::open(format!("redis://{}", REDIS_PRIMARY_URL.to_string())) + .expect("Create redis client failed.") + .get_async_connection() + .await + .expect("Create redis connection failed."); + + let check_cache_secs = 30; + let check_cache_frequency_secs = 5; + let tries = check_cache_secs / check_cache_frequency_secs; + + // check that the cache was written to + let mut cache_operator = CacheOperator::new(conn); + let mut chain_id = 0; + for _ in 0..tries { + match cache_operator.get_chain_id().await { + Ok(x) => { + chain_id = x; + info!("Chain id: {}", x); + break; + }, + Err(_) => { + tokio::time::sleep(std::time::Duration::from_secs(check_cache_frequency_secs)) + .await; + }, + } + } + assert!(chain_id == 4); + + // check that the cache worker is making progress + let mut latest_version = 0; + let mut new_latest_version; + for _ in 0..tries { + tokio::time::sleep(std::time::Duration::from_secs(check_cache_frequency_secs)).await; + new_latest_version = cache_operator.get_latest_version().await.unwrap(); + info!( + "Processed {} versions since last check {}s ago...", + new_latest_version - latest_version, + check_cache_frequency_secs + ); + assert!(new_latest_version > latest_version); + latest_version = new_latest_version; + } +} + +/// Test that the file store worker can start from scratch and make progress. +/// This is a cold start since the file store and the cache start as empty. And the file store is generally the source of truth +/// between the two. 
We expect the file store to be written to +#[tokio::test] +async fn test_cold_start_file_store_worker_progress() { + setup_test().await; + + let tmp_dir = TempDir::new().expect("Could not create temp dir"); // start with a new file store each time + + let cache_worker_config = IndexerGrpcCacheWorkerConfig { + fullnode_grpc_address: TESTNET_FULLNODE_GRPC_URL.to_string(), + file_store_config: IndexerGrpcFileStoreConfig::LocalFileStore(LocalFileStore { + local_file_store_path: tmp_dir.path().to_path_buf(), + }), + redis_main_instance_address: REDIS_PRIMARY_URL.to_string(), + }; + + let file_store_worker_config = IndexerGrpcFileStoreWorkerConfig { + redis_main_instance_address: REDIS_PRIMARY_URL.to_string(), + file_store_config: IndexerGrpcFileStoreConfig::LocalFileStore(LocalFileStore { + local_file_store_path: tmp_dir.path().to_path_buf(), + }), + }; + + let (_cache_worker_port, _cache_worker_handle) = + start_server::(cache_worker_config.clone()) + .await + .expect("Failed to start CacheWorker"); + + // XXX: wait some time before file store starts up. we should resolve the boot dependency cycle + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + let (_file_store_port, _file_store_handle) = + start_server::(file_store_worker_config) + .await + .expect("Failed to start FileStoreWorker"); + + // wait until file store writes its first metadata + let file_store_operator = LocalFileStoreOperator::new(tmp_dir.path().to_path_buf()); + let tries = 6; + for _ in 0..tries { + match file_store_operator.get_file_store_metadata().await { + Some(_) => { + info!("File store metadata found"); + break; + }, + None => { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + }, + } + } + + // inspect the files at boot. there should at least be the metadata file specifying that it has 0 versions processed + info!( + "Expecting file store to have files {}", + tmp_dir.path().display() + ); + let file_store_metadata = file_store_operator.get_file_store_metadata().await; + assert!(file_store_metadata.is_some()); + + // emit transactions, enough to write to file store + emit_transactions_for_test() + .await + .expect("Emit transactions failed"); + + // after a while, expect the metadata file to be updated with the latest version + let file_store_metadata = file_store_operator + .get_file_store_metadata() + .await + .expect("Failed to get file store metadata"); + info!( + "[Indexer Cache] File store metadata: {:?}", + file_store_metadata + ); + assert!(file_store_metadata.version > 0); +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/mod.rs new file mode 100644 index 0000000000000..b102b93ad164a --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/mod.rs @@ -0,0 +1,5 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#[cfg(test)] +mod fullnode_tests; diff --git a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs index e58f0e384928b..4030979187937 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs @@ -23,27 +23,33 @@ impl ServerArgs { where C: RunnableConfig, { - let config = load::>(&self.config_path)?; // Set up the server. 
setup_logging(); setup_panic_handler(); + let config = load::>(&self.config_path)?; + run_server_with_config(config).await + } +} - let runtime = aptos_runtimes::spawn_named_runtime(config.get_server_name(), None); - let health_port = config.health_check_port; - // Start liveness and readiness probes. - let task_handler = runtime.spawn(async move { - register_probes_and_metrics_handler(health_port).await; - Ok(()) - }); - let main_task_handler = runtime.spawn(async move { config.run().await }); - let results = futures::future::join_all(vec![task_handler, main_task_handler]).await; - let errors = results.iter().filter(|r| r.is_err()).collect::>(); - if !errors.is_empty() { - return Err(anyhow::anyhow!("Failed to run server: {:?}", errors)); - } - // TODO(larry): fix the dropped runtime issue. +pub async fn run_server_with_config(config: GenericConfig) -> Result<()> +where + C: RunnableConfig, +{ + let runtime = aptos_runtimes::spawn_named_runtime(config.get_server_name(), None); + let health_port = config.health_check_port; + // Start liveness and readiness probes. + let task_handler = runtime.spawn(async move { + register_probes_and_metrics_handler(health_port).await; Ok(()) + }); + let main_task_handler = runtime.spawn(async move { config.run().await }); + let results = futures::future::join_all(vec![task_handler, main_task_handler]).await; + let errors = results.iter().filter(|r| r.is_err()).collect::>(); + if !errors.is_empty() { + return Err(anyhow::anyhow!("Failed to run server: {:?}", errors)); } + // TODO(larry): fix the dropped runtime issue. + Ok(()) } #[derive(Deserialize, Debug, Serialize)] @@ -97,7 +103,7 @@ pub struct CrashInfo { /// Tokio's default behavior is to catch panics and ignore them. Invoking this function will /// ensure that all subsequent thread panics (even Tokio threads) will report the /// details/backtrace and then exit. -fn setup_panic_handler() { +pub fn setup_panic_handler() { std::panic::set_hook(Box::new(move |pi: &PanicInfo<'_>| { handle_panic(pi); })); @@ -119,7 +125,7 @@ fn handle_panic(panic_info: &PanicInfo<'_>) { } /// Set up logging for the server. -fn setup_logging() { +pub fn setup_logging() { let env_filter = EnvFilter::try_from_default_env() .or_else(|_| EnvFilter::try_new("info")) .unwrap(); diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs index 62309ed001933..46a78b76cc134 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::constants::BLOB_STORAGE_SIZE; +use anyhow::Context; use redis::{AsyncCommands, RedisError, RedisResult}; // Configurations for cache. @@ -15,7 +16,7 @@ const CACHE_SIZE_ESTIMATION: u64 = 3_000_000_u64; // lower than the latest version - CACHE_SIZE_EVICTION_LOWER_BOUND. // The gap between CACHE_SIZE_ESTIMATION and this is to give buffer since // reading latest version and actual data not atomic(two operations). -const CACHE_SIZE_EVICTION_LOWER_BOUND: u64 = 12_000_000_u64; +const CACHE_SIZE_EVICTION_LOWER_BOUND: u64 = 4_000_000_u64; // Keys for cache. const CACHE_KEY_LATEST_VERSION: &str = "latest_version"; @@ -153,7 +154,23 @@ impl CacheOperator { // Downstream system can infer the chain id from cache. 
pub async fn get_chain_id(&mut self) -> anyhow::Result { let chain_id: u64 = match self.conn.get::<&str, String>(CACHE_KEY_CHAIN_ID).await { - Ok(v) => v.parse::().expect("Redis chain_id is not a number."), + Ok(v) => v + .parse::() + .with_context(|| format!("Redis key {} is not a number.", CACHE_KEY_CHAIN_ID))?, + Err(err) => return Err(err.into()), + }; + Ok(chain_id) + } + + pub async fn get_latest_version(&mut self) -> anyhow::Result { + let chain_id: u64 = match self + .conn + .get::<&str, String>(CACHE_KEY_LATEST_VERSION) + .await + { + Ok(v) => v.parse::().with_context(|| { + format!("Redis key {} is not a number.", CACHE_KEY_LATEST_VERSION) + })?, Err(err) => return Err(err.into()), }; Ok(chain_id) @@ -227,7 +244,7 @@ impl CacheOperator { version: u64, ) -> anyhow::Result<()> { let script = redis::Script::new(CACHE_SCRIPT_UPDATE_LATEST_VERSION); - tracing::info!( + tracing::debug!( num_of_versions = num_of_versions, version = version, "Updating latest version in cache." diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/gcs.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/gcs.rs index 1bb8d944a5445..64d7df0410e5d 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/gcs.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/gcs.rs @@ -10,14 +10,14 @@ const JSON_FILE_TYPE: &str = "application/json"; pub struct GcsFileStoreOperator { bucket_name: String, /// The timestamp of the latest metadata update; this is to avoid too frequent metadata update. - latest_metadata_update_timestamp: std::time::Instant, + latest_metadata_update_timestamp: Option, } impl GcsFileStoreOperator { pub fn new(bucket_name: String) -> Self { Self { bucket_name, - latest_metadata_update_timestamp: std::time::Instant::now(), + latest_metadata_update_timestamp: None, } } } @@ -135,10 +135,6 @@ impl FileStoreOperator for GcsFileStoreOperator { chain_id: u64, version: u64, ) -> anyhow::Result<()> { - if (std::time::Instant::now() - self.latest_metadata_update_timestamp).as_secs() < 5 { - return Ok(()); - } - let metadata = FileStoreMetadata::new(chain_id, version); // If the metadata is not updated, the indexer will be restarted. 
match Object::create( @@ -150,7 +146,7 @@ impl FileStoreOperator for GcsFileStoreOperator { .await { Ok(_) => { - self.latest_metadata_update_timestamp = std::time::Instant::now(); + self.latest_metadata_update_timestamp = Some(std::time::Instant::now()); Ok(()) }, Err(err) => Err(anyhow::Error::from(err)), @@ -205,7 +201,18 @@ impl FileStoreOperator for GcsFileStoreOperator { anyhow::bail!("Uploading transactions failed."); } - self.update_file_store_metadata(chain_id, start_version + batch_size as u64) - .await + if let Some(ts) = self.latest_metadata_update_timestamp { + // a periodic metadata update + if (std::time::Instant::now() - ts).as_secs() > FILE_STORE_UPDATE_FREQUENCY_SECS { + self.update_file_store_metadata(chain_id, start_version + batch_size as u64) + .await?; + } + } else { + // the first metadata update + self.update_file_store_metadata(chain_id, start_version + batch_size as u64) + .await?; + } + + Ok(()) } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/local.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/local.rs index 90c9e1a4fb2da..a322293fb350c 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/local.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/local.rs @@ -4,18 +4,19 @@ use crate::{constants::BLOB_STORAGE_SIZE, file_store_operator::*, EncodedTransactionWithVersion}; use itertools::{any, Itertools}; use std::path::PathBuf; +use tracing::info; pub struct LocalFileStoreOperator { path: PathBuf, /// The timestamp of the latest metadata update; this is to avoid too frequent metadata update. - latest_metadata_update_timestamp: std::time::Instant, + latest_metadata_update_timestamp: Option, } impl LocalFileStoreOperator { pub fn new(path: PathBuf) -> Self { Self { path, - latest_metadata_update_timestamp: std::time::Instant::now(), + latest_metadata_update_timestamp: None, } } } @@ -96,6 +97,7 @@ impl FileStoreOperator for LocalFileStoreOperator { Err(err) => { if err.kind() == std::io::ErrorKind::NotFound { // If the metadata is not found, it means the file store is empty. + info!("File store is empty. Creating metadata file."); self.update_file_store_metadata(expected_chain_id, 0) .await .expect("[Indexer File] Update metadata failed."); @@ -116,16 +118,17 @@ impl FileStoreOperator for LocalFileStoreOperator { chain_id: u64, version: u64, ) -> anyhow::Result<()> { - if (std::time::Instant::now() - self.latest_metadata_update_timestamp).as_secs() < 5 { - return Ok(()); - } - let metadata = FileStoreMetadata::new(chain_id, version); // If the metadata is not updated, the indexer will be restarted. 
let metadata_path = self.path.join(METADATA_FILE_NAME); + info!( + "Updating metadata file {} @ version {}", + metadata_path.display(), + version + ); match tokio::fs::write(metadata_path, serde_json::to_vec(&metadata).unwrap()).await { Ok(_) => { - self.latest_metadata_update_timestamp = std::time::Instant::now(); + self.latest_metadata_update_timestamp = Some(std::time::Instant::now()); Ok(()) }, Err(err) => Err(anyhow::Error::from(err)), @@ -164,7 +167,7 @@ impl FileStoreOperator for LocalFileStoreOperator { .path .join(generate_blob_name(transactions_file.starting_version).as_str()); - tracing::info!( + tracing::debug!( "Uploading transactions to {:?}", txns_path.to_str().unwrap() ); @@ -192,7 +195,18 @@ impl FileStoreOperator for LocalFileStoreOperator { anyhow::bail!("Uploading transactions failed."); } - self.update_file_store_metadata(chain_id, start_version + batch_size as u64) - .await + if let Some(ts) = self.latest_metadata_update_timestamp { + // a periodic metadata update + if (std::time::Instant::now() - ts).as_secs() > FILE_STORE_UPDATE_FREQUENCY_SECS { + self.update_file_store_metadata(chain_id, start_version + batch_size as u64) + .await?; + } + } else { + // the first metadata update + self.update_file_store_metadata(chain_id, start_version + batch_size as u64) + .await?; + } + + Ok(()) } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/mod.rs index 6ce9a425e4078..6b0f40c14ee77 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/mod.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator/mod.rs @@ -11,8 +11,8 @@ pub mod local; pub use local::*; pub const FILE_FOLDER_NAME: &str = "files"; - const METADATA_FILE_NAME: &str = "metadata.json"; +const FILE_STORE_UPDATE_FREQUENCY_SECS: u64 = 5; #[inline] pub fn generate_blob_name(starting_version: u64) -> String { @@ -78,6 +78,11 @@ pub trait FileStoreOperator: Send + Sync { chain_id: u64, transactions: Vec, ) -> anyhow::Result<()>; + + async fn get_starting_version(&self) -> Option { + let metadata = self.get_file_store_metadata().await; + metadata.map(|metadata| metadata.version) + } } pub(crate) fn build_transactions_file( diff --git a/ecosystem/node-checker/src/provider/metrics.rs b/ecosystem/node-checker/src/provider/metrics.rs index 3cee07634187a..7e88029b4fe37 100644 --- a/ecosystem/node-checker/src/provider/metrics.rs +++ b/ecosystem/node-checker/src/provider/metrics.rs @@ -74,7 +74,7 @@ impl MetricsProvider { ) }) .map_err(|e| ProviderError::ParseError(anyhow!(e)))?; - Scrape::parse(body.lines().into_iter().map(|l| Ok(l.to_string()))) + Scrape::parse(body.lines().map(|l| Ok(l.to_string()))) .with_context(|| { format!( "Failed to parse response text from {} as a Prometheus scrape", diff --git a/ecosystem/python/sdk/CHANGELOG.md b/ecosystem/python/sdk/CHANGELOG.md index 8645004dd6ee2..cb1598d84e5c5 100644 --- a/ecosystem/python/sdk/CHANGELOG.md +++ b/ecosystem/python/sdk/CHANGELOG.md @@ -4,6 +4,9 @@ All notable changes to the Aptos Python SDK will be captured in this file. This **Note:** Until we cut a 1.0.0 release, the Aptos Python SDK does not follow semantic versioning. +## 0.6.2 +- Added custom header "x-aptos-client" to both sync/async RestClient + ## 0.6.1 - Updated package manifest. 
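Both file store operators now gate metadata writes the same way: the first successful upload writes `metadata.json` unconditionally, and later uploads refresh it only once `FILE_STORE_UPDATE_FREQUENCY_SECS` has elapsed since the last write, tracked by a timestamp that starts out as `None` rather than `Instant::now()`. A condensed sketch of that decision (the helper itself is illustrative and not part of the crate):

```rust
use std::time::{Duration, Instant};

// Value taken from the new constant in file_store_operator/mod.rs.
const FILE_STORE_UPDATE_FREQUENCY_SECS: u64 = 5;

/// Illustrative helper: should this upload also refresh the metadata file?
fn should_write_metadata(last_write: Option<Instant>) -> bool {
    match last_write {
        // First upload after startup always writes metadata.
        None => true,
        // Afterwards, write only once the update window has elapsed.
        Some(ts) => ts.elapsed() > Duration::from_secs(FILE_STORE_UPDATE_FREQUENCY_SECS),
    }
}
```

This replaces the old behavior of skipping any metadata write made within five seconds of the previous one, which meant the very first write after start-up could be silently skipped.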
diff --git a/ecosystem/python/sdk/Makefile b/ecosystem/python/sdk/Makefile index 4096c287621ba..48cf36dae9907 100644 --- a/ecosystem/python/sdk/Makefile +++ b/ecosystem/python/sdk/Makefile @@ -4,14 +4,18 @@ test: poetry run python -m unittest discover -s aptos_sdk/ -p '*.py' -t .. +test-coverage: + poetry run python -m coverage run -m unittest discover -s aptos_sdk/ -p '*.py' -t .. + poetry run python -m coverage report + fmt: find ./examples ./aptos_sdk *.py -type f -name "*.py" | xargs poetry run autoflake -i -r --remove-all-unused-imports --remove-unused-variables --ignore-init-module-imports - poetry run isort aptos_sdk examples setup.py - poetry run black aptos_sdk examples setup.py + poetry run isort aptos_sdk examples + poetry run black aptos_sdk examples lint: - poetry run mypy aptos_sdk - - poetry run flake8 aptos_sdk examples setup.py + - poetry run flake8 aptos_sdk examples examples: poetry run python -m examples.async-read-aggregator diff --git a/ecosystem/python/sdk/aptos_sdk/account_sequence_number.py b/ecosystem/python/sdk/aptos_sdk/account_sequence_number.py new file mode 100644 index 0000000000000..be6343428426c --- /dev/null +++ b/ecosystem/python/sdk/aptos_sdk/account_sequence_number.py @@ -0,0 +1,214 @@ +# Copyright © Aptos Foundation +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import asyncio +import logging +from typing import Callable, Optional + +from aptos_sdk.account_address import AccountAddress +from aptos_sdk.async_client import ApiError, RestClient + + +class AccountSequenceNumberConfig: + """Common configuration for account number generation""" + + maximum_in_flight: int = 100 + maximum_wait_time: int = 30 + sleep_time: float = 0.01 + + +class AccountSequenceNumber: + """ + A managed wrapper around sequence numbers that implements the trivial flow control used by the + Aptos faucet: + * Submit up to 100 transactions per account in parallel with a timeout of 20 seconds + * If local assumes 100 are in flight, determine the actual committed state from the network + * If there are less than 100 due to some being committed, adjust the window + * If 100 are in flight Wait .1 seconds before re-evaluating + * If ever waiting more than 30 seconds restart the sequence number to the current on-chain state + Assumptions: + * Accounts are expected to be managed by a single AccountSequenceNumber and not used otherwise. + * They are initialized to the current on-chain state, so if there are already transactions in + flight, they may take some time to reset. + * Accounts are automatically initialized if not explicitly + + Notes: + * This is co-routine safe, that is many async tasks can be reading from this concurrently. + * The state of an account cannot be used across multiple AccountSequenceNumber services. + * The synchronize method will create a barrier that prevents additional next_sequence_number + calls until it is complete. + * This only manages the distribution of sequence numbers it does not help handle transaction + failures. + * If a transaction fails, you should call synchronize and wait for timeouts. + * Mempool limits the number of transactions per account to 100, hence why we chose 100. 
+ """ + + _client: RestClient + _account: AccountAddress + _lock: asyncio.Lock + + _maximum_in_flight: int = 100 + _maximum_wait_time: int = 30 + _sleep_time: float = 0.01 + + _last_committed_number: Optional[int] + _current_number: Optional[int] + + def __init__( + self, + client: RestClient, + account: AccountAddress, + config: AccountSequenceNumberConfig = AccountSequenceNumberConfig(), + ): + self._client = client + self._account = account + self._lock = asyncio.Lock() + + self._last_uncommitted_number = None + self._current_number = None + + self._maximum_in_flight = config.maximum_in_flight + self._maximum_wait_time = config.maximum_wait_time + self._sleep_time = config.sleep_time + + async def next_sequence_number(self, block: bool = True) -> Optional[int]: + """ + Returns the next sequence number available on this account. This leverages a lock to + guarantee first-in, first-out ordering of requests. + """ + async with self._lock: + if self._last_uncommitted_number is None or self._current_number is None: + await self._initialize() + # If there are more than self._maximum_in_flight in flight, wait for a slot. + # Or at least check to see if there is a slot and exit if in non-blocking mode. + if ( + self._current_number - self._last_uncommitted_number + >= self._maximum_in_flight + ): + await self._update() + if ( + self._current_number - self._last_uncommitted_number + >= self._maximum_in_flight + ): + if not block: + return None + await self._resync( + lambda acn: acn._current_number - acn._last_uncommitted_number + >= acn._maximum_in_flight + ) + + next_number = self._current_number + self._current_number += 1 + return next_number + + async def _initialize(self): + """Optional initializer. called by next_sequence_number if not called prior.""" + self._current_number = await self._current_sequence_number() + self._last_uncommitted_number = self._current_number + + async def synchronize(self): + """ + Poll the network until all submitted transactions have either been committed or until + the maximum wait time has elapsed. This will prevent any calls to next_sequence_number + until this called has returned. 
+ """ + async with self._lock: + await self._update() + await self._resync( + lambda acn: acn._last_uncommitted_number != acn._current_number + ) + + async def _resync(self, check: Callable[[AccountSequenceNumber], bool]): + """Forces a resync with the upstream, this should be called within the lock""" + start_time = await self._client.current_timestamp() + failed = False + while check(self): + ledger_time = await self._client.current_timestamp() + if ledger_time - start_time > self._maximum_wait_time: + logging.warn( + f"Waited over {self._maximum_wait_time} seconds for a transaction to commit, resyncing {self._account}" + ) + failed = True + break + else: + await asyncio.sleep(self._sleep_time) + await self._update() + if not failed: + return + for seq_num in range(self._last_uncommitted_number + 1, self._current_number): + while True: + try: + result = ( + await self._client.account_transaction_sequence_number_status( + self._account, seq_num + ) + ) + if result: + break + except ApiError as error: + if error.status_code == 404: + break + raise + await self._initialize() + + async def _update(self): + self._last_uncommitted_number = await self._current_sequence_number() + return self._last_uncommitted_number + + async def _current_sequence_number(self) -> int: + return await self._client.account_sequence_number(self._account) + + +import unittest +import unittest.mock + + +class Test(unittest.IsolatedAsyncioTestCase): + async def test_common_path(self): + """ + Verifies that: + * AccountSequenceNumber returns sequential numbers starting from 0 + * When the account has been updated on-chain include that in computations 100 -> 105 + * Ensure that none is returned if the call for next_sequence_number would block + * Ensure that synchronize completes if the value matches on-chain + """ + patcher = unittest.mock.patch( + "aptos_sdk.async_client.RestClient.account_sequence_number", return_value=0 + ) + patcher.start() + + rest_client = RestClient("https://fullnode.devnet.aptoslabs.com/v1") + account_sequence_number = AccountSequenceNumber( + rest_client, AccountAddress.from_hex("b0b") + ) + last_seq_num = 0 + for seq_num in range(5): + last_seq_num = await account_sequence_number.next_sequence_number() + self.assertEqual(last_seq_num, seq_num) + + patcher.stop() + patcher = unittest.mock.patch( + "aptos_sdk.async_client.RestClient.account_sequence_number", return_value=5 + ) + patcher.start() + + for seq_num in range(AccountSequenceNumber._maximum_in_flight): + last_seq_num = await account_sequence_number.next_sequence_number() + self.assertEqual(last_seq_num, seq_num + 5) + + self.assertEqual( + await account_sequence_number.next_sequence_number(block=False), None + ) + next_sequence_number = last_seq_num + 1 + patcher.stop() + patcher = unittest.mock.patch( + "aptos_sdk.async_client.RestClient.account_sequence_number", + return_value=next_sequence_number, + ) + patcher.start() + + self.assertNotEqual(account_sequence_number._current_number, last_seq_num) + await account_sequence_number.synchronize() + self.assertEqual(account_sequence_number._current_number, next_sequence_number) diff --git a/ecosystem/python/sdk/aptos_sdk/aptos_token_client.py b/ecosystem/python/sdk/aptos_sdk/aptos_token_client.py index 330e0591783f3..777eef2f51b68 100644 --- a/ecosystem/python/sdk/aptos_sdk/aptos_token_client.py +++ b/ecosystem/python/sdk/aptos_sdk/aptos_token_client.py @@ -338,9 +338,7 @@ async def read_object(self, address: AccountAddress) -> ReadObject: resources[resource_obj] = 
resource_obj.parse(resource["data"]) return ReadObject(resources) - async def create_collection( - self, - creator: Account, + def create_collection_payload( description: str, max_supply: int, name: str, @@ -356,7 +354,7 @@ async def create_collection( tokens_freezable_by_creator: bool, royalty_numerator: int, royalty_denominator: int, - ) -> str: + ) -> TransactionPayload: transaction_arguments = [ TransactionArgument(description, Serializer.str), TransactionArgument(max_supply, Serializer.u64), @@ -382,20 +380,56 @@ async def create_collection( transaction_arguments, ) + return TransactionPayload(payload) + + async def create_collection( + self, + creator: Account, + description: str, + max_supply: int, + name: str, + uri: str, + mutable_description: bool, + mutable_royalty: bool, + mutable_uri: bool, + mutable_token_description: bool, + mutable_token_name: bool, + mutable_token_properties: bool, + mutable_token_uri: bool, + tokens_burnable_by_creator: bool, + tokens_freezable_by_creator: bool, + royalty_numerator: int, + royalty_denominator: int, + ) -> str: + payload = create_collection_payload( + description, + max_supply, + name, + uri, + mutable_description, + mutable_royalty, + mutable_uri, + mutable_token_description, + mutable_token_name, + mutable_token_properties, + mutable_token_uri, + tokens_burnable_by_creator, + tokens_freezable_by_creator, + royalty_numerator, + royalty_denominator, + ) signed_transaction = await self.client.create_bcs_signed_transaction( - creator, TransactionPayload(payload) + creator, payload ) return await self.client.submit_bcs_transaction(signed_transaction) - async def mint_token( - self, - creator: Account, + def mint_token_payload( collection: str, description: str, name: str, uri: str, properties: PropertyMap, - ) -> str: + ) -> TransactionPayload: (property_names, property_types, property_values) = properties.to_tuple() transaction_arguments = [ TransactionArgument(collection, Serializer.str), @@ -420,8 +454,20 @@ async def mint_token( transaction_arguments, ) + return TransactionPayload(payload) + + async def mint_token( + self, + creator: Account, + collection: str, + description: str, + name: str, + uri: str, + properties: PropertyMap, + ) -> str: + payload = mint_token_payload(collection, description, name, uri, properties) signed_transaction = await self.client.create_bcs_signed_transaction( - creator, TransactionPayload(payload) + creator, payload ) return await self.client.submit_bcs_transaction(signed_transaction) diff --git a/ecosystem/python/sdk/aptos_sdk/async_client.py b/ecosystem/python/sdk/aptos_sdk/async_client.py index 3e1c377a96e99..466dee13747e7 100644 --- a/ecosystem/python/sdk/aptos_sdk/async_client.py +++ b/ecosystem/python/sdk/aptos_sdk/async_client.py @@ -1,6 +1,8 @@ # Copyright © Aptos Foundation # SPDX-License-Identifier: Apache-2.0 +import asyncio +import logging import time from typing import Any, Dict, List, Optional @@ -11,6 +13,7 @@ from .account_address import AccountAddress from .authenticator import Authenticator, Ed25519Authenticator, MultiAgentAuthenticator from .bcs import Serializer +from .metadata import Metadata from .transactions import ( EntryFunction, MultiAgentRawTransaction, @@ -19,7 +22,6 @@ TransactionArgument, TransactionPayload, ) -from .type_tag import StructTag, TypeTag U64_MAX = 18446744073709551615 @@ -49,8 +51,13 @@ def __init__(self, base_url: str, client_config: ClientConfig = ClientConfig()): # Default timeouts but do not set a pool timeout, since the idea is that jobs will wait as # long 
as progress is being made. timeout = httpx.Timeout(60.0, pool=None) + # Default headers + headers = {Metadata.APTOS_HEADER: Metadata.get_aptos_header_val()} self.client = httpx.AsyncClient( - http2=client_config.http2, limits=limits, timeout=timeout + http2=client_config.http2, + limits=limits, + timeout=timeout, + headers=headers, ) self.client_config = client_config self._chain_id = None @@ -92,7 +99,7 @@ async def account_balance( "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", ledger_version, ) - return resource["data"]["coin"]["value"] + return int(resource["data"]["coin"]["value"]) async def account_sequence_number( self, account_address: AccountAddress, ledger_version: int = None @@ -137,6 +144,10 @@ async def account_resources( raise ApiError(f"{response.text} - {account_address}", response.status_code) return response.json() + async def current_timestamp(self) -> float: + info = await self.info() + return float(info["ledger_timestamp"]) / 1_000_000 + async def get_table_item( self, handle: str, @@ -320,7 +331,7 @@ async def wait_for_transaction(self, txn_hash: str) -> None: assert ( count < self.client_config.transaction_wait_in_seconds ), f"transaction {txn_hash} timed out" - time.sleep(1) + await asyncio.sleep(1) count += 1 response = await self.client.get( f"{self.base_url}/transactions/by_hash/{txn_hash}" @@ -329,6 +340,20 @@ async def wait_for_transaction(self, txn_hash: str) -> None: "success" in response.json() and response.json()["success"] ), f"{response.text} - {txn_hash}" + async def account_transaction_sequence_number_status( + self, address: AccountAddress, sequence_number: int + ) -> bool: + """Retrieve the state of a transaction by account and sequence number.""" + + response = await self.client.get( + f"{self.base_url}/accounts/{address}/transactions?limit=1&start={sequence_number}" + ) + if response.status_code >= 400: + logging.info(f"k {response}") + raise ApiError(response.text, response.status_code) + data = response.json() + return len(data) == 1 and data[0]["type"] != "pending_transaction" + # # Transaction helpers # @@ -374,11 +399,19 @@ async def create_multi_agent_bcs_transaction( return SignedTransaction(raw_transaction.inner(), authenticator) async def create_bcs_transaction( - self, sender: Account, payload: TransactionPayload + self, + sender: Account, + payload: TransactionPayload, + sequence_number: Optional[int] = None, ) -> RawTransaction: + sequence_number = ( + sequence_number + if sequence_number is not None + else await self.account_sequence_number(sender.address()) + ) return RawTransaction( sender.address(), - await self.account_sequence_number(sender.address()), + sequence_number, payload, self.client_config.max_gas_amount, self.client_config.gas_unit_price, @@ -387,9 +420,14 @@ async def create_bcs_transaction( ) async def create_bcs_signed_transaction( - self, sender: Account, payload: TransactionPayload + self, + sender: Account, + payload: TransactionPayload, + sequence_number: Optional[int] = None, ) -> SignedTransaction: - raw_transaction = await self.create_bcs_transaction(sender, payload) + raw_transaction = await self.create_bcs_transaction( + sender, payload, sequence_number + ) signature = sender.sign(raw_transaction.keyed()) authenticator = Authenticator( Ed25519Authenticator(sender.public_key(), signature) @@ -408,8 +446,8 @@ async def transfer( payload = { "type": "entry_function_payload", - "function": "0x1::coin::transfer", - "type_arguments": ["0x1::aptos_coin::AptosCoin"], + "function": "0x1::aptos_account::transfer", 
+ "type_arguments": [], "arguments": [ f"{recipient}", str(amount), @@ -419,7 +457,11 @@ async def transfer( # :!:>bcs_transfer async def bcs_transfer( - self, sender: Account, recipient: AccountAddress, amount: int + self, + sender: Account, + recipient: AccountAddress, + amount: int, + sequence_number: Optional[int] = None, ) -> str: transaction_arguments = [ TransactionArgument(recipient, Serializer.struct), @@ -427,14 +469,14 @@ async def bcs_transfer( ] payload = EntryFunction.natural( - "0x1::coin", + "0x1::aptos_account", "transfer", - [TypeTag(StructTag.from_str("0x1::aptos_coin::AptosCoin"))], + [], transaction_arguments, ) signed_transaction = await self.create_bcs_signed_transaction( - sender, TransactionPayload(payload) + sender, TransactionPayload(payload), sequence_number=sequence_number ) return await self.submit_bcs_transaction(signed_transaction) diff --git a/ecosystem/python/sdk/aptos_sdk/client.py b/ecosystem/python/sdk/aptos_sdk/client.py index fe92db6ee1a8e..66ff43ebe3128 100644 --- a/ecosystem/python/sdk/aptos_sdk/client.py +++ b/ecosystem/python/sdk/aptos_sdk/client.py @@ -11,6 +11,7 @@ from .account_address import AccountAddress from .authenticator import Authenticator, Ed25519Authenticator, MultiAgentAuthenticator from .bcs import Serializer +from .metadata import Metadata from .transactions import ( EntryFunction, MultiAgentRawTransaction, @@ -44,6 +45,7 @@ class RestClient: def __init__(self, base_url: str, client_config: ClientConfig = ClientConfig()): self.base_url = base_url self.client = httpx.Client() + self.client.headers[Metadata.APTOS_HEADER] = Metadata.get_aptos_header_val() self.client_config = client_config self.chain_id = int(self.info()["chain_id"]) diff --git a/ecosystem/python/sdk/aptos_sdk/metadata.py b/ecosystem/python/sdk/aptos_sdk/metadata.py new file mode 100644 index 0000000000000..144cd198aae46 --- /dev/null +++ b/ecosystem/python/sdk/aptos_sdk/metadata.py @@ -0,0 +1,13 @@ +import importlib.metadata as metadata + +# constants +PACKAGE_NAME = "aptos-sdk" + + +class Metadata: + APTOS_HEADER = "x-aptos-client" + + @staticmethod + def get_aptos_header_val(): + version = metadata.version(PACKAGE_NAME) + return f"aptos-python-sdk/{version}" diff --git a/ecosystem/python/sdk/aptos_sdk/transaction_worker.py b/ecosystem/python/sdk/aptos_sdk/transaction_worker.py new file mode 100644 index 0000000000000..22ec9ca87ac72 --- /dev/null +++ b/ecosystem/python/sdk/aptos_sdk/transaction_worker.py @@ -0,0 +1,226 @@ +# Copyright © Aptos Foundation +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import logging +import typing + +from aptos_sdk.account import Account +from aptos_sdk.account_address import AccountAddress +from aptos_sdk.account_sequence_number import AccountSequenceNumber +from aptos_sdk.async_client import RestClient +from aptos_sdk.transactions import SignedTransaction, TransactionPayload + + +class TransactionWorker: + """ + The TransactionWorker provides a simple framework for receiving payloads to be processed. It + acquires new sequence numbers and calls into the callback to produce a signed transaction, and + then submits the transaction. In another task, it waits for resolution of the submission + process or get pre-execution validation error. + + Note: This is not a particularly robust solution, as it lacks any framework to handle failed + transactions with functionality like retries or checking whether the framework is online. + This is the responsibility of a higher-level framework. 
+ """ + + _account: Account + _account_sequence_number: AccountSequenceNumber + _rest_client: RestClient + _transaction_generator: typing.Callable[ + [Account, int], typing.Awaitable[SignedTransaction] + ] + _started: bool + _stopped: bool + _outstanding_transactions: asyncio.Queue + _outstanding_transactions_task: typing.Optional[asyncio.Task] + _processed_transactions: asyncio.Queue + _process_transactions_task: typing.Optional[asyncio.Task] + + def __init__( + self, + account: Account, + rest_client: RestClient, + transaction_generator: typing.Callable[ + [Account, int], typing.Awaitable[SignedTransaction] + ], + ): + self._account = account + self._account_sequence_number = AccountSequenceNumber( + rest_client, account.address() + ) + self._rest_client = rest_client + self._transaction_generator = transaction_generator + + self._started = False + self._stopped = False + self._outstanding_transactions = asyncio.Queue() + self._processed_transactions = asyncio.Queue() + + def address(self) -> AccountAddress: + return self._account.address() + + async def _submit_transactions_task(self): + try: + while True: + sequence_number = ( + await self._account_sequence_number.next_sequence_number() + ) + transaction = await self._transaction_generator( + self._account, sequence_number + ) + txn_hash_awaitable = self._rest_client.submit_bcs_transaction( + transaction + ) + await self._outstanding_transactions.put( + (txn_hash_awaitable, sequence_number) + ) + except asyncio.CancelledError: + return + except Exception as e: + # This is insufficient, if we hit this we either need to bail or resolve the potential errors + logging.error(e, exc_info=True) + + async def _process_transactions_task(self): + try: + while True: + # Always start waiting for one, that way we can acquire a batch in the loop below. + ( + txn_hash_awaitable, + sequence_number, + ) = await self._outstanding_transactions.get() + awaitables = [txn_hash_awaitable] + sequence_numbers = [sequence_number] + + # Now acquire our batch. 
+ while not self._outstanding_transactions.empty(): + ( + txn_hash_awaitable, + sequence_number, + ) = await self._outstanding_transactions.get() + awaitables.append(txn_hash_awaitable) + sequence_numbers.append(sequence_number) + + outputs = await asyncio.gather(*awaitables, return_exceptions=True) + + for (output, sequence_number) in zip(outputs, sequence_numbers): + if isinstance(output, BaseException): + await self._processed_transactions.put( + (sequence_number, None, output) + ) + else: + await self._processed_transactions.put( + (sequence_number, output, None) + ) + except asyncio.CancelledError: + return + except Exception as e: + # This is insufficient, if we hit this we either need to bail or resolve the potential errors + logging.error(e, exc_info=True) + + async def next_processed_transaction( + self, + ) -> (int, typing.Optional[str], typing.Optional[Exception]): + return await self._processed_transactions.get() + + def stop(self): + """Stop the tasks for managing transactions""" + if not self._started: + raise Exception("Start not yet called") + if self._stopped: + raise Exception("Already stopped") + self._stopped = True + + self._submit_transactions_task.cancel() + self._process_transactions_task.cancel() + + def start(self): + """Begin the tasks for managing transactions""" + if self._started: + raise Exception("Already started") + self._started = True + + self._submit_transactions_task = asyncio.create_task( + self._submit_transactions_task() + ) + self._process_transactions_task = asyncio.create_task( + self._process_transactions_task() + ) + + +class TransactionQueue: + """Provides a queue model for pushing transactions into the TransactionWorker.""" + + _client: RestClient + _outstanding_transactions: asyncio.Queue + + def __init__(self, client: RestClient): + self._client = client + self._outstanding_transactions = asyncio.Queue() + + async def push(self, payload: TransactionPayload): + await self._outstanding_transactions.put(payload) + + async def next(self, sender: Account, sequence_number: int) -> SignedTransaction: + payload = await self._outstanding_transactions.get() + return await self._client.create_bcs_signed_transaction( + sender, payload, sequence_number=sequence_number + ) + + +import unittest +import unittest.mock + +from aptos_sdk.bcs import Serializer +from aptos_sdk.transactions import EntryFunction, TransactionArgument + + +class Test(unittest.IsolatedAsyncioTestCase): + async def test_common_path(self): + transaction_arguments = [ + TransactionArgument(AccountAddress.from_hex("b0b"), Serializer.struct), + TransactionArgument(100, Serializer.u64), + ] + payload = EntryFunction.natural( + "0x1::aptos_accounts", + "transfer", + [], + transaction_arguments, + ) + + seq_num_patcher = unittest.mock.patch( + "aptos_sdk.async_client.RestClient.account_sequence_number", return_value=0 + ) + seq_num_patcher.start() + submit_txn_patcher = unittest.mock.patch( + "aptos_sdk.async_client.RestClient.submit_bcs_transaction", + return_value="0xff", + ) + submit_txn_patcher.start() + + rest_client = RestClient("https://fullnode.devnet.aptoslabs.com/v1") + txn_queue = TransactionQueue(rest_client) + txn_worker = TransactionWorker(Account.generate(), rest_client, txn_queue.next) + txn_worker.start() + + await txn_queue.push(payload) + processed_txn = await txn_worker.next_processed_transaction() + self.assertEqual(processed_txn[0], 0) + self.assertEqual(processed_txn[1], "0xff") + self.assertEqual(processed_txn[2], None) + + submit_txn_patcher.stop() + exception = 
Exception("Power overwhelming") + submit_txn_patcher = unittest.mock.patch( + "aptos_sdk.async_client.RestClient.submit_bcs_transaction", + side_effect=exception, + ) + submit_txn_patcher.start() + + await txn_queue.push(payload) + processed_txn = await txn_worker.next_processed_transaction() + self.assertEqual(processed_txn[0], 1) + self.assertEqual(processed_txn[1], None) + self.assertEqual(processed_txn[2], exception) + + txn_worker.stop() diff --git a/ecosystem/python/sdk/examples/transaction-batching.py b/ecosystem/python/sdk/examples/transaction-batching.py index 4e5d02cfb9fbc..1bb671b5abc5c 100644 --- a/ecosystem/python/sdk/examples/transaction-batching.py +++ b/ecosystem/python/sdk/examples/transaction-batching.py @@ -1,231 +1,431 @@ # Copyright © Aptos Foundation # SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + import asyncio import logging import time +from multiprocessing import Pipe, Process +from multiprocessing.connection import Connection +from typing import Any, List from aptos_sdk.account import Account from aptos_sdk.account_address import AccountAddress +from aptos_sdk.account_sequence_number import AccountSequenceNumber +from aptos_sdk.aptos_token_client import AptosTokenClient, Property, PropertyMap from aptos_sdk.async_client import ClientConfig, FaucetClient, RestClient -from aptos_sdk.authenticator import Authenticator, Ed25519Authenticator from aptos_sdk.bcs import Serializer +from aptos_sdk.transaction_worker import TransactionWorker from aptos_sdk.transactions import ( EntryFunction, - RawTransaction, SignedTransaction, TransactionArgument, TransactionPayload, ) -from aptos_sdk.type_tag import StructTag, TypeTag from .common import FAUCET_URL, NODE_URL +class TransactionGenerator: + """ + Demonstrate how one might make a harness for submitting transactions. This class just keeps + submitting the same transaction payload. In practice, this could be a queue, where new payloads + accumulate and are consumed by the call to next_transaction. + + Todo: add tracking of transaction status to this and come up with some general logic to retry + or exit upon failure. 
+ """ + + _client: RestClient + _recipient: AccountAddress + _offset: int + _remaining_transactions: int + _waiting_for_more = asyncio.Event + _complete = asyncio.Event + _lock = asyncio.Lock + + def __init__(self, client: RestClient, recipient: AccountAddress): + self._client = client + self._recipient = recipient + self._waiting_for_more = asyncio.Event() + self._waiting_for_more.clear() + self._complete = asyncio.Event() + self._complete.set() + self._lock = asyncio.Lock() + self._remaining_transactions = 0 + + async def next_transaction( + self, sender: Account, sequence_number: int + ) -> SignedTransaction: + while self._remaining_transactions == 0: + await self._waiting_for_more.wait() + + async with self._lock: + self._remaining_transactions -= 1 + if self._remaining_transactions == 0: + self._waiting_for_more.clear() + self._complete.set() + + return await transfer_transaction( + self._client, sender, sequence_number, self._recipient, 0 + ) + + async def increase_transaction_count(self, number: int): + if number <= 0: + return + + async with self._lock: + self._remaining_transactions += number + self._waiting_for_more.set() + self._complete.clear() + + async def wait(self): + await self._complete.wait() + + +class WorkerContainer: + _conn: Connection + _process: Process + + def __init__(self, node_url: str, account: Account, recipient: AccountAddress): + (self._conn, conn) = Pipe() + self._process = Process( + target=Worker.run, args=(conn, node_url, account, recipient) + ) + + def get(self) -> Any: + self._conn.recv() + + def join(self): + self._process.join() + + def put(self, value: Any): + self._conn.send(value) + + def start(self): + self._process.start() + + +class Worker: + _conn: Connection + _rest_client: RestClient + _account: Account + _recipient: AccountAddress + _txn_generator: TransactionGenerator + _txn_worker: TransactionWorker + + def __init__( + self, + conn: Connection, + node_url: str, + account: Account, + recipient: AccountAddress, + ): + self._conn = conn + self._rest_client = RestClient(node_url) + self._account = account + self._recipient = recipient + self._txn_generator = TransactionGenerator(self._rest_client, self._recipient) + self._txn_worker = TransactionWorker( + self._account, self._rest_client, self._txn_generator.next_transaction + ) + + def run(queue: Pipe, node_url: str, account: Account, recipient: AccountAddress): + worker = Worker(queue, node_url, account, recipient) + asyncio.run(worker.async_run()) + + async def async_run(self): + try: + self._txn_worker.start() + + self._conn.send(True) + num_txns = self._conn.recv() + + await self._txn_generator.increase_transaction_count(num_txns) + + logging.info(f"Increase txns from {self._account.address()}") + self._conn.send(True) + self._conn.recv() + + txn_hashes = [] + while num_txns != 0: + if num_txns % 100 == 0: + logging.info( + f"{self._txn_worker.address()} remaining transactions {num_txns}" + ) + num_txns -= 1 + ( + sequence_number, + txn_hash, + exception, + ) = await self._txn_worker.next_processed_transaction() + if exception: + logging.error( + f"Account {self._txn_worker.address()}, transaction {sequence_number} submission failed.", + exc_info=exception, + ) + else: + txn_hashes.append(txn_hash) + + logging.info(f"Submitted txns from {self._account.address()}") + self._conn.send(True) + self._conn.recv() + + for txn_hash in txn_hashes: + await self._rest_client.wait_for_transaction(txn_hash) + + await self._rest_client.close() + logging.info(f"Verified txns from 
{self._account.address()}") + self._conn.send(True) + except Exception as e: + logging.error( + "Failed during run.", + exc_info=e, + ) + + +# This performs a simple p2p transaction +async def transfer_transaction( + client: RestClient, + sender: Account, + sequence_number: int, + recipient: AccountAddress, + amount: int, +) -> str: + transaction_arguments = [ + TransactionArgument(recipient, Serializer.struct), + TransactionArgument(amount, Serializer.u64), + ] + payload = EntryFunction.natural( + "0x1::aptos_account", + "transfer", + [], + transaction_arguments, + ) + + return await client.create_bcs_signed_transaction( + sender, TransactionPayload(payload), sequence_number + ) + + +# This will create a collection in the first transaction and then create NFTs thereafter. +# Note: Please adjust the sequence number and the name of the collection if run on the same set of +# accounts, otherwise you may end up not creating a collection and failing all transactions. +async def token_transaction( + client: RestClient, + sender: Account, + sequence_number: int, + recipient: AccountAddress, + amount: int, +) -> str: + collection_name = "Funky Alice's" + if sequence_number == 8351: + payload = AptosTokenClient.create_collection_payload( + "Alice's simple collection", + 20000000000, + collection_name, + "https://aptos.dev", + True, + True, + True, + True, + True, + True, + True, + True, + True, + 0, + 1, + ) + else: + payload = AptosTokenClient.mint_token_payload( + collection_name, + "Alice's simple token", + f"token {sequence_number}", + "https://aptos.dev/img/nyan.jpeg", + PropertyMap([Property.string("string", "string value")]), + ) + return await client.create_bcs_signed_transaction(sender, payload, sequence_number) + + +class Accounts: + source: Account + senders: List[Account] + receivers: List[Account] + + def __init__(self, source, senders, receivers): + self.source = source + self.senders = senders + self.receivers = receivers + + def generate(path: str, num_accounts: int) -> Accounts: + source = Account.generate() + source.store(f"{path}/source.txt") + senders = [] + receivers = [] + for idx in range(num_accounts): + senders.append(Account.generate()) + receivers.append(Account.generate()) + senders[-1].store(f"{path}/sender_{idx}.txt") + receivers[-1].store(f"{path}/receiver_{idx}.txt") + return Accounts(source, senders, receivers) + + def load(path: str, num_accounts: int) -> Accounts: + source = Account.load(f"{path}/source.txt") + senders = [] + receivers = [] + for idx in range(num_accounts): + senders.append(Account.load(f"{path}/sender_{idx}.txt")) + receivers.append(Account.load(f"{path}/receiver_{idx}.txt")) + return Accounts(source, senders, receivers) + + +async def fund_from_faucet(rest_client: RestClient, source: Account): + faucet_client = FaucetClient(FAUCET_URL, rest_client) + + fund_txns = [] + for _ in range(40): + fund_txns.append(faucet_client.fund_account(source.address(), 100_000_000_000)) + await asyncio.gather(*fund_txns) + + +async def distribute_portionally( + rest_client: RestClient, + source: Account, + senders: List[Account], + receivers: List[Account], +): + balance = int(await rest_client.account_balance(source.address())) + per_node_balance = balance // (len(senders) + 1) + await distribute(rest_client, source, senders, receivers, per_node_balance) + + +async def distribute( + rest_client: RestClient, + source: Account, + senders: List[Account], + receivers: List[Account], + per_node_amount: int, +): + all_accounts = list(map(lambda account: 
(account.address(), True), senders)) + all_accounts.extend(map(lambda account: (account.address(), False), receivers)) + + account_sequence_number = AccountSequenceNumber(rest_client, source.address()) + + txns = [] + txn_hashes = [] + + for (account, fund) in all_accounts: + sequence_number = await account_sequence_number.next_sequence_number( + block=False + ) + if sequence_number is None: + txn_hashes.extend(await asyncio.gather(*txns)) + txns = [] + sequence_number = await account_sequence_number.next_sequence_number() + amount = per_node_amount if fund else 0 + txn = await transfer_transaction( + rest_client, source, sequence_number, account, amount + ) + txns.append(rest_client.submit_bcs_transaction(txn)) + + txn_hashes.extend(await asyncio.gather(*txns)) + for txn_hash in txn_hashes: + await rest_client.wait_for_transaction(txn_hash) + await account_sequence_number.synchronize() + + async def main(): client_config = ClientConfig() - # Toggle to benchmark - client_config.http2 = False client_config.http2 = True rest_client = RestClient(NODE_URL, client_config) - faucet_client = FaucetClient(FAUCET_URL, rest_client) - num_accounts = 5 - read_amplification = 1000 - first_pass = 100 + num_accounts = 64 + transactions = 100000 start = time.time() + logging.getLogger().setLevel(20) + print("Starting...") - accounts = [] - recipient_accounts = [] - for _ in range(num_accounts): - accounts.append(Account.generate()) - recipient_accounts.append(Account.generate()) + # Generate will create new accounts, load will load existing accounts + all_accounts = Accounts.generate("nodes", num_accounts) + # all_accounts = Accounts.load("nodes", num_accounts) + accounts = all_accounts.senders + receivers = all_accounts.receivers + source = all_accounts.source + + print(f"source: {source.address()}") last = time.time() - print(f"Accounts generated at {last - start}") + print(f"Accounts generated / loaded at {last - start}") - funds = [] - for account in accounts: - funds.append(faucet_client.fund_account(account.address(), 100_000_000)) - for account in recipient_accounts: - funds.append(faucet_client.fund_account(account.address(), 0)) - await asyncio.gather(*funds) + await fund_from_faucet(rest_client, source) - print(f"Funded accounts at {time.time() - start} {time.time() - last}") + print(f"Initial account funded at {time.time() - start} {time.time() - last}") last = time.time() - balances = [] - for _ in range(read_amplification): - for account in accounts: - balances.append(rest_client.account_balance(account.address())) - await asyncio.gather(*balances) + balance = await rest_client.account_balance(source.address()) + amount = int(balance * 0.9 / num_accounts) + await distribute(rest_client, source, accounts, receivers, amount) - print(f"Accounts checked at {time.time() - start} {time.time() - last}") + print(f"Funded all accounts at {time.time() - start} {time.time() - last}") last = time.time() - account_sequence_numbers = [] - await_account_sequence_numbers = [] + balances = [] for account in accounts: - account_sequence_number = AccountSequenceNumber(rest_client, account.address()) - await_account_sequence_numbers.append(account_sequence_number.initialize()) - account_sequence_numbers.append(account_sequence_number) - await asyncio.gather(*await_account_sequence_numbers) + balances.append(rest_client.account_balance(account.address())) + await asyncio.gather(*balances) - print(f"Accounts initialized at {time.time() - start} {time.time() - last}") + print(f"Accounts checked at {time.time() - 
start} {time.time() - last}") last = time.time() - txn_hashes = [] - for _ in range(first_pass): - for idx in range(num_accounts): - sender = accounts[idx] - recipient = recipient_accounts[idx].address() - sequence_number = await account_sequence_numbers[idx].next_sequence_number() - txn_hash = transfer(rest_client, sender, recipient, sequence_number, 1) - txn_hashes.append(txn_hash) - txn_hashes = await asyncio.gather(*txn_hashes) + workers = [] + for (account, recipient) in zip(accounts, receivers): + workers.append(WorkerContainer(NODE_URL, account, recipient.address())) + workers[-1].start() - print(f"Transactions submitted at {time.time() - start} {time.time() - last}") - last = time.time() + for worker in workers: + worker.get() - wait_for = [] - for txn_hash in txn_hashes: - wait_for.append(account_sequence_number.synchronize()) - await asyncio.gather(*wait_for) - - print(f"Transactions committed at {time.time() - start} {time.time() - last}") + print(f"Workers started at {time.time() - start} {time.time() - last}") last = time.time() - await rest_client.close() - + to_take = (transactions // num_accounts) + ( + 1 if transactions % num_accounts != 0 else 0 + ) + remaining_transactions = transactions + for worker in workers: + taking = min(to_take, remaining_transactions) + remaining_transactions -= taking + worker.put(taking) -class AccountSequenceNumber: - """ - A managed wrapper around sequence numbers that implements the trivial flow control used by the - Aptos faucet: - * Submit up to 50 transactions per account in parallel with a timeout of 20 seconds - * If local assumes 50 are in flight, determine the actual committed state from the network - * If there are less than 50 due to some being committed, adjust the window - * If 50 are in flight Wait .1 seconds before re-evaluating - * If ever waiting more than 30 seconds restart the sequence number to the current on-chain state - - Assumptions: - * Accounts are expected to be managed by a single AccountSequenceNumber and not used otherwise. - * They are initialized to the current on-chain state, so if there are already transactions in flight, they make take some time to reset. 
- * Accounts are automatically initialized if not explicitly - * - """ + for worker in workers: + worker.get() - client: RestClient - account: AccountAddress - last_committed_number: int - current_number: int - maximum_in_flight: int = 50 - lock = asyncio.Lock - sleep_time = 0.01 - maximum_wait_time = 30 - - def __init__(self, client: RestClient, account: AccountAddress): - self.client = client - self.account = account - self.last_uncommitted_number = None - self.current_number = None - self.lock = asyncio.Lock() - - async def next_sequence_number(self) -> int: - await self.lock.acquire() - try: - if self.last_uncommitted_number is None or self.current_number is None: - await self.initialize() - - if ( - self.current_number - self.last_uncommitted_number - >= self.maximum_in_flight - ): - await self.__update() - - start_time = time.time() - while ( - self.last_uncommitted_number - self.current_number - >= self.maximum_in_flight - ): - asyncio.sleep(self.sleep_time) - if time.time() - start_time > self.maximum_wait_time: - logging.warn( - f"Waited over 30 seconds for a transaction to commit, resyncing {self.account.address()}" - ) - await self.__initialize() - else: - await self.__update() - - next_number = self.current_number - self.current_number += 1 - finally: - self.lock.release() - - return next_number - - async def initialize(self): - self.current_number = await self.__current_sequence_number() - self.last_uncommitted_number = self.current_number - - async def synchronize(self): - if self.last_uncommitted_number == self.current_number: - return + print(f"Transactions submitted at {time.time() - start} {time.time() - last}") + last = time.time() - await self.__update() - start_time = time.time() - while self.last_uncommitted_number != self.current_number: - if time.time() - start_time > self.maximum_wait_time: - logging.warn( - f"Waited over 30 seconds for a transaction to commit, resyncing {self.account.address()}" - ) - await self.__initialize() - else: - await asyncio.sleep(self.sleep_time) - await self.__update() + for worker in workers: + worker.put(True) - async def __update(self): - self.last_uncommitted_number = await self.__current_sequence_number() - return self.last_uncommitted_number + for worker in workers: + worker.get() - async def __current_sequence_number(self) -> int: - return await self.client.account_sequence_number(self.account) + print(f"Transactions processed at {time.time() - start} {time.time() - last}") + last = time.time() + for worker in workers: + worker.put(True) -async def transfer( - client: RestClient, - sender: Account, - recipient: AccountAddress, - sequence_number: int, - amount: int, -): - transaction_arguments = [ - TransactionArgument(recipient, Serializer.struct), - TransactionArgument(amount, Serializer.u64), - ] - payload = EntryFunction.natural( - "0x1::coin", - "transfer", - [TypeTag(StructTag.from_str("0x1::aptos_coin::AptosCoin"))], - transaction_arguments, - ) + for worker in workers: + worker.get() - raw_transaction = RawTransaction( - sender.address(), - sequence_number, - TransactionPayload(payload), - client.client_config.max_gas_amount, - client.client_config.gas_unit_price, - int(time.time()) + client.client_config.expiration_ttl, - await client.chain_id(), - ) + print(f"Transactions verified at {time.time() - start} {time.time() - last}") + last = time.time() - signature = sender.sign(raw_transaction.keyed()) - authenticator = Authenticator(Ed25519Authenticator(sender.public_key(), signature)) - signed_transaction = 
SignedTransaction(raw_transaction, authenticator) - return await client.submit_bcs_transaction(signed_transaction) + await rest_client.close() if __name__ == "__main__": diff --git a/ecosystem/python/sdk/poetry.lock b/ecosystem/python/sdk/poetry.lock index 5ab076e9b46fa..e5708e47e2648 100644 --- a/ecosystem/python/sdk/poetry.lock +++ b/ecosystem/python/sdk/poetry.lock @@ -1,32 +1,31 @@ -# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "anyio" -version = "3.6.2" +version = "3.7.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" -category = "main" optional = false -python-versions = ">=3.6.2" +python-versions = ">=3.7" files = [ - {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"}, - {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"}, + {file = "anyio-3.7.0-py3-none-any.whl", hash = "sha256:eddca883c4175f14df8aedce21054bfca3adb70ffe76a9f607aef9d7fa2ea7f0"}, + {file = "anyio-3.7.0.tar.gz", hash = "sha256:275d9973793619a5374e1c89a4f4ad3f4b0a5510a2b5b939444bee8f4c4d37ce"}, ] [package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"] -trio = ["trio (>=0.16,<0.22)"] +doc = ["Sphinx (>=6.1.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme", "sphinxcontrib-jquery"] +test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (<0.22)"] [[package]] name = "autoflake" version = "1.4" description = "Removes unused imports and unused variables" -category = "dev" optional = false python-versions = "*" files = [ @@ -40,7 +39,6 @@ pyflakes = ">=1.1.0" name = "black" version = "22.12.0" description = "The uncompromising code formatter." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -75,21 +73,19 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2022.12.7" +version = "2023.5.7" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, + {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, + {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, ] [[package]] name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." 
-category = "main" optional = false python-versions = "*" files = [ @@ -166,7 +162,6 @@ pycparser = "*" name = "click" version = "8.1.3" description = "Composable command line interface toolkit" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -182,7 +177,6 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -190,11 +184,96 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "coverage" +version = "7.2.7" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "coverage-7.2.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8"}, + {file = "coverage-7.2.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495"}, + {file = "coverage-7.2.7-cp310-cp310-win32.whl", hash = "sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818"}, + {file = "coverage-7.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833"}, + {file = 
"coverage-7.2.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a"}, + {file = "coverage-7.2.7-cp311-cp311-win32.whl", hash = "sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a"}, + {file = "coverage-7.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562"}, + {file = "coverage-7.2.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d"}, + {file = "coverage-7.2.7-cp312-cp312-win32.whl", hash = "sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511"}, + {file = "coverage-7.2.7-cp312-cp312-win_amd64.whl", hash = "sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3"}, + {file = "coverage-7.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02"}, + {file = "coverage-7.2.7-cp37-cp37m-win32.whl", hash = "sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f"}, + {file = "coverage-7.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f"}, + {file = "coverage-7.2.7-cp38-cp38-win32.whl", hash = "sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e"}, + {file = "coverage-7.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2"}, + {file = "coverage-7.2.7-cp39-cp39-win32.whl", hash = "sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb"}, + {file = "coverage-7.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27"}, + {file = "coverage-7.2.7-pp37.pp38.pp39-none-any.whl", hash = "sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d"}, + {file = "coverage-7.2.7.tar.gz", hash = "sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59"}, +] + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "exceptiongroup" +version = "1.1.1" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = 
"exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, +] + +[package.extras] +test = ["pytest (>=6)"] + [[package]] name = "flake8" version = "5.0.4" description = "the modular source code checker: pep8 pyflakes and co" -category = "dev" optional = false python-versions = ">=3.6.1" files = [ @@ -212,7 +291,6 @@ pyflakes = ">=2.5.0,<2.6.0" name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -227,7 +305,6 @@ typing-extensions = {version = "*", markers = "python_version < \"3.8\""} name = "h2" version = "4.1.0" description = "HTTP/2 State-Machine based protocol implementation" -category = "main" optional = false python-versions = ">=3.6.1" files = [ @@ -243,7 +320,6 @@ hyperframe = ">=6.0,<7" name = "hpack" version = "4.0.0" description = "Pure-Python HPACK header compression" -category = "main" optional = false python-versions = ">=3.6.1" files = [ @@ -255,7 +331,6 @@ files = [ name = "httpcore" version = "0.16.3" description = "A minimal low-level HTTP client." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -267,17 +342,16 @@ files = [ anyio = ">=3.0,<5.0" certifi = "*" h11 = ">=0.13,<0.15" -sniffio = ">=1.0.0,<2.0.0" +sniffio = "==1.*" [package.extras] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "httpx" version = "0.23.3" description = "The next generation HTTP client." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -293,15 +367,14 @@ sniffio = "*" [package.extras] brotli = ["brotli", "brotlicffi"] -cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<13)"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<13)"] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "hyperframe" version = "6.0.1" description = "HTTP/2 framing layer for Python" -category = "main" optional = false python-versions = ">=3.6.1" files = [ @@ -313,7 +386,6 @@ files = [ name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -325,7 +397,6 @@ files = [ name = "importlib-metadata" version = "4.2.0" description = "Read metadata from Python packages" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -345,7 +416,6 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", name = "isort" version = "5.11.5" description = "A Python utility / library to sort Python imports." -category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -363,7 +433,6 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"] name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -375,7 +444,6 @@ files = [ name = "mypy" version = "0.982" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -420,7 +488,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "0.4.4" description = "Experimental type system extensions for programs checked with the mypy typechecker." 
-category = "dev" optional = false python-versions = ">=2.7" files = [ @@ -431,7 +498,6 @@ files = [ name = "pathspec" version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -441,28 +507,26 @@ files = [ [[package]] name = "platformdirs" -version = "3.2.0" +version = "3.5.1" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "platformdirs-3.2.0-py3-none-any.whl", hash = "sha256:ebe11c0d7a805086e99506aa331612429a72ca7cd52a1f0d277dc4adc20cb10e"}, - {file = "platformdirs-3.2.0.tar.gz", hash = "sha256:d5b638ca397f25f979350ff789db335903d7ea010ab28903f57b27e1b16c2b08"}, + {file = "platformdirs-3.5.1-py3-none-any.whl", hash = "sha256:e2378146f1964972c03c085bb5662ae80b2b8c06226c54b2ff4aa9483e8a13a5"}, + {file = "platformdirs-3.5.1.tar.gz", hash = "sha256:412dae91f52a6f84830f39a8078cecd0e866cb72294a5c66808e74d5e88d251f"}, ] [package.dependencies] typing-extensions = {version = ">=4.5", markers = "python_version < \"3.8\""} [package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] +docs = ["furo (>=2023.3.27)", "proselint (>=0.13)", "sphinx (>=6.2.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] [[package]] name = "pycodestyle" version = "2.9.1" description = "Python style guide checker" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -474,7 +538,6 @@ files = [ name = "pycparser" version = "2.21" description = "C parser in Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -486,7 +549,6 @@ files = [ name = "pyflakes" version = "2.5.0" description = "passive checker of Python programs" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -498,7 +560,6 @@ files = [ name = "pynacl" version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -525,7 +586,6 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] name = "rfc3986" version = "1.5.0" description = "Validating URI References per RFC 3986" -category = "main" optional = false python-versions = "*" files = [ @@ -543,7 +603,6 @@ idna2008 = ["idna"] name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -555,7 +614,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -567,7 +625,6 @@ files = [ name = "typed-ast" version = "1.5.4" description = "a fork of Python 2 and 3 ast modules with type comment support" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -599,21 +656,19 @@ files = [ [[package]] name = "typing-extensions" -version = "4.5.0" +version = "4.6.3" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" 
files = [ - {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, - {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, + {file = "typing_extensions-4.6.3-py3-none-any.whl", hash = "sha256:88a4153d8505aabbb4e13aacb7c486c2b4a33ca3b3f807914a9b4c844c471c26"}, + {file = "typing_extensions-4.6.3.tar.gz", hash = "sha256:d91d5919357fe7f681a9f2b5b4cb2a5f1ef0a1e9f59c4d8ff0d3491e05c0ffd5"}, ] [[package]] name = "zipp" version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -628,4 +683,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [metadata] lock-version = "2.0" python-versions = ">=3.7,<4.0" -content-hash = "b85a2cf5a06aeef9570c7771739aff4ada8fcf0715e940ecc7cf701c7ccfd1e3" +content-hash = "58444a4ad25fc804f24845b567348e41f0a28dfc8197fe25f6e2391f8bdf2c1b" diff --git a/ecosystem/python/sdk/pyproject.toml b/ecosystem/python/sdk/pyproject.toml index fc9f36b6a7daf..40473f3ca53ec 100644 --- a/ecosystem/python/sdk/pyproject.toml +++ b/ecosystem/python/sdk/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aptos-sdk" -version = "0.6.1" +version = "0.6.3" description = "Aptos SDK" authors = ["Aptos Labs "] license = "Apache-2.0" @@ -18,6 +18,7 @@ python = ">=3.7,<4.0" [tool.poetry.dev-dependencies] autoflake = "1.4.0" black = "^22.6.0" +coverage = "^7.2.4" flake8 = ">=3.8.3,<6.0.0" isort = "^5.10.1" mypy = "^0.982" diff --git a/ecosystem/python/sdk/setup.py b/ecosystem/python/sdk/setup.py deleted file mode 100644 index fe9a526f997d2..0000000000000 --- a/ecosystem/python/sdk/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -import setuptools - -with open("README.md", "r", encoding="utf-8") as fh: - long_description = fh.read() - -setuptools.setup( - author="Aptos Labs", - author_email="opensource@aptoslabs.com", - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - ], - include_package_data=True, - install_requires=["httpx", "pynacl"], - long_description=long_description, - long_description_content_type="text/markdown", - name="aptos_sdk", - packages=["aptos_sdk"], - python_requires=">=3.7", - url="https://github.com/aptos-labs/aptos-core", - version="0.6.0", -) diff --git a/ecosystem/typescript/sdk/CHANGELOG.md b/ecosystem/typescript/sdk/CHANGELOG.md index 876b9fc26fbd6..159286c05d29f 100644 --- a/ecosystem/typescript/sdk/CHANGELOG.md +++ b/ecosystem/typescript/sdk/CHANGELOG.md @@ -4,7 +4,15 @@ All notable changes to the Aptos Node SDK will be captured in this file. This ch ## Unreleased +## 1.10.0 (2023-06-07) + - Add `x-aptos-client` header to `IndexerClient` requests +- Add `standardizeAddress` static function to `AccountAddress` class to standardizes an address to the format "0x" followed by 64 lowercase hexadecimal digits. 
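A minimal usage sketch of the `standardizeAddress` helper described in the bullet above; it mirrors the implementation added to `account_address.ts` later in this diff. The import path is an assumption (in the 1.x SDK, `AccountAddress` is reachable through `TxnBuilderTypes`), and the printed values simply illustrate the padding/lowercasing behavior.

```ts
// Sketch, not part of the diff: normalize addresses with the new static helper.
// Assumption: AccountAddress is accessed via TxnBuilderTypes in aptos >= 1.10.0.
import { TxnBuilderTypes } from "aptos";

const { AccountAddress } = TxnBuilderTypes;

// Short and mixed-case inputs normalize to "0x" + 64 lowercase hex digits.
console.log(AccountAddress.standardizeAddress("0x1"));
// -> 0x0000000000000000000000000000000000000000000000000000000000000001
console.log(AccountAddress.standardizeAddress("0xAB12"));
// -> 0x000000000000000000000000000000000000000000000000000000000000ab12
```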
+- Change `indexerUrl` param on `Provider` class to an optional parameter +- Add `getCollectionsWithOwnedTokens` query to fetch all collections that an account has tokens for +- Support `tokenStandard` param in `getOwnedTokens` and `getTokenOwnedFromCollectionAddress` queries +- Add `FungibleAssetClient` plugin to support fungible assets +- Support fungible assets in `CoinClient` class operations ## 1.9.1 (2023-05-24) diff --git a/ecosystem/typescript/sdk/examples/typescript/ambassador.ts b/ecosystem/typescript/sdk/examples/typescript/ambassador.ts new file mode 100644 index 0000000000000..2e5eab2e9581b --- /dev/null +++ b/ecosystem/typescript/sdk/examples/typescript/ambassador.ts @@ -0,0 +1,171 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +import { AptosAccount, HexString, Provider, Network, Types, FaucetClient, BCS } from "aptos"; +import { NODE_URL, FAUCET_URL } from "./common"; + +const provider = new Provider(Network.DEVNET); +const faucetClient = new FaucetClient(NODE_URL, FAUCET_URL); + +async function getTokenAddr(ownerAddr: HexString, tokenName: string): Promise { + const tokenOwnership = await provider.getOwnedTokens(ownerAddr); + for (const ownership of tokenOwnership.current_token_ownerships_v2) { + if (ownership.current_token_data.token_name === tokenName) { + return new HexString(ownership.current_token_data.token_data_id); + } + } + console.log(`Token ${tokenName} not found`); + process.exit(1); +} + +async function waitForEnter() { + return new Promise((resolve, reject) => { + const rl = require("readline").createInterface({ + input: process.stdin, + output: process.stdout, + }); + + rl.question("Please press the Enter key to proceed ...\n", () => { + rl.close(); + resolve(); + }); + }); +} + +class AmbassadorClient { + async setAmbassadorLevel( + creator: AptosAccount, + token: HexString, + new_ambassador_level: BCS.AnyNumber, + ): Promise { + const rawTxn = await provider.generateTransaction(creator.address(), { + function: `${creator.address()}::ambassador::set_ambassador_level`, + type_arguments: [], + arguments: [token.hex(), new_ambassador_level], + }); + + const bcsTxn = await provider.signTransaction(creator, rawTxn); + const pendingTxn = await provider.submitTransaction(bcsTxn); + + return pendingTxn.hash; + } + + async burn(creator: AptosAccount, token: HexString): Promise { + const rawTxn = await provider.generateTransaction(creator.address(), { + function: `${creator.address()}::ambassador::burn`, + type_arguments: [], + arguments: [token.hex()], + }); + + const bcsTxn = await provider.signTransaction(creator, rawTxn); + const pendingTxn = await provider.submitTransaction(bcsTxn); + + return pendingTxn.hash; + } + + async mintAmbassadorToken( + creator: AptosAccount, + description: string, + name: string, + uri: string, + soul_bound_to: HexString, + ): Promise { + const rawTxn = await provider.generateTransaction(creator.address(), { + function: `${creator.address()}::ambassador::mint_ambassador_token`, + type_arguments: [], + arguments: [description, name, uri, soul_bound_to.hex()], + }); + + const bcsTxn = await provider.signTransaction(creator, rawTxn); + const pendingTxn = await provider.submitTransaction(bcsTxn); + + return pendingTxn.hash; + } + + async ambassadorLevel(creator_addr: HexString, token_addr: HexString): Promise { + const payload: Types.ViewRequest = { + function: `${creator_addr.hex()}::ambassador::ambassador_level`, + type_arguments: [], + arguments: [token_addr.hex()], + }; + + const result = await 
provider.view(payload); + return BigInt(result[0] as any); + } +} + +/** run our demo! */ +async function main(): Promise { + const client = new AmbassadorClient(); + + const admin = new AptosAccount(); + const user = new AptosAccount(); + + await faucetClient.fundAccount(admin.address(), 100_000_000); + await faucetClient.fundAccount(user.address(), 100_000_000); + + console.log( + "\nCompile and publish the Ambassador module (`aptos-core/aptos-move/move-examples/token_objects/ambassador`) using the following profile, and press enter:", + ); + console.log(` ambassador_admin:`); + console.log(` private_key: "${admin.toPrivateKeyObject().privateKeyHex}"`); + console.log(` public_key: "${admin.pubKey()}"`); + console.log(` account: ${admin.address()}`); + console.log(` rest_url: "https://fullnode.devnet.aptoslabs.com"`); + console.log(` faucet_url: "https://faucet.devnet.aptoslabs.com"`); + + await waitForEnter(); + + const adminAddr = admin.address(); + const userAddr = user.address(); + const tokenName = "Aptos Ambassador #1"; + + console.log("\n=== Addresses ==="); + console.log(`Admin: ${adminAddr} `); + console.log(`User: ${userAddr} `); + + // Mint Ambassador Token + let txnHash = await client.mintAmbassadorToken( + admin, + "Aptos Ambassador Token", + tokenName, + "https://raw.githubusercontent.com/aptos-labs/aptos-core/main/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/", + userAddr, + ); + await provider.waitForTransaction(txnHash, { checkSuccess: true }); + console.log("\n=== Ambassador Token Minted ==="); + console.log(`Txn: https://explorer.aptoslabs.com/txn/${txnHash}?network=devnet`); + // Get the address of the minted token + const tokenAddr = await getTokenAddr(userAddr, tokenName); + console.log(`The address of the minted token: ${tokenAddr}`); + console.log(`The level of the token: ${await client.ambassadorLevel(adminAddr, tokenAddr)}`); + await waitForEnter(); + + // Set Ambassador Level to 15 + txnHash = await client.setAmbassadorLevel(admin, tokenAddr, 15); + await provider.waitForTransaction(txnHash, { checkSuccess: true }); + console.log("\n=== Level set to 15 ==="); + console.log(`Txn: https://explorer.aptoslabs.com/txn/${txnHash}?network=devnet`); + console.log(`The level of the token: ${await client.ambassadorLevel(adminAddr, tokenAddr)}`); + await waitForEnter(); + + // Set Ambassador Level to 25 + txnHash = await client.setAmbassadorLevel(admin, tokenAddr, 25); + await provider.waitForTransaction(txnHash, { checkSuccess: true }); + console.log("\n=== Level set to 25 ==="); + console.log(`Txn: https://explorer.aptoslabs.com/txn/${txnHash}?network=devnet`); + console.log(`The level of the token: ${await client.ambassadorLevel(adminAddr, tokenAddr)}`); + await waitForEnter(); + + // Burn the token + txnHash = await client.burn(admin, tokenAddr); + await provider.waitForTransaction(txnHash, { checkSuccess: true }); + console.log("\n=== Token burned ==="); + console.log(`Txn: https://explorer.aptoslabs.com/txn/${txnHash}?network=devnet`); + await waitForEnter(); +} + +main().then(() => { + console.log("Done!"); + process.exit(0); +}); diff --git a/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Bronze b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Bronze new file mode 100644 index 0000000000000..266637b8f7898 --- /dev/null +++ b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Bronze @@ -0,0 +1,6 @@ +{ + "name": "Ambassador #1", + "description": "Aptos Ambassador Token", + "image": 
"https://raw.githubusercontent.com/aptos-labs/aptos-core/main/aptos-move/move-examples/token_objects/ambassador/metadata/Bronze.png", + "attributes": [] +} \ No newline at end of file diff --git a/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Bronze.png b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Bronze.png new file mode 100644 index 0000000000000..44e6522d2747c Binary files /dev/null and b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Bronze.png differ diff --git a/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Gold b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Gold new file mode 100644 index 0000000000000..7d1fc928aa1ad --- /dev/null +++ b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Gold @@ -0,0 +1,6 @@ +{ + "name": "Ambassador #1", + "description": "Aptos Ambassador Token", + "image": "https://raw.githubusercontent.com/aptos-labs/aptos-core/main/aptos-move/move-examples/token_objects/ambassador/metadata/Gold.png", + "attributes": [] +} \ No newline at end of file diff --git a/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Gold.png b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Gold.png new file mode 100644 index 0000000000000..7b50a69c47254 Binary files /dev/null and b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Gold.png differ diff --git a/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Silver b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Silver new file mode 100644 index 0000000000000..99392527d7060 --- /dev/null +++ b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Silver @@ -0,0 +1,6 @@ +{ + "name": "Ambassador #1", + "description": "Aptos Ambassador Token", + "image": "https://raw.githubusercontent.com/aptos-labs/aptos-core/main/aptos-move/move-examples/token_objects/ambassador/metadata/Silver.png", + "attributes": [] +} \ No newline at end of file diff --git a/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Silver.png b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Silver.png new file mode 100644 index 0000000000000..fa46647a9f048 Binary files /dev/null and b/ecosystem/typescript/sdk/examples/typescript/metadata/ambassador/Silver.png differ diff --git a/ecosystem/typescript/sdk/examples/typescript/package.json b/ecosystem/typescript/sdk/examples/typescript/package.json index 46e700c5de9f8..3f144b572749c 100644 --- a/ecosystem/typescript/sdk/examples/typescript/package.json +++ b/ecosystem/typescript/sdk/examples/typescript/package.json @@ -11,7 +11,8 @@ "transfer_coin": "ts-node transfer_coin.ts", "test": "run-s bcs_transaction multisig_transaction simple_nft transfer_coin", "your_coin": "ts-node your_coin.ts", - "call_aptos_cli": "ts-node call_aptos_cli.ts" + "call_aptos_cli": "ts-node call_aptos_cli.ts", + "ambassador": "ts-node ambassador" }, "keywords": [], "author": "", @@ -29,4 +30,4 @@ "ts-node": "10.9.1", "typescript": "4.8.2" } -} +} \ No newline at end of file diff --git a/ecosystem/typescript/sdk/examples/typescript/pnpm-lock.yaml b/ecosystem/typescript/sdk/examples/typescript/pnpm-lock.yaml index 71fba9cfa3bd9..dded6fd21fb3d 100644 --- a/ecosystem/typescript/sdk/examples/typescript/pnpm-lock.yaml +++ b/ecosystem/typescript/sdk/examples/typescript/pnpm-lock.yaml @@ -3,7 +3,7 @@ lockfileVersion: '6.0' dependencies: aptos: specifier: latest - version: 1.8.4 + version: 1.9.1 dotenv: specifier: 16.0.1 
version: 16.0.1 @@ -40,20 +40,20 @@ packages: '@jridgewell/trace-mapping': 0.3.9 dev: true - /@jridgewell/resolve-uri@3.1.0: - resolution: {integrity: sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==} + /@jridgewell/resolve-uri@3.1.1: + resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} engines: {node: '>=6.0.0'} dev: true - /@jridgewell/sourcemap-codec@1.4.14: - resolution: {integrity: sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==} + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} dev: true /@jridgewell/trace-mapping@0.3.9: resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} dependencies: - '@jridgewell/resolve-uri': 3.1.0 - '@jridgewell/sourcemap-codec': 1.4.14 + '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/sourcemap-codec': 1.4.15 dev: true /@noble/hashes@1.1.3: @@ -83,8 +83,8 @@ packages: resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} dev: true - /@tsconfig/node16@1.0.3: - resolution: {integrity: sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ==} + /@tsconfig/node16@1.0.4: + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} dev: true /@types/ffi-napi@4.0.7: @@ -129,8 +129,8 @@ packages: color-convert: 1.9.3 dev: true - /aptos@1.8.4: - resolution: {integrity: sha512-LWasWcz8+SMj4nCGQzB8kC0P/b2PRraUSjIQmeQH6jJ4O2WqS4MASzQZdk3vkG+i5O2dgLRgDK2QUZaxHqfydQ==} + /aptos@1.9.1: + resolution: {integrity: sha512-QM15VoQTtkTSppKGtwolAX2ZUZdYD4dRBQYm1oSimUrLI/bYjUejqMqxwZtTVvEw61lJv0l3wGdNIYH0luynqg==} engines: {node: '>=11.0.0'} dependencies: '@noble/hashes': 1.1.3 @@ -146,6 +146,13 @@ packages: resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} dev: true + /array-buffer-byte-length@1.0.0: + resolution: {integrity: sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==} + dependencies: + call-bind: 1.0.2 + is-array-buffer: 3.0.2 + dev: true + /array-index@1.0.0: resolution: {integrity: sha512-jesyNbBkLQgGZMSwA1FanaFjalb1mZUGxGeUEkSDidzgrbjBGhvizJkaItdhkt8eIHFOJC7nDsrXk+BaehTdRw==} dependencies: @@ -188,7 +195,7 @@ packages: resolution: {integrity: sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==} dependencies: function-bind: 1.1.1 - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 dev: true /chalk@2.4.2: @@ -306,17 +313,17 @@ packages: is-arrayish: 0.2.1 dev: true - /es-abstract@1.21.1: - resolution: {integrity: sha512-QudMsPOz86xYz/1dG1OuGBKOELjCh99IIWHLzy5znUB6j8xG2yMA7bfTV86VSqKF+Y/H08vQPR+9jyXpuC6hfg==} + /es-abstract@1.21.2: + resolution: {integrity: sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==} engines: {node: '>= 0.4'} dependencies: + array-buffer-byte-length: 1.0.0 available-typed-arrays: 1.0.5 call-bind: 1.0.2 es-set-tostringtag: 2.0.1 es-to-primitive: 1.2.1 - function-bind: 1.1.1 function.prototype.name: 1.1.5 - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 get-symbol-description: 1.0.0 globalthis: 1.0.3 gopd: 1.0.1 @@ -336,8 +343,9 @@ packages: 
object-inspect: 1.12.3 object-keys: 1.1.1 object.assign: 4.1.4 - regexp.prototype.flags: 1.4.3 + regexp.prototype.flags: 1.5.0 safe-regex-test: 1.0.0 + string.prototype.trim: 1.2.7 string.prototype.trimend: 1.0.6 string.prototype.trimstart: 1.0.6 typed-array-length: 1.0.4 @@ -349,7 +357,7 @@ packages: resolution: {integrity: sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==} engines: {node: '>= 0.4'} dependencies: - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 has: 1.0.3 has-tostringtag: 1.0.0 dev: true @@ -449,7 +457,7 @@ packages: dependencies: call-bind: 1.0.2 define-properties: 1.2.0 - es-abstract: 1.21.1 + es-abstract: 1.21.2 functions-have-names: 1.2.3 dev: true @@ -457,11 +465,12 @@ packages: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} dev: true - /get-intrinsic@1.2.0: - resolution: {integrity: sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==} + /get-intrinsic@1.2.1: + resolution: {integrity: sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==} dependencies: function-bind: 1.1.1 has: 1.0.3 + has-proto: 1.0.1 has-symbols: 1.0.3 dev: true @@ -470,7 +479,7 @@ packages: engines: {node: '>= 0.4'} dependencies: call-bind: 1.0.2 - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 dev: true /get-symbol-from-current-process-h@1.0.2: @@ -493,11 +502,11 @@ packages: /gopd@1.0.1: resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} dependencies: - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 dev: true - /graceful-fs@4.2.10: - resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==} + /graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} dev: true /has-bigints@1.0.2: @@ -512,7 +521,7 @@ packages: /has-property-descriptors@1.0.0: resolution: {integrity: sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==} dependencies: - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 dev: true /has-proto@1.0.1: @@ -547,7 +556,7 @@ packages: resolution: {integrity: sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==} engines: {node: '>= 0.4'} dependencies: - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 has: 1.0.3 side-channel: 1.0.4 dev: true @@ -556,7 +565,7 @@ packages: resolution: {integrity: sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==} dependencies: call-bind: 1.0.2 - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 is-typed-array: 1.1.10 dev: true @@ -583,8 +592,8 @@ packages: engines: {node: '>= 0.4'} dev: true - /is-core-module@2.11.0: - resolution: {integrity: sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==} + /is-core-module@2.12.1: + resolution: {integrity: sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==} dependencies: has: 1.0.3 dev: true @@ -665,7 +674,7 @@ packages: resolution: {integrity: sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw==} engines: {node: '>=4'} dependencies: - graceful-fs: 4.2.10 + graceful-fs: 4.2.11 parse-json: 4.0.0 pify: 3.0.0 strip-bom: 3.0.0 @@ -731,7 +740,7 @@ packages: resolution: 
{integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} dependencies: hosted-git-info: 2.8.9 - resolve: 1.22.1 + resolve: 1.22.2 semver: 5.7.1 validate-npm-package-license: 3.0.4 dev: true @@ -748,7 +757,7 @@ packages: minimatch: 3.1.2 pidtree: 0.3.1 read-pkg: 3.0.0 - shell-quote: 1.8.0 + shell-quote: 1.8.1 string.prototype.padend: 3.1.4 dev: true @@ -845,8 +854,8 @@ packages: - supports-color dev: false - /regexp.prototype.flags@1.4.3: - resolution: {integrity: sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==} + /regexp.prototype.flags@1.5.0: + resolution: {integrity: sha512-0SutC3pNudRKgquxGoRGIz946MZVHqbNfPjBdxeOhBrdgDKlRoXmYLQN9xRbrR09ZXWeGAdPuif7egofn6v5LA==} engines: {node: '>= 0.4'} dependencies: call-bind: 1.0.2 @@ -854,11 +863,11 @@ packages: functions-have-names: 1.2.3 dev: true - /resolve@1.22.1: - resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==} + /resolve@1.22.2: + resolution: {integrity: sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==} hasBin: true dependencies: - is-core-module: 2.11.0 + is-core-module: 2.12.1 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 dev: true @@ -867,7 +876,7 @@ packages: resolution: {integrity: sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==} dependencies: call-bind: 1.0.2 - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 is-regex: 1.1.4 dev: true @@ -888,15 +897,15 @@ packages: engines: {node: '>=0.10.0'} dev: true - /shell-quote@1.8.0: - resolution: {integrity: sha512-QHsz8GgQIGKlRi24yFc6a6lN69Idnx634w49ay6+jA5yFh7a1UY+4Rp6HPx/L/1zcEDPEij8cIsiqR6bQsE5VQ==} + /shell-quote@1.8.1: + resolution: {integrity: sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==} dev: true /side-channel@1.0.4: resolution: {integrity: sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==} dependencies: call-bind: 1.0.2 - get-intrinsic: 1.2.0 + get-intrinsic: 1.2.1 object-inspect: 1.12.3 dev: true @@ -928,7 +937,16 @@ packages: dependencies: call-bind: 1.0.2 define-properties: 1.2.0 - es-abstract: 1.21.1 + es-abstract: 1.21.2 + dev: true + + /string.prototype.trim@1.2.7: + resolution: {integrity: sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + es-abstract: 1.21.2 dev: true /string.prototype.trimend@1.0.6: @@ -936,7 +954,7 @@ packages: dependencies: call-bind: 1.0.2 define-properties: 1.2.0 - es-abstract: 1.21.1 + es-abstract: 1.21.2 dev: true /string.prototype.trimstart@1.0.6: @@ -944,7 +962,7 @@ packages: dependencies: call-bind: 1.0.2 define-properties: 1.2.0 - es-abstract: 1.21.1 + es-abstract: 1.21.2 dev: true /strip-bom@3.0.0: @@ -982,7 +1000,7 @@ packages: '@tsconfig/node10': 1.0.9 '@tsconfig/node12': 1.0.11 '@tsconfig/node14': 1.0.3 - '@tsconfig/node16': 1.0.3 + '@tsconfig/node16': 1.0.4 '@types/node': 18.6.2 acorn: 8.8.2 acorn-walk: 8.2.0 diff --git a/ecosystem/typescript/sdk/package.json b/ecosystem/typescript/sdk/package.json index 00dcaf5cb1635..5f25d03e79885 100644 --- a/ecosystem/typescript/sdk/package.json +++ b/ecosystem/typescript/sdk/package.json @@ -24,6 +24,8 @@ "_build:node": "tsup --format cjs,esm --dts", "lint": "eslint \"**/*.ts\"", "test": "pnpm run 
publish-ans-contract && jest", + "test:ci": "pnpm run publish-ans-contract && jest --testPathIgnorePatterns=indexer.test.ts", + "test:indexer": "jest --collectCoverageFrom='./src/tests/e2e/indexer.test.ts' -- ./src/tests/e2e/indexer.test.ts", "_fmt": "prettier 'scripts/**/*.ts' 'src/**/*.ts' 'examples/**/*.js' 'examples/**/*.ts' '.eslintrc.js'", "fmt": "pnpm _fmt --write", "fmt:check": "pnpm _fmt --check", @@ -83,5 +85,5 @@ "typedoc": "^0.23.20", "typescript": "4.8.2" }, - "version": "1.9.1" + "version": "1.10.0" } diff --git a/ecosystem/typescript/sdk/src/aptos_types/account_address.ts b/ecosystem/typescript/sdk/src/aptos_types/account_address.ts index 3417f0d3deccf..d64945e8c6676 100644 --- a/ecosystem/typescript/sdk/src/aptos_types/account_address.ts +++ b/ecosystem/typescript/sdk/src/aptos_types/account_address.ts @@ -85,4 +85,19 @@ export class AccountAddress { static deserialize(deserializer: Deserializer): AccountAddress { return new AccountAddress(deserializer.deserializeFixedBytes(AccountAddress.LENGTH)); } + + /** + * Standardizes an address to the format "0x" followed by 64 lowercase hexadecimal digits. + */ + static standardizeAddress(address: string): string { + // Convert the address to lowercase + const lowercaseAddress = address.toLowerCase(); + // Remove the "0x" prefix if present + const addressWithoutPrefix = lowercaseAddress.startsWith("0x") ? lowercaseAddress.slice(2) : lowercaseAddress; + // Pad the address with leading zeros if necessary + // to ensure it has exactly 64 characters (excluding the "0x" prefix) + const addressWithPadding = addressWithoutPrefix.padStart(64, "0"); + // Return the standardized address with the "0x" prefix + return `0x${addressWithPadding}`; + } } diff --git a/ecosystem/typescript/sdk/src/bcs/helper.ts b/ecosystem/typescript/sdk/src/bcs/helper.ts index 21059c0ae43b6..327e29449263e 100644 --- a/ecosystem/typescript/sdk/src/bcs/helper.ts +++ b/ecosystem/typescript/sdk/src/bcs/helper.ts @@ -81,6 +81,12 @@ export function bcsSerializeU128(value: AnyNumber): Bytes { return serializer.getBytes(); } +export function bcsSerializeU256(value: AnyNumber): Bytes { + const serializer = new Serializer(); + serializer.serializeU256(value); + return serializer.getBytes(); +} + export function bcsSerializeBool(value: boolean): Bytes { const serializer = new Serializer(); serializer.serializeBool(value); diff --git a/ecosystem/typescript/sdk/src/indexer/generated/operations.ts b/ecosystem/typescript/sdk/src/indexer/generated/operations.ts index 63fa1aeb5585e..0b8b583328459 100644 --- a/ecosystem/typescript/sdk/src/indexer/generated/operations.ts +++ b/ecosystem/typescript/sdk/src/indexer/generated/operations.ts @@ -56,6 +56,15 @@ export type GetCollectionDataQueryVariables = Types.Exact<{ export type GetCollectionDataQuery = { __typename?: 'query_root', current_collections_v2: Array<{ __typename?: 'current_collections_v2', collection_id: string, token_standard: string, collection_name: string, creator_address: string, current_supply: any, description: string, uri: string }> }; +export type GetCollectionsWithOwnedTokensQueryVariables = Types.Exact<{ + where_condition: Types.Current_Collection_Ownership_V2_View_Bool_Exp; + offset?: Types.InputMaybe; + limit?: Types.InputMaybe; +}>; + + +export type GetCollectionsWithOwnedTokensQuery = { __typename?: 'query_root', current_collection_ownership_v2_view: Array<{ __typename?: 'current_collection_ownership_v2_view', distinct_tokens?: any | null, last_transaction_version?: any | null, current_collection?: { 
__typename?: 'current_collections_v2', creator_address: string, collection_name: string, token_standard: string, collection_id: string, description: string, table_handle_v1?: string | null, uri: string, total_minted_v2?: any | null, max_supply?: any | null } | null }> }; + export type GetDelegatedStakingActivitiesQueryVariables = Types.Exact<{ delegatorAddress?: Types.InputMaybe; poolAddress?: Types.InputMaybe; @@ -77,7 +86,7 @@ export type GetNumberOfDelegatorsQueryVariables = Types.Exact<{ export type GetNumberOfDelegatorsQuery = { __typename?: 'query_root', num_active_delegator_per_pool: Array<{ __typename?: 'num_active_delegator_per_pool', num_active_delegator?: any | null }> }; export type GetOwnedTokensQueryVariables = Types.Exact<{ - address: Types.Scalars['String']; + where_condition: Types.Current_Token_Ownerships_V2_Bool_Exp; offset?: Types.InputMaybe; limit?: Types.InputMaybe; }>; @@ -109,8 +118,7 @@ export type GetTokenDataQueryVariables = Types.Exact<{ export type GetTokenDataQuery = { __typename?: 'query_root', current_token_datas: Array<{ __typename?: 'current_token_datas', token_data_id_hash: string, name: string, collection_name: string, creator_address: string, default_properties: any, largest_property_version: any, maximum: any, metadata_uri: string, payee_address: string, royalty_points_denominator: any, royalty_points_numerator: any, supply: any }> }; export type GetTokenOwnedFromCollectionQueryVariables = Types.Exact<{ - collection_id: Types.Scalars['String']; - owner_address: Types.Scalars['String']; + where_condition: Types.Current_Token_Ownerships_V2_Bool_Exp; offset?: Types.InputMaybe; limit?: Types.InputMaybe; }>; diff --git a/ecosystem/typescript/sdk/src/indexer/generated/queries.ts b/ecosystem/typescript/sdk/src/indexer/generated/queries.ts index 33d71de4a5184..cfea440ee9244 100644 --- a/ecosystem/typescript/sdk/src/indexer/generated/queries.ts +++ b/ecosystem/typescript/sdk/src/indexer/generated/queries.ts @@ -150,6 +150,30 @@ export const GetCollectionData = ` } } `; +export const GetCollectionsWithOwnedTokens = ` + query getCollectionsWithOwnedTokens($where_condition: current_collection_ownership_v2_view_bool_exp!, $offset: Int, $limit: Int) { + current_collection_ownership_v2_view( + where: $where_condition + order_by: {last_transaction_version: desc} + offset: $offset + limit: $limit + ) { + current_collection { + creator_address + collection_name + token_standard + collection_id + description + table_handle_v1 + uri + total_minted_v2 + max_supply + } + distinct_tokens + last_transaction_version + } +} + `; export const GetDelegatedStakingActivities = ` query getDelegatedStakingActivities($delegatorAddress: String, $poolAddress: String) { delegated_staking_activities( @@ -182,9 +206,9 @@ export const GetNumberOfDelegators = ` } `; export const GetOwnedTokens = ` - query getOwnedTokens($address: String!, $offset: Int, $limit: Int) { + query getOwnedTokens($where_condition: current_token_ownerships_v2_bool_exp!, $offset: Int, $limit: Int) { current_token_ownerships_v2( - where: {owner_address: {_eq: $address}, amount: {_gt: 0}} + where: $where_condition offset: $offset limit: $limit ) { @@ -244,9 +268,9 @@ export const GetTokenData = ` } `; export const GetTokenOwnedFromCollection = ` - query getTokenOwnedFromCollection($collection_id: String!, $owner_address: String!, $offset: Int, $limit: Int) { + query getTokenOwnedFromCollection($where_condition: current_token_ownerships_v2_bool_exp!, $offset: Int, $limit: Int) { current_token_ownerships_v2( - where: 
{owner_address: {_eq: $owner_address}, current_token_data: {collection_id: {_eq: $collection_id}}, amount: {_gt: 0}} + where: $where_condition offset: $offset limit: $limit ) { @@ -308,6 +332,9 @@ export function getSdk(client: GraphQLClient, withWrapper: SdkFunctionWrapper = getCollectionData(variables: Types.GetCollectionDataQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise { return withWrapper((wrappedRequestHeaders) => client.request(GetCollectionData, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'getCollectionData', 'query'); }, + getCollectionsWithOwnedTokens(variables: Types.GetCollectionsWithOwnedTokensQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise { + return withWrapper((wrappedRequestHeaders) => client.request(GetCollectionsWithOwnedTokens, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'getCollectionsWithOwnedTokens', 'query'); + }, getDelegatedStakingActivities(variables?: Types.GetDelegatedStakingActivitiesQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise { return withWrapper((wrappedRequestHeaders) => client.request(GetDelegatedStakingActivities, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'getDelegatedStakingActivities', 'query'); }, diff --git a/ecosystem/typescript/sdk/src/indexer/generated/types.ts b/ecosystem/typescript/sdk/src/indexer/generated/types.ts index dea693afaa9fc..51460bc8c49fc 100644 --- a/ecosystem/typescript/sdk/src/indexer/generated/types.ts +++ b/ecosystem/typescript/sdk/src/indexer/generated/types.ts @@ -75,11 +75,66 @@ export type String_Comparison_Exp = { _similar?: InputMaybe; }; +/** columns and relationships of "address_events_summary" */ +export type Address_Events_Summary = { + __typename?: 'address_events_summary'; + account_address?: Maybe; + /** An object relationship */ + block_metadata?: Maybe; + min_block_height?: Maybe; + num_distinct_versions?: Maybe; +}; + +/** Boolean expression to filter rows from the table "address_events_summary". All fields are combined with a logical 'AND'. */ +export type Address_Events_Summary_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + account_address?: InputMaybe; + block_metadata?: InputMaybe; + min_block_height?: InputMaybe; + num_distinct_versions?: InputMaybe; +}; + +/** Ordering options when selecting data from "address_events_summary". 
*/ +export type Address_Events_Summary_Order_By = { + account_address?: InputMaybe; + block_metadata?: InputMaybe; + min_block_height?: InputMaybe; + num_distinct_versions?: InputMaybe; +}; + +/** select columns of table "address_events_summary" */ +export enum Address_Events_Summary_Select_Column { + /** column name */ + AccountAddress = 'account_address', + /** column name */ + MinBlockHeight = 'min_block_height', + /** column name */ + NumDistinctVersions = 'num_distinct_versions' +} + +/** Streaming cursor of the table "address_events_summary" */ +export type Address_Events_Summary_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Address_Events_Summary_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Address_Events_Summary_Stream_Cursor_Value_Input = { + account_address?: InputMaybe; + min_block_height?: InputMaybe; + num_distinct_versions?: InputMaybe; +}; + /** columns and relationships of "address_version_from_events" */ export type Address_Version_From_Events = { __typename?: 'address_version_from_events'; account_address?: Maybe; coin_activities: Array; + coin_activities_aggregate: Coin_Activities_Aggregate; token_activities: Array; token_activities_aggregate: Token_Activities_Aggregate; transaction_version?: Maybe; @@ -96,6 +151,16 @@ export type Address_Version_From_EventsCoin_ActivitiesArgs = { }; +/** columns and relationships of "address_version_from_events" */ +export type Address_Version_From_EventsCoin_Activities_AggregateArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + /** columns and relationships of "address_version_from_events" */ export type Address_Version_From_EventsToken_ActivitiesArgs = { distinct_on?: InputMaybe>; @@ -115,6 +180,42 @@ export type Address_Version_From_EventsToken_Activities_AggregateArgs = { where?: InputMaybe; }; +/** aggregated selection of "address_version_from_events" */ +export type Address_Version_From_Events_Aggregate = { + __typename?: 'address_version_from_events_aggregate'; + aggregate?: Maybe; + nodes: Array; +}; + +/** aggregate fields of "address_version_from_events" */ +export type Address_Version_From_Events_Aggregate_Fields = { + __typename?: 'address_version_from_events_aggregate_fields'; + avg?: Maybe; + count: Scalars['Int']; + max?: Maybe; + min?: Maybe; + stddev?: Maybe; + stddev_pop?: Maybe; + stddev_samp?: Maybe; + sum?: Maybe; + var_pop?: Maybe; + var_samp?: Maybe; + variance?: Maybe; +}; + + +/** aggregate fields of "address_version_from_events" */ +export type Address_Version_From_Events_Aggregate_FieldsCountArgs = { + columns?: InputMaybe>; + distinct?: InputMaybe; +}; + +/** aggregate avg on columns */ +export type Address_Version_From_Events_Avg_Fields = { + __typename?: 'address_version_from_events_avg_fields'; + transaction_version?: Maybe; +}; + /** Boolean expression to filter rows from the table "address_version_from_events". All fields are combined with a logical 'AND'. 
*/ export type Address_Version_From_Events_Bool_Exp = { _and?: InputMaybe>; @@ -124,6 +225,20 @@ export type Address_Version_From_Events_Bool_Exp = { transaction_version?: InputMaybe; }; +/** aggregate max on columns */ +export type Address_Version_From_Events_Max_Fields = { + __typename?: 'address_version_from_events_max_fields'; + account_address?: Maybe; + transaction_version?: Maybe; +}; + +/** aggregate min on columns */ +export type Address_Version_From_Events_Min_Fields = { + __typename?: 'address_version_from_events_min_fields'; + account_address?: Maybe; + transaction_version?: Maybe; +}; + /** Ordering options when selecting data from "address_version_from_events". */ export type Address_Version_From_Events_Order_By = { account_address?: InputMaybe; @@ -138,6 +253,24 @@ export enum Address_Version_From_Events_Select_Column { TransactionVersion = 'transaction_version' } +/** aggregate stddev on columns */ +export type Address_Version_From_Events_Stddev_Fields = { + __typename?: 'address_version_from_events_stddev_fields'; + transaction_version?: Maybe; +}; + +/** aggregate stddev_pop on columns */ +export type Address_Version_From_Events_Stddev_Pop_Fields = { + __typename?: 'address_version_from_events_stddev_pop_fields'; + transaction_version?: Maybe; +}; + +/** aggregate stddev_samp on columns */ +export type Address_Version_From_Events_Stddev_Samp_Fields = { + __typename?: 'address_version_from_events_stddev_samp_fields'; + transaction_version?: Maybe; +}; + /** Streaming cursor of the table "address_version_from_events" */ export type Address_Version_From_Events_Stream_Cursor_Input = { /** Stream column input with initial value */ @@ -152,6 +285,30 @@ export type Address_Version_From_Events_Stream_Cursor_Value_Input = { transaction_version?: InputMaybe; }; +/** aggregate sum on columns */ +export type Address_Version_From_Events_Sum_Fields = { + __typename?: 'address_version_from_events_sum_fields'; + transaction_version?: Maybe; +}; + +/** aggregate var_pop on columns */ +export type Address_Version_From_Events_Var_Pop_Fields = { + __typename?: 'address_version_from_events_var_pop_fields'; + transaction_version?: Maybe; +}; + +/** aggregate var_samp on columns */ +export type Address_Version_From_Events_Var_Samp_Fields = { + __typename?: 'address_version_from_events_var_samp_fields'; + transaction_version?: Maybe; +}; + +/** aggregate variance on columns */ +export type Address_Version_From_Events_Variance_Fields = { + __typename?: 'address_version_from_events_variance_fields'; + transaction_version?: Maybe; +}; + /** columns and relationships of "address_version_from_move_resources" */ export type Address_Version_From_Move_Resources = { __typename?: 'address_version_from_move_resources'; @@ -209,6 +366,104 @@ export type Bigint_Comparison_Exp = { _nin?: InputMaybe>; }; +/** columns and relationships of "block_metadata_transactions" */ +export type Block_Metadata_Transactions = { + __typename?: 'block_metadata_transactions'; + block_height: Scalars['bigint']; + epoch: Scalars['bigint']; + failed_proposer_indices: Scalars['jsonb']; + id: Scalars['String']; + previous_block_votes_bitvec: Scalars['jsonb']; + proposer: Scalars['String']; + round: Scalars['bigint']; + timestamp: Scalars['timestamp']; + version: Scalars['bigint']; +}; + + +/** columns and relationships of "block_metadata_transactions" */ +export type Block_Metadata_TransactionsFailed_Proposer_IndicesArgs = { + path?: InputMaybe; +}; + + +/** columns and relationships of "block_metadata_transactions" */ +export type 
Block_Metadata_TransactionsPrevious_Block_Votes_BitvecArgs = { + path?: InputMaybe; +}; + +/** Boolean expression to filter rows from the table "block_metadata_transactions". All fields are combined with a logical 'AND'. */ +export type Block_Metadata_Transactions_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + block_height?: InputMaybe; + epoch?: InputMaybe; + failed_proposer_indices?: InputMaybe; + id?: InputMaybe; + previous_block_votes_bitvec?: InputMaybe; + proposer?: InputMaybe; + round?: InputMaybe; + timestamp?: InputMaybe; + version?: InputMaybe; +}; + +/** Ordering options when selecting data from "block_metadata_transactions". */ +export type Block_Metadata_Transactions_Order_By = { + block_height?: InputMaybe; + epoch?: InputMaybe; + failed_proposer_indices?: InputMaybe; + id?: InputMaybe; + previous_block_votes_bitvec?: InputMaybe; + proposer?: InputMaybe; + round?: InputMaybe; + timestamp?: InputMaybe; + version?: InputMaybe; +}; + +/** select columns of table "block_metadata_transactions" */ +export enum Block_Metadata_Transactions_Select_Column { + /** column name */ + BlockHeight = 'block_height', + /** column name */ + Epoch = 'epoch', + /** column name */ + FailedProposerIndices = 'failed_proposer_indices', + /** column name */ + Id = 'id', + /** column name */ + PreviousBlockVotesBitvec = 'previous_block_votes_bitvec', + /** column name */ + Proposer = 'proposer', + /** column name */ + Round = 'round', + /** column name */ + Timestamp = 'timestamp', + /** column name */ + Version = 'version' +} + +/** Streaming cursor of the table "block_metadata_transactions" */ +export type Block_Metadata_Transactions_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Block_Metadata_Transactions_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Block_Metadata_Transactions_Stream_Cursor_Value_Input = { + block_height?: InputMaybe; + epoch?: InputMaybe; + failed_proposer_indices?: InputMaybe; + id?: InputMaybe; + previous_block_votes_bitvec?: InputMaybe; + proposer?: InputMaybe; + round?: InputMaybe; + timestamp?: InputMaybe; + version?: InputMaybe; +}; + /** columns and relationships of "coin_activities" */ export type Coin_Activities = { __typename?: 'coin_activities'; @@ -242,6 +497,47 @@ export type Coin_ActivitiesAptos_NamesArgs = { where?: InputMaybe; }; +/** aggregated selection of "coin_activities" */ +export type Coin_Activities_Aggregate = { + __typename?: 'coin_activities_aggregate'; + aggregate?: Maybe; + nodes: Array; +}; + +/** aggregate fields of "coin_activities" */ +export type Coin_Activities_Aggregate_Fields = { + __typename?: 'coin_activities_aggregate_fields'; + avg?: Maybe; + count: Scalars['Int']; + max?: Maybe; + min?: Maybe; + stddev?: Maybe; + stddev_pop?: Maybe; + stddev_samp?: Maybe; + sum?: Maybe; + var_pop?: Maybe; + var_samp?: Maybe; + variance?: Maybe; +}; + + +/** aggregate fields of "coin_activities" */ +export type Coin_Activities_Aggregate_FieldsCountArgs = { + columns?: InputMaybe>; + distinct?: InputMaybe; +}; + +/** aggregate avg on columns */ +export type Coin_Activities_Avg_Fields = { + __typename?: 'coin_activities_avg_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + /** Boolean expression to filter rows from the table 
"coin_activities". All fields are combined with a logical 'AND'. */ export type Coin_Activities_Bool_Exp = { _and?: InputMaybe>; @@ -265,6 +561,40 @@ export type Coin_Activities_Bool_Exp = { transaction_version?: InputMaybe; }; +/** aggregate max on columns */ +export type Coin_Activities_Max_Fields = { + __typename?: 'coin_activities_max_fields'; + activity_type?: Maybe; + amount?: Maybe; + block_height?: Maybe; + coin_type?: Maybe; + entry_function_id_str?: Maybe; + event_account_address?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + owner_address?: Maybe; + transaction_timestamp?: Maybe; + transaction_version?: Maybe; +}; + +/** aggregate min on columns */ +export type Coin_Activities_Min_Fields = { + __typename?: 'coin_activities_min_fields'; + activity_type?: Maybe; + amount?: Maybe; + block_height?: Maybe; + coin_type?: Maybe; + entry_function_id_str?: Maybe; + event_account_address?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + owner_address?: Maybe; + transaction_timestamp?: Maybe; + transaction_version?: Maybe; +}; + /** Ordering options when selecting data from "coin_activities". */ export type Coin_Activities_Order_By = { activity_type?: InputMaybe; @@ -317,6 +647,39 @@ export enum Coin_Activities_Select_Column { TransactionVersion = 'transaction_version' } +/** aggregate stddev on columns */ +export type Coin_Activities_Stddev_Fields = { + __typename?: 'coin_activities_stddev_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + +/** aggregate stddev_pop on columns */ +export type Coin_Activities_Stddev_Pop_Fields = { + __typename?: 'coin_activities_stddev_pop_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + +/** aggregate stddev_samp on columns */ +export type Coin_Activities_Stddev_Samp_Fields = { + __typename?: 'coin_activities_stddev_samp_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + /** Streaming cursor of the table "coin_activities" */ export type Coin_Activities_Stream_Cursor_Input = { /** Stream column input with initial value */ @@ -343,6 +706,50 @@ export type Coin_Activities_Stream_Cursor_Value_Input = { transaction_version?: InputMaybe; }; +/** aggregate sum on columns */ +export type Coin_Activities_Sum_Fields = { + __typename?: 'coin_activities_sum_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + +/** aggregate var_pop on columns */ +export type Coin_Activities_Var_Pop_Fields = { + __typename?: 'coin_activities_var_pop_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + +/** aggregate var_samp on columns */ +export type Coin_Activities_Var_Samp_Fields = { + __typename?: 'coin_activities_var_samp_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + +/** aggregate variance on columns */ +export type 
Coin_Activities_Variance_Fields = { + __typename?: 'coin_activities_variance_fields'; + amount?: Maybe; + block_height?: Maybe; + event_creation_number?: Maybe; + event_index?: Maybe; + event_sequence_number?: Maybe; + transaction_version?: Maybe; +}; + /** columns and relationships of "coin_balances" */ export type Coin_Balances = { __typename?: 'coin_balances'; @@ -999,29 +1406,193 @@ export enum Current_Collection_Datas_Select_Column { UriMutable = 'uri_mutable' } -/** Streaming cursor of the table "current_collection_datas" */ -export type Current_Collection_Datas_Stream_Cursor_Input = { +/** Streaming cursor of the table "current_collection_datas" */ +export type Current_Collection_Datas_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Current_Collection_Datas_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Current_Collection_Datas_Stream_Cursor_Value_Input = { + collection_data_id_hash?: InputMaybe; + collection_name?: InputMaybe; + creator_address?: InputMaybe; + description?: InputMaybe; + description_mutable?: InputMaybe; + last_transaction_timestamp?: InputMaybe; + last_transaction_version?: InputMaybe; + maximum?: InputMaybe; + maximum_mutable?: InputMaybe; + metadata_uri?: InputMaybe; + supply?: InputMaybe; + table_handle?: InputMaybe; + uri_mutable?: InputMaybe; +}; + +/** columns and relationships of "current_collection_ownership_v2_view" */ +export type Current_Collection_Ownership_V2_View = { + __typename?: 'current_collection_ownership_v2_view'; + collection_id?: Maybe; + /** An object relationship */ + current_collection?: Maybe; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; + owner_address?: Maybe; +}; + +/** aggregated selection of "current_collection_ownership_v2_view" */ +export type Current_Collection_Ownership_V2_View_Aggregate = { + __typename?: 'current_collection_ownership_v2_view_aggregate'; + aggregate?: Maybe; + nodes: Array; +}; + +/** aggregate fields of "current_collection_ownership_v2_view" */ +export type Current_Collection_Ownership_V2_View_Aggregate_Fields = { + __typename?: 'current_collection_ownership_v2_view_aggregate_fields'; + avg?: Maybe; + count: Scalars['Int']; + max?: Maybe; + min?: Maybe; + stddev?: Maybe; + stddev_pop?: Maybe; + stddev_samp?: Maybe; + sum?: Maybe; + var_pop?: Maybe; + var_samp?: Maybe; + variance?: Maybe; +}; + + +/** aggregate fields of "current_collection_ownership_v2_view" */ +export type Current_Collection_Ownership_V2_View_Aggregate_FieldsCountArgs = { + columns?: InputMaybe>; + distinct?: InputMaybe; +}; + +/** aggregate avg on columns */ +export type Current_Collection_Ownership_V2_View_Avg_Fields = { + __typename?: 'current_collection_ownership_v2_view_avg_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; +}; + +/** Boolean expression to filter rows from the table "current_collection_ownership_v2_view". All fields are combined with a logical 'AND'. 
*/ +export type Current_Collection_Ownership_V2_View_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + collection_id?: InputMaybe; + current_collection?: InputMaybe; + distinct_tokens?: InputMaybe; + last_transaction_version?: InputMaybe; + owner_address?: InputMaybe; +}; + +/** aggregate max on columns */ +export type Current_Collection_Ownership_V2_View_Max_Fields = { + __typename?: 'current_collection_ownership_v2_view_max_fields'; + collection_id?: Maybe; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; + owner_address?: Maybe; +}; + +/** aggregate min on columns */ +export type Current_Collection_Ownership_V2_View_Min_Fields = { + __typename?: 'current_collection_ownership_v2_view_min_fields'; + collection_id?: Maybe; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; + owner_address?: Maybe; +}; + +/** Ordering options when selecting data from "current_collection_ownership_v2_view". */ +export type Current_Collection_Ownership_V2_View_Order_By = { + collection_id?: InputMaybe; + current_collection?: InputMaybe; + distinct_tokens?: InputMaybe; + last_transaction_version?: InputMaybe; + owner_address?: InputMaybe; +}; + +/** select columns of table "current_collection_ownership_v2_view" */ +export enum Current_Collection_Ownership_V2_View_Select_Column { + /** column name */ + CollectionId = 'collection_id', + /** column name */ + DistinctTokens = 'distinct_tokens', + /** column name */ + LastTransactionVersion = 'last_transaction_version', + /** column name */ + OwnerAddress = 'owner_address' +} + +/** aggregate stddev on columns */ +export type Current_Collection_Ownership_V2_View_Stddev_Fields = { + __typename?: 'current_collection_ownership_v2_view_stddev_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; +}; + +/** aggregate stddev_pop on columns */ +export type Current_Collection_Ownership_V2_View_Stddev_Pop_Fields = { + __typename?: 'current_collection_ownership_v2_view_stddev_pop_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; +}; + +/** aggregate stddev_samp on columns */ +export type Current_Collection_Ownership_V2_View_Stddev_Samp_Fields = { + __typename?: 'current_collection_ownership_v2_view_stddev_samp_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; +}; + +/** Streaming cursor of the table "current_collection_ownership_v2_view" */ +export type Current_Collection_Ownership_V2_View_Stream_Cursor_Input = { /** Stream column input with initial value */ - initial_value: Current_Collection_Datas_Stream_Cursor_Value_Input; + initial_value: Current_Collection_Ownership_V2_View_Stream_Cursor_Value_Input; /** cursor ordering */ ordering?: InputMaybe; }; /** Initial value of the column from where the streaming should start */ -export type Current_Collection_Datas_Stream_Cursor_Value_Input = { - collection_data_id_hash?: InputMaybe; - collection_name?: InputMaybe; - creator_address?: InputMaybe; - description?: InputMaybe; - description_mutable?: InputMaybe; - last_transaction_timestamp?: InputMaybe; +export type Current_Collection_Ownership_V2_View_Stream_Cursor_Value_Input = { + collection_id?: InputMaybe; + distinct_tokens?: InputMaybe; last_transaction_version?: InputMaybe; - maximum?: InputMaybe; - maximum_mutable?: InputMaybe; - metadata_uri?: InputMaybe; - supply?: InputMaybe; - table_handle?: InputMaybe; - uri_mutable?: InputMaybe; + owner_address?: InputMaybe; +}; + +/** aggregate sum on columns */ +export type 
Current_Collection_Ownership_V2_View_Sum_Fields = { + __typename?: 'current_collection_ownership_v2_view_sum_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; +}; + +/** aggregate var_pop on columns */ +export type Current_Collection_Ownership_V2_View_Var_Pop_Fields = { + __typename?: 'current_collection_ownership_v2_view_var_pop_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; +}; + +/** aggregate var_samp on columns */ +export type Current_Collection_Ownership_V2_View_Var_Samp_Fields = { + __typename?: 'current_collection_ownership_v2_view_var_samp_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; +}; + +/** aggregate variance on columns */ +export type Current_Collection_Ownership_V2_View_Variance_Fields = { + __typename?: 'current_collection_ownership_v2_view_variance_fields'; + distinct_tokens?: Maybe; + last_transaction_version?: Maybe; }; /** columns and relationships of "current_collection_ownership_view" */ @@ -1208,11 +1779,86 @@ export type Current_Collections_V2_Stream_Cursor_Value_Input = { uri?: InputMaybe; }; +/** columns and relationships of "current_delegated_staking_pool_balances" */ +export type Current_Delegated_Staking_Pool_Balances = { + __typename?: 'current_delegated_staking_pool_balances'; + active_table_handle: Scalars['String']; + inactive_table_handle: Scalars['String']; + last_transaction_version: Scalars['bigint']; + operator_commission_percentage: Scalars['numeric']; + staking_pool_address: Scalars['String']; + total_coins: Scalars['numeric']; + total_shares: Scalars['numeric']; +}; + +/** Boolean expression to filter rows from the table "current_delegated_staking_pool_balances". All fields are combined with a logical 'AND'. */ +export type Current_Delegated_Staking_Pool_Balances_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + active_table_handle?: InputMaybe; + inactive_table_handle?: InputMaybe; + last_transaction_version?: InputMaybe; + operator_commission_percentage?: InputMaybe; + staking_pool_address?: InputMaybe; + total_coins?: InputMaybe; + total_shares?: InputMaybe; +}; + +/** Ordering options when selecting data from "current_delegated_staking_pool_balances". 
*/ +export type Current_Delegated_Staking_Pool_Balances_Order_By = { + active_table_handle?: InputMaybe; + inactive_table_handle?: InputMaybe; + last_transaction_version?: InputMaybe; + operator_commission_percentage?: InputMaybe; + staking_pool_address?: InputMaybe; + total_coins?: InputMaybe; + total_shares?: InputMaybe; +}; + +/** select columns of table "current_delegated_staking_pool_balances" */ +export enum Current_Delegated_Staking_Pool_Balances_Select_Column { + /** column name */ + ActiveTableHandle = 'active_table_handle', + /** column name */ + InactiveTableHandle = 'inactive_table_handle', + /** column name */ + LastTransactionVersion = 'last_transaction_version', + /** column name */ + OperatorCommissionPercentage = 'operator_commission_percentage', + /** column name */ + StakingPoolAddress = 'staking_pool_address', + /** column name */ + TotalCoins = 'total_coins', + /** column name */ + TotalShares = 'total_shares' +} + +/** Streaming cursor of the table "current_delegated_staking_pool_balances" */ +export type Current_Delegated_Staking_Pool_Balances_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Current_Delegated_Staking_Pool_Balances_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Current_Delegated_Staking_Pool_Balances_Stream_Cursor_Value_Input = { + active_table_handle?: InputMaybe; + inactive_table_handle?: InputMaybe; + last_transaction_version?: InputMaybe; + operator_commission_percentage?: InputMaybe; + staking_pool_address?: InputMaybe; + total_coins?: InputMaybe; + total_shares?: InputMaybe; +}; + /** columns and relationships of "current_delegator_balances" */ export type Current_Delegator_Balances = { __typename?: 'current_delegator_balances'; delegator_address: Scalars['String']; last_transaction_version: Scalars['bigint']; + parent_table_handle: Scalars['String']; pool_address: Scalars['String']; pool_type: Scalars['String']; shares: Scalars['numeric']; @@ -1226,6 +1872,7 @@ export type Current_Delegator_Balances_Bool_Exp = { _or?: InputMaybe>; delegator_address?: InputMaybe; last_transaction_version?: InputMaybe; + parent_table_handle?: InputMaybe; pool_address?: InputMaybe; pool_type?: InputMaybe; shares?: InputMaybe; @@ -1236,6 +1883,7 @@ export type Current_Delegator_Balances_Bool_Exp = { export type Current_Delegator_Balances_Order_By = { delegator_address?: InputMaybe; last_transaction_version?: InputMaybe; + parent_table_handle?: InputMaybe; pool_address?: InputMaybe; pool_type?: InputMaybe; shares?: InputMaybe; @@ -1249,6 +1897,8 @@ export enum Current_Delegator_Balances_Select_Column { /** column name */ LastTransactionVersion = 'last_transaction_version', /** column name */ + ParentTableHandle = 'parent_table_handle', + /** column name */ PoolAddress = 'pool_address', /** column name */ PoolType = 'pool_type', @@ -1270,6 +1920,7 @@ export type Current_Delegator_Balances_Stream_Cursor_Input = { export type Current_Delegator_Balances_Stream_Cursor_Value_Input = { delegator_address?: InputMaybe; last_transaction_version?: InputMaybe; + parent_table_handle?: InputMaybe; pool_address?: InputMaybe; pool_type?: InputMaybe; shares?: InputMaybe; @@ -1281,10 +1932,22 @@ export type Current_Staking_Pool_Voter = { __typename?: 'current_staking_pool_voter'; last_transaction_version: Scalars['bigint']; operator_address: Scalars['String']; + /** An array relationship */ + operator_aptos_name: Array; 
staking_pool_address: Scalars['String']; voter_address: Scalars['String']; }; + +/** columns and relationships of "current_staking_pool_voter" */ +export type Current_Staking_Pool_VoterOperator_Aptos_NameArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + /** Boolean expression to filter rows from the table "current_staking_pool_voter". All fields are combined with a logical 'AND'. */ export type Current_Staking_Pool_Voter_Bool_Exp = { _and?: InputMaybe>; @@ -1292,6 +1955,7 @@ export type Current_Staking_Pool_Voter_Bool_Exp = { _or?: InputMaybe>; last_transaction_version?: InputMaybe; operator_address?: InputMaybe; + operator_aptos_name?: InputMaybe; staking_pool_address?: InputMaybe; voter_address?: InputMaybe; }; @@ -1300,6 +1964,7 @@ export type Current_Staking_Pool_Voter_Bool_Exp = { export type Current_Staking_Pool_Voter_Order_By = { last_transaction_version?: InputMaybe; operator_address?: InputMaybe; + operator_aptos_name_aggregate?: InputMaybe; staking_pool_address?: InputMaybe; voter_address?: InputMaybe; }; @@ -2296,12 +2961,17 @@ export type Current_Token_Pending_Claims = { __typename?: 'current_token_pending_claims'; amount: Scalars['numeric']; collection_data_id_hash: Scalars['String']; + collection_id: Scalars['String']; collection_name: Scalars['String']; creator_address: Scalars['String']; /** An object relationship */ current_collection_data?: Maybe; /** An object relationship */ + current_collection_v2?: Maybe; + /** An object relationship */ current_token_data?: Maybe; + /** An object relationship */ + current_token_data_v2?: Maybe; from_address: Scalars['String']; last_transaction_timestamp: Scalars['timestamp']; last_transaction_version: Scalars['bigint']; @@ -2311,6 +2981,7 @@ export type Current_Token_Pending_Claims = { to_address: Scalars['String']; /** An object relationship */ token?: Maybe; + token_data_id: Scalars['String']; token_data_id_hash: Scalars['String']; }; @@ -2321,10 +2992,13 @@ export type Current_Token_Pending_Claims_Bool_Exp = { _or?: InputMaybe>; amount?: InputMaybe; collection_data_id_hash?: InputMaybe; + collection_id?: InputMaybe; collection_name?: InputMaybe; creator_address?: InputMaybe; current_collection_data?: InputMaybe; + current_collection_v2?: InputMaybe; current_token_data?: InputMaybe; + current_token_data_v2?: InputMaybe; from_address?: InputMaybe; last_transaction_timestamp?: InputMaybe; last_transaction_version?: InputMaybe; @@ -2333,6 +3007,7 @@ export type Current_Token_Pending_Claims_Bool_Exp = { table_handle?: InputMaybe; to_address?: InputMaybe; token?: InputMaybe; + token_data_id?: InputMaybe; token_data_id_hash?: InputMaybe; }; @@ -2340,10 +3015,13 @@ export type Current_Token_Pending_Claims_Bool_Exp = { export type Current_Token_Pending_Claims_Order_By = { amount?: InputMaybe; collection_data_id_hash?: InputMaybe; + collection_id?: InputMaybe; collection_name?: InputMaybe; creator_address?: InputMaybe; current_collection_data?: InputMaybe; + current_collection_v2?: InputMaybe; current_token_data?: InputMaybe; + current_token_data_v2?: InputMaybe; from_address?: InputMaybe; last_transaction_timestamp?: InputMaybe; last_transaction_version?: InputMaybe; @@ -2352,6 +3030,7 @@ export type Current_Token_Pending_Claims_Order_By = { table_handle?: InputMaybe; to_address?: InputMaybe; token?: InputMaybe; + token_data_id?: InputMaybe; token_data_id_hash?: InputMaybe; }; @@ -2362,6 +3041,8 @@ export enum Current_Token_Pending_Claims_Select_Column { 
/** column name */ CollectionDataIdHash = 'collection_data_id_hash', /** column name */ + CollectionId = 'collection_id', + /** column name */ CollectionName = 'collection_name', /** column name */ CreatorAddress = 'creator_address', @@ -2380,6 +3061,8 @@ export enum Current_Token_Pending_Claims_Select_Column { /** column name */ ToAddress = 'to_address', /** column name */ + TokenDataId = 'token_data_id', + /** column name */ TokenDataIdHash = 'token_data_id_hash' } @@ -2395,6 +3078,7 @@ export type Current_Token_Pending_Claims_Stream_Cursor_Input = { export type Current_Token_Pending_Claims_Stream_Cursor_Value_Input = { amount?: InputMaybe; collection_data_id_hash?: InputMaybe; + collection_id?: InputMaybe; collection_name?: InputMaybe; creator_address?: InputMaybe; from_address?: InputMaybe; @@ -2404,6 +3088,7 @@ export type Current_Token_Pending_Claims_Stream_Cursor_Value_Input = { property_version?: InputMaybe; table_handle?: InputMaybe; to_address?: InputMaybe; + token_data_id?: InputMaybe; token_data_id_hash?: InputMaybe; }; @@ -2531,6 +3216,58 @@ export type Delegated_Staking_Pools_Stream_Cursor_Value_Input = { staking_pool_address?: InputMaybe; }; +/** columns and relationships of "delegator_distinct_pool" */ +export type Delegator_Distinct_Pool = { + __typename?: 'delegator_distinct_pool'; + /** An object relationship */ + current_pool_balance?: Maybe; + delegator_address?: Maybe; + pool_address?: Maybe; + /** An object relationship */ + staking_pool_metadata?: Maybe; +}; + +/** Boolean expression to filter rows from the table "delegator_distinct_pool". All fields are combined with a logical 'AND'. */ +export type Delegator_Distinct_Pool_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + current_pool_balance?: InputMaybe; + delegator_address?: InputMaybe; + pool_address?: InputMaybe; + staking_pool_metadata?: InputMaybe; +}; + +/** Ordering options when selecting data from "delegator_distinct_pool". 
*/ +export type Delegator_Distinct_Pool_Order_By = { + current_pool_balance?: InputMaybe; + delegator_address?: InputMaybe; + pool_address?: InputMaybe; + staking_pool_metadata?: InputMaybe; +}; + +/** select columns of table "delegator_distinct_pool" */ +export enum Delegator_Distinct_Pool_Select_Column { + /** column name */ + DelegatorAddress = 'delegator_address', + /** column name */ + PoolAddress = 'pool_address' +} + +/** Streaming cursor of the table "delegator_distinct_pool" */ +export type Delegator_Distinct_Pool_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Delegator_Distinct_Pool_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Delegator_Distinct_Pool_Stream_Cursor_Value_Input = { + delegator_address?: InputMaybe; + pool_address?: InputMaybe; +}; + /** columns and relationships of "events" */ export type Events = { __typename?: 'events'; @@ -3172,11 +3909,20 @@ export type Proposal_Votes_Variance_Fields = { export type Query_Root = { __typename?: 'query_root'; + /** fetch data from the table: "address_events_summary" */ + address_events_summary: Array; /** fetch data from the table: "address_version_from_events" */ address_version_from_events: Array; + /** fetch aggregated fields from the table: "address_version_from_events" */ + address_version_from_events_aggregate: Address_Version_From_Events_Aggregate; /** fetch data from the table: "address_version_from_move_resources" */ address_version_from_move_resources: Array; + /** fetch data from the table: "block_metadata_transactions" */ + block_metadata_transactions: Array; + /** fetch data from the table: "block_metadata_transactions" using primary key columns */ + block_metadata_transactions_by_pk?: Maybe; coin_activities: Array; + coin_activities_aggregate: Coin_Activities_Aggregate; /** fetch data from the table: "coin_activities" using primary key columns */ coin_activities_by_pk?: Maybe; /** fetch data from the table: "coin_balances" */ @@ -3207,12 +3953,20 @@ export type Query_Root = { current_collection_datas: Array; /** fetch data from the table: "current_collection_datas" using primary key columns */ current_collection_datas_by_pk?: Maybe; + /** fetch data from the table: "current_collection_ownership_v2_view" */ + current_collection_ownership_v2_view: Array; + /** fetch aggregated fields from the table: "current_collection_ownership_v2_view" */ + current_collection_ownership_v2_view_aggregate: Current_Collection_Ownership_V2_View_Aggregate; /** fetch data from the table: "current_collection_ownership_view" */ current_collection_ownership_view: Array; /** fetch data from the table: "current_collections_v2" */ current_collections_v2: Array; /** fetch data from the table: "current_collections_v2" using primary key columns */ current_collections_v2_by_pk?: Maybe; + /** fetch data from the table: "current_delegated_staking_pool_balances" */ + current_delegated_staking_pool_balances: Array; + /** fetch data from the table: "current_delegated_staking_pool_balances" using primary key columns */ + current_delegated_staking_pool_balances_by_pk?: Maybe; /** fetch data from the table: "current_delegator_balances" */ current_delegator_balances: Array; /** fetch data from the table: "current_delegator_balances" using primary key columns */ @@ -3257,6 +4011,8 @@ export type Query_Root = { delegated_staking_pools: Array; /** fetch data from the table: 
"delegated_staking_pools" using primary key columns */ delegated_staking_pools_by_pk?: Maybe; + /** fetch data from the table: "delegator_distinct_pool" */ + delegator_distinct_pool: Array; /** fetch data from the table: "events" */ events: Array; /** fetch data from the table: "events" using primary key columns */ @@ -3316,6 +4072,15 @@ export type Query_Root = { }; +export type Query_RootAddress_Events_SummaryArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export type Query_RootAddress_Version_From_EventsArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -3325,6 +4090,15 @@ export type Query_RootAddress_Version_From_EventsArgs = { }; +export type Query_RootAddress_Version_From_Events_AggregateArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export type Query_RootAddress_Version_From_Move_ResourcesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -3334,6 +4108,20 @@ export type Query_RootAddress_Version_From_Move_ResourcesArgs = { }; +export type Query_RootBlock_Metadata_TransactionsArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Query_RootBlock_Metadata_Transactions_By_PkArgs = { + version: Scalars['bigint']; +}; + + export type Query_RootCoin_ActivitiesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -3343,6 +4131,15 @@ export type Query_RootCoin_ActivitiesArgs = { }; +export type Query_RootCoin_Activities_AggregateArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export type Query_RootCoin_Activities_By_PkArgs = { event_account_address: Scalars['String']; event_creation_number: Scalars['bigint']; @@ -3455,6 +4252,24 @@ export type Query_RootCurrent_Collection_Datas_By_PkArgs = { }; +export type Query_RootCurrent_Collection_Ownership_V2_ViewArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Query_RootCurrent_Collection_Ownership_V2_View_AggregateArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export type Query_RootCurrent_Collection_Ownership_ViewArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -3478,6 +4293,20 @@ export type Query_RootCurrent_Collections_V2_By_PkArgs = { }; +export type Query_RootCurrent_Delegated_Staking_Pool_BalancesArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Query_RootCurrent_Delegated_Staking_Pool_Balances_By_PkArgs = { + staking_pool_address: Scalars['String']; +}; + + export type Query_RootCurrent_Delegator_BalancesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -3491,6 +4320,7 @@ export type Query_RootCurrent_Delegator_Balances_By_PkArgs = { delegator_address: Scalars['String']; pool_address: Scalars['String']; pool_type: Scalars['String']; + table_handle: Scalars['String']; }; @@ -3648,6 +4478,15 @@ export type Query_RootDelegated_Staking_Pools_By_PkArgs = { }; +export type Query_RootDelegator_Distinct_PoolArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export 
type Query_RootEventsArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -3876,15 +4715,28 @@ export type Query_RootUser_Transactions_By_PkArgs = { export type Subscription_Root = { __typename?: 'subscription_root'; + /** fetch data from the table: "address_events_summary" */ + address_events_summary: Array; + /** fetch data from the table in a streaming manner : "address_events_summary" */ + address_events_summary_stream: Array; /** fetch data from the table: "address_version_from_events" */ address_version_from_events: Array; + /** fetch aggregated fields from the table: "address_version_from_events" */ + address_version_from_events_aggregate: Address_Version_From_Events_Aggregate; /** fetch data from the table in a streaming manner : "address_version_from_events" */ address_version_from_events_stream: Array; /** fetch data from the table: "address_version_from_move_resources" */ address_version_from_move_resources: Array; /** fetch data from the table in a streaming manner : "address_version_from_move_resources" */ address_version_from_move_resources_stream: Array; + /** fetch data from the table: "block_metadata_transactions" */ + block_metadata_transactions: Array; + /** fetch data from the table: "block_metadata_transactions" using primary key columns */ + block_metadata_transactions_by_pk?: Maybe; + /** fetch data from the table in a streaming manner : "block_metadata_transactions" */ + block_metadata_transactions_stream: Array; coin_activities: Array; + coin_activities_aggregate: Coin_Activities_Aggregate; /** fetch data from the table: "coin_activities" using primary key columns */ coin_activities_by_pk?: Maybe; /** fetch data from the table in a streaming manner : "coin_activities" */ @@ -3931,6 +4783,12 @@ export type Subscription_Root = { current_collection_datas_by_pk?: Maybe; /** fetch data from the table in a streaming manner : "current_collection_datas" */ current_collection_datas_stream: Array; + /** fetch data from the table: "current_collection_ownership_v2_view" */ + current_collection_ownership_v2_view: Array; + /** fetch aggregated fields from the table: "current_collection_ownership_v2_view" */ + current_collection_ownership_v2_view_aggregate: Current_Collection_Ownership_V2_View_Aggregate; + /** fetch data from the table in a streaming manner : "current_collection_ownership_v2_view" */ + current_collection_ownership_v2_view_stream: Array; /** fetch data from the table: "current_collection_ownership_view" */ current_collection_ownership_view: Array; /** fetch data from the table in a streaming manner : "current_collection_ownership_view" */ @@ -3941,6 +4799,12 @@ export type Subscription_Root = { current_collections_v2_by_pk?: Maybe; /** fetch data from the table in a streaming manner : "current_collections_v2" */ current_collections_v2_stream: Array; + /** fetch data from the table: "current_delegated_staking_pool_balances" */ + current_delegated_staking_pool_balances: Array; + /** fetch data from the table: "current_delegated_staking_pool_balances" using primary key columns */ + current_delegated_staking_pool_balances_by_pk?: Maybe; + /** fetch data from the table in a streaming manner : "current_delegated_staking_pool_balances" */ + current_delegated_staking_pool_balances_stream: Array; /** fetch data from the table: "current_delegator_balances" */ current_delegator_balances: Array; /** fetch data from the table: "current_delegator_balances" using primary key columns */ @@ -4005,6 +4869,10 @@ export type Subscription_Root = { delegated_staking_pools_by_pk?: 
Maybe; /** fetch data from the table in a streaming manner : "delegated_staking_pools" */ delegated_staking_pools_stream: Array; + /** fetch data from the table: "delegator_distinct_pool" */ + delegator_distinct_pool: Array; + /** fetch data from the table in a streaming manner : "delegator_distinct_pool" */ + delegator_distinct_pool_stream: Array; /** fetch data from the table: "events" */ events: Array; /** fetch data from the table: "events" using primary key columns */ @@ -4092,6 +4960,22 @@ export type Subscription_Root = { }; +export type Subscription_RootAddress_Events_SummaryArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootAddress_Events_Summary_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootAddress_Version_From_EventsArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -4101,6 +4985,15 @@ export type Subscription_RootAddress_Version_From_EventsArgs = { }; +export type Subscription_RootAddress_Version_From_Events_AggregateArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export type Subscription_RootAddress_Version_From_Events_StreamArgs = { batch_size: Scalars['Int']; cursor: Array>; @@ -4124,6 +5017,27 @@ export type Subscription_RootAddress_Version_From_Move_Resources_StreamArgs = { }; +export type Subscription_RootBlock_Metadata_TransactionsArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootBlock_Metadata_Transactions_By_PkArgs = { + version: Scalars['bigint']; +}; + + +export type Subscription_RootBlock_Metadata_Transactions_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootCoin_ActivitiesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -4133,6 +5047,15 @@ export type Subscription_RootCoin_ActivitiesArgs = { }; +export type Subscription_RootCoin_Activities_AggregateArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export type Subscription_RootCoin_Activities_By_PkArgs = { event_account_address: Scalars['String']; event_creation_number: Scalars['bigint']; @@ -4301,6 +5224,31 @@ export type Subscription_RootCurrent_Collection_Datas_StreamArgs = { }; +export type Subscription_RootCurrent_Collection_Ownership_V2_ViewArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootCurrent_Collection_Ownership_V2_View_AggregateArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootCurrent_Collection_Ownership_V2_View_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootCurrent_Collection_Ownership_ViewArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -4338,6 +5286,27 @@ export type Subscription_RootCurrent_Collections_V2_StreamArgs = { }; +export type Subscription_RootCurrent_Delegated_Staking_Pool_BalancesArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: 
InputMaybe; +}; + + +export type Subscription_RootCurrent_Delegated_Staking_Pool_Balances_By_PkArgs = { + staking_pool_address: Scalars['String']; +}; + + +export type Subscription_RootCurrent_Delegated_Staking_Pool_Balances_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootCurrent_Delegator_BalancesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -4351,6 +5320,7 @@ export type Subscription_RootCurrent_Delegator_Balances_By_PkArgs = { delegator_address: Scalars['String']; pool_address: Scalars['String']; pool_type: Scalars['String']; + table_handle: Scalars['String']; }; @@ -4578,6 +5548,22 @@ export type Subscription_RootDelegated_Staking_Pools_StreamArgs = { }; +export type Subscription_RootDelegator_Distinct_PoolArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootDelegator_Distinct_Pool_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootEventsArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; diff --git a/ecosystem/typescript/sdk/src/indexer/queries/getCollectionsWithOwnedTokens.graphql b/ecosystem/typescript/sdk/src/indexer/queries/getCollectionsWithOwnedTokens.graphql new file mode 100644 index 0000000000000..e919780286597 --- /dev/null +++ b/ecosystem/typescript/sdk/src/indexer/queries/getCollectionsWithOwnedTokens.graphql @@ -0,0 +1,26 @@ +query getCollectionsWithOwnedTokens( + $where_condition: current_collection_ownership_v2_view_bool_exp! + $offset: Int + $limit: Int +) { + current_collection_ownership_v2_view( + where: $where_condition + order_by: { last_transaction_version: desc } + offset: $offset + limit: $limit + ) { + current_collection { + creator_address + collection_name + token_standard + collection_id + description + table_handle_v1 + uri + total_minted_v2 + max_supply + } + distinct_tokens + last_transaction_version + } +} diff --git a/ecosystem/typescript/sdk/src/indexer/queries/getOwnedTokens.graphql b/ecosystem/typescript/sdk/src/indexer/queries/getOwnedTokens.graphql index 1920e431db6bc..1ac45b0dfbed7 100644 --- a/ecosystem/typescript/sdk/src/indexer/queries/getOwnedTokens.graphql +++ b/ecosystem/typescript/sdk/src/indexer/queries/getOwnedTokens.graphql @@ -1,10 +1,6 @@ #import "./CurrentTokenOwnershipFieldsFragment"; -query getOwnedTokens($address: String!, $offset: Int, $limit: Int) { - current_token_ownerships_v2( - where: { owner_address: { _eq: $address }, amount: { _gt: 0 } } - offset: $offset - limit: $limit - ) { +query getOwnedTokens($where_condition: current_token_ownerships_v2_bool_exp!, $offset: Int, $limit: Int) { + current_token_ownerships_v2(where: $where_condition, offset: $offset, limit: $limit) { ...CurrentTokenOwnershipFields } } diff --git a/ecosystem/typescript/sdk/src/indexer/queries/getTokenOwnedFromCollection.graphql b/ecosystem/typescript/sdk/src/indexer/queries/getTokenOwnedFromCollection.graphql index 978fa420c16dd..fae4705a36e92 100644 --- a/ecosystem/typescript/sdk/src/indexer/queries/getTokenOwnedFromCollection.graphql +++ b/ecosystem/typescript/sdk/src/indexer/queries/getTokenOwnedFromCollection.graphql @@ -1,14 +1,6 @@ #import "./CurrentTokenOwnershipFieldsFragment"; -query getTokenOwnedFromCollection($collection_id: String!, $owner_address: String!, $offset: Int, $limit: Int) { - current_token_ownerships_v2( - where: { - owner_address: { _eq: $owner_address } - 
current_token_data: { collection_id: { _eq: $collection_id } } - amount: { _gt: 0 } - } - offset: $offset - limit: $limit - ) { +query getTokenOwnedFromCollection($where_condition: current_token_ownerships_v2_bool_exp!, $offset: Int, $limit: Int) { + current_token_ownerships_v2(where: $where_condition, offset: $offset, limit: $limit) { ...CurrentTokenOwnershipFields } } diff --git a/ecosystem/typescript/sdk/src/plugins/coin_client.ts b/ecosystem/typescript/sdk/src/plugins/coin_client.ts index 8eff1e09d1337..4d45f86a4472a 100644 --- a/ecosystem/typescript/sdk/src/plugins/coin_client.ts +++ b/ecosystem/typescript/sdk/src/plugins/coin_client.ts @@ -1,10 +1,12 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 - import { AptosAccount, getAddressFromAccountOrAddress } from "../account/aptos_account"; import { AptosClient, OptionalTransactionArgs } from "../providers/aptos_client"; -import { MaybeHexString, APTOS_COIN } from "../utils"; +import { MaybeHexString, APTOS_COIN, NetworkToIndexerAPI, NodeAPIToNetwork } from "../utils"; import { TransactionBuilderRemoteABI } from "../transaction_builder"; +import { FungibleAssetClient } from "./fungible_asset_client"; +import { Provider } from "../providers"; +import { AccountAddress } from "../aptos_types"; /** * Class for working with the coin module, such as transferring coins and @@ -32,6 +34,11 @@ export class CoinClient { * this to true, the transaction will fail if the receiver account does not * exist on-chain. * + * The TS SDK supports fungible assets operations. If you want to use CoinClient + * with this feature, set the `coinType` to be the fungible asset metadata address. + * This option uses the `FungibleAssetClient` class and queries the + * fungible asset primary store. + * * @param from Account sending the coins * @param to Account to receive the coins * @param amount Number of coins to transfer @@ -45,8 +52,10 @@ export class CoinClient { to: AptosAccount | MaybeHexString, amount: number | bigint, extraArgs?: OptionalTransactionArgs & { - // The coin type to use, defaults to 0x1::aptos_coin::AptosCoin - coinType?: string; + // The coin type to use, defaults to 0x1::aptos_coin::AptosCoin. + // If you want to transfer a fungible asset, set this param to be the + // fungible asset address + coinType?: string | MaybeHexString; // If set, create the `receiver` account if it doesn't exist on-chain. // This is done by calling `0x1::aptos_account::transfer` instead, which // will create the account on-chain first if it doesn't exist before @@ -56,6 +65,23 @@ export class CoinClient { createReceiverIfMissing?: boolean; }, ): Promise { + if (extraArgs?.coinType && AccountAddress.isValid(extraArgs.coinType)) { + /* eslint-disable no-console */ + console.warn("to transfer a fungible asset, use `FungibleAssetClient()` class for better support"); + const provider = new Provider({ + fullnodeUrl: this.aptosClient.nodeUrl, + indexerUrl: NetworkToIndexerAPI[NodeAPIToNetwork[this.aptosClient.nodeUrl]] ?? this.aptosClient.nodeUrl, + }); + const fungibleAsset = new FungibleAssetClient(provider); + const txnHash = await fungibleAsset.transfer( + from, + extraArgs?.coinType, + getAddressFromAccountOrAddress(to), + amount, + ); + return txnHash; + } + // If none is explicitly given, use 0x1::aptos_coin::AptosCoin as the coin type. const coinTypeToTransfer = extraArgs?.coinType ?? 
APTOS_COIN; @@ -67,7 +93,7 @@ export class CoinClient { const toAddress = getAddressFromAccountOrAddress(to); const builder = new TransactionBuilderRemoteABI(this.aptosClient, { sender: from.address(), ...extraArgs }); - const rawTxn = await builder.build(func, [coinTypeToTransfer], [toAddress, amount]); + const rawTxn = await builder.build(func, [coinTypeToTransfer as string], [toAddress, amount]); const bcsTxn = AptosClient.generateBCSTransaction(from, rawTxn); const pendingTransaction = await this.aptosClient.submitSignedBCSTransaction(bcsTxn); @@ -78,6 +104,13 @@ export class CoinClient { * Get the balance of the account. By default it checks the balance of * 0x1::aptos_coin::AptosCoin, but you can specify a different coin type. * + * to use a different type, set the `coinType` to be the fungible asset type. + * + * The TS SDK supports fungible assets operations. If you want to use CoinClient + * with this feature, set the `coinType` to be the fungible asset metadata address. + * This option uses the FungibleAssetClient class and queries the + * fungible asset primary store. + * * @param account Account that you want to get the balance of. * @param extraArgs Extra args for checking the balance. * @returns Promise that resolves to the balance as a bigint. @@ -86,10 +119,27 @@ export class CoinClient { async checkBalance( account: AptosAccount | MaybeHexString, extraArgs?: { - // The coin type to use, defaults to 0x1::aptos_coin::AptosCoin + // The coin type to use, defaults to 0x1::aptos_coin::AptosCoin. + // If you want to check the balance of a fungible asset, set this param to be the + // fungible asset address coinType?: string; }, ): Promise { + if (extraArgs?.coinType && AccountAddress.isValid(extraArgs.coinType)) { + /* eslint-disable no-console */ + console.warn("to check balance of a fungible asset, use `FungibleAssetClient()` class for better support"); + const provider = new Provider({ + fullnodeUrl: this.aptosClient.nodeUrl, + indexerUrl: NetworkToIndexerAPI[NodeAPIToNetwork[this.aptosClient.nodeUrl]] ?? this.aptosClient.nodeUrl, + }); + const fungibleAsset = new FungibleAssetClient(provider); + const balance = await fungibleAsset.getPrimaryBalance( + getAddressFromAccountOrAddress(account), + extraArgs?.coinType, + ); + return balance; + } + const coinType = extraArgs?.coinType ?? APTOS_COIN; const typeTag = `0x1::coin::CoinStore<${coinType}>`; const address = getAddressFromAccountOrAddress(account); diff --git a/ecosystem/typescript/sdk/src/plugins/fungible_asset_client.ts b/ecosystem/typescript/sdk/src/plugins/fungible_asset_client.ts new file mode 100644 index 0000000000000..519413e6fc14d --- /dev/null +++ b/ecosystem/typescript/sdk/src/plugins/fungible_asset_client.ts @@ -0,0 +1,102 @@ +import { AptosAccount } from "../account"; +import { RawTransaction } from "../aptos_types"; +import * as Gen from "../generated/index"; +import { OptionalTransactionArgs, Provider } from "../providers"; +import { TransactionBuilderRemoteABI } from "../transaction_builder"; +import { MaybeHexString, HexString } from "../utils"; + +export class FungibleAssetClient { + provider: Provider; + + readonly assetType: string = "0x1::fungible_asset::Metadata"; + + /** + * Creates new FungibleAssetClient instance + * + * @param provider Provider instance + */ + constructor(provider: Provider) { + this.provider = provider; + } + + /** + * Transfer `amount` of fungible asset from sender's primary store to recipient's primary store. 
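+   *
+   * A minimal usage sketch (not taken from this repo): `provider`, `sender`, `recipientAddress`, and the
+   * metadata address below are placeholder values you would supply yourself.
+   * @example
+   * ```
+   * const fungibleAssetClient = new FungibleAssetClient(provider);
+   * // transfer 5 units of the asset identified by its metadata object address
+   * const txnHash = await fungibleAssetClient.transfer(sender, "0x...metadata", recipientAddress, 5);
+   * await provider.waitForTransaction(txnHash);
+   * // read the recipient's balance from their primary fungible store
+   * const balance = await fungibleAssetClient.getPrimaryBalance(recipientAddress, "0x...metadata");
+   * ```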
+ * + * Use this method to transfer any fungible asset including fungible token. + * + * @param sender The sender account + * @param fungibleAssetMetadataAddress The fungible asset address. + * For example if you’re transferring USDT this would be the USDT address + * @param recipient Recipient address + * @param amount Number of assets to transfer + * @returns The hash of the transaction submitted to the API + */ + async transfer( + sender: AptosAccount, + fungibleAssetMetadataAddress: MaybeHexString, + recipient: MaybeHexString, + amount: number | bigint, + extraArgs?: OptionalTransactionArgs, + ): Promise { + const rawTransaction = await this.generateTransfer( + sender, + fungibleAssetMetadataAddress, + recipient, + amount, + extraArgs, + ); + const txnHash = await this.provider.signAndSubmitTransaction(sender, rawTransaction); + return txnHash; + } + + /** + * Get the balance of a fungible asset from the account's primary fungible store. + * + * @param account Account that you want to get the balance of. + * @param fungibleAssetMetadataAddress The fungible asset address you want to check the balance of + * @returns Promise that resolves to the balance + */ + async getPrimaryBalance(account: MaybeHexString, fungibleAssetMetadataAddress: MaybeHexString): Promise { + const payload: Gen.ViewRequest = { + function: "0x1::primary_fungible_store::balance", + type_arguments: [this.assetType], + arguments: [HexString.ensure(account).hex(), HexString.ensure(fungibleAssetMetadataAddress).hex()], + }; + const response = await this.provider.view(payload); + return BigInt((response as any)[0]); + } + + /** + * + * Generate a transfer transaction that can be used to sign and submit to transfer an asset amount + * from the sender primary fungible store to the recipient primary fungible store. + * + * This method can be used if you want/need to get the raw transaction so you can + * first simulate the transaction and then sign and submit it. + * + * @param sender The sender account + * @param fungibleAssetMetadataAddress The fungible asset address. 
+ * For example if you’re transferring USDT this would be the USDT address + * @param recipient Recipient address + * @param amount Number of assets to transfer + * @returns Raw Transaction + */ + async generateTransfer( + sender: AptosAccount, + fungibleAssetMetadataAddress: MaybeHexString, + recipient: MaybeHexString, + amount: number | bigint, + extraArgs?: OptionalTransactionArgs, + ): Promise { + const builder = new TransactionBuilderRemoteABI(this.provider, { + sender: sender.address(), + ...extraArgs, + }); + const rawTxn = await builder.build( + "0x1::primary_fungible_store::transfer", + [this.assetType], + [HexString.ensure(fungibleAssetMetadataAddress).hex(), HexString.ensure(recipient).hex(), amount], + ); + return rawTxn; + } +} diff --git a/ecosystem/typescript/sdk/src/plugins/index.ts b/ecosystem/typescript/sdk/src/plugins/index.ts index 9e2df7900c51c..007f729f63e18 100644 --- a/ecosystem/typescript/sdk/src/plugins/index.ts +++ b/ecosystem/typescript/sdk/src/plugins/index.ts @@ -3,3 +3,4 @@ export * from "./aptos_token"; export * from "./coin_client"; export * from "./faucet_client"; export * from "./ans_client"; +export * from "./fungible_asset_client"; diff --git a/ecosystem/typescript/sdk/src/providers/aptos_client.ts b/ecosystem/typescript/sdk/src/providers/aptos_client.ts index 4e83deab9e4b4..67a50acdc577e 100644 --- a/ecosystem/typescript/sdk/src/providers/aptos_client.ts +++ b/ecosystem/typescript/sdk/src/providers/aptos_client.ts @@ -35,7 +35,7 @@ import { Uint64, AnyNumber, } from "../bcs"; -import { Ed25519PublicKey, MultiEd25519PublicKey } from "../aptos_types"; +import { Ed25519PublicKey, MultiEd25519PublicKey, RawTransaction } from "../aptos_types"; export interface OptionalTransactionArgs { maxGasAmount?: Uint64; @@ -742,6 +742,19 @@ export class AptosClient { // <:!:generateSignSubmitTransactionInner } + /** + * Helper for signing and submitting a transaction. + * + * @param sender AptosAccount of transaction sender. + * @param transaction A generated Raw transaction payload. + * @returns The transaction response from the API. + */ + async signAndSubmitTransaction(sender: AptosAccount, transaction: RawTransaction): Promise { + const bcsTxn = AptosClient.generateBCSTransaction(sender, transaction); + const pendingTransaction = await this.submitSignedBCSTransaction(bcsTxn); + return pendingTransaction.hash; + } + /** * Publishes a move package. `packageMetadata` and `modules` can be generated with command * `aptos move compile --save-metadata [ --included-artifacts=<...> ]`. diff --git a/ecosystem/typescript/sdk/src/providers/indexer.ts b/ecosystem/typescript/sdk/src/providers/indexer.ts index 7186dc1fa66f2..834ca38a2b573 100644 --- a/ecosystem/typescript/sdk/src/providers/indexer.ts +++ b/ecosystem/typescript/sdk/src/providers/indexer.ts @@ -20,6 +20,7 @@ import { GetOwnedTokensQuery, GetTokenOwnedFromCollectionQuery, GetCollectionDataQuery, + GetCollectionsWithOwnedTokensQuery, } from "../indexer/generated/operations"; import { GetAccountTokensCount, @@ -39,6 +40,7 @@ import { GetOwnedTokens, GetTokenOwnedFromCollection, GetCollectionData, + GetCollectionsWithOwnedTokens, } from "../indexer/generated/queries"; /** @@ -324,21 +326,42 @@ export class IndexerClient { /** * Queries account's current owned tokens. * This query returns all tokens (v1 and v2 standards) an account owns, including NFTs, fungible, soulbound, etc. 
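+   *
+   * A usage sketch (the client instance, owner address, and page size below are placeholders, not values
+   * from this repo):
+   * @example
+   * ```
+   * const tokens = await indexerClient.getOwnedTokens("0x123", { options: { offset: 0, limit: 10 } });
+   * ```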
-   *
+   * If you want to get only tokens of a specific standard, you can pass an optional tokenStandard param.
+   * @example An example of how to pass a specific token standard
+   * ```
+   * {
+   *  tokenStandard:"v2"
+   * }
+   * ```
   * @param ownerAddress The token owner address we want to get the tokens for
   * @returns GetOwnedTokensQuery response type
   */
  async getOwnedTokens(
    ownerAddress: MaybeHexString,
    extraArgs?: {
+     tokenStandard?: TokenStandard;
      options?: PaginationArgs;
    },
  ): Promise<GetOwnedTokensQuery> {
    const address = HexString.ensure(ownerAddress).hex();
    IndexerClient.validateAddress(address);
+
+   const whereCondition: any = {
+     owner_address: { _eq: address },
+     amount: { _gt: 0 },
+   };
+
+   if (extraArgs?.tokenStandard) {
+     whereCondition.token_standard = { _eq: extraArgs?.tokenStandard };
+   }
+
    const graphqlQuery = {
      query: GetOwnedTokens,
-     variables: { address, offset: extraArgs?.options?.offset, limit: extraArgs?.options?.limit },
+     variables: {
+       where_condition: whereCondition,
+       offset: extraArgs?.options?.offset,
+       limit: extraArgs?.options?.limit,
+     },
    };
    return this.queryIndexer(graphqlQuery);
  }
@@ -364,11 +387,20 @@ export class IndexerClient {
    const collectionHexAddress = HexString.ensure(collectionAddress).hex();
    IndexerClient.validateAddress(collectionHexAddress);
 
+   const whereCondition: any = {
+     owner_address: { _eq: ownerHexAddress },
+     current_token_data: { collection_id: { _eq: collectionHexAddress } },
+     amount: { _gt: 0 },
+   };
+
+   if (extraArgs?.tokenStandard) {
+     whereCondition.token_standard = { _eq: extraArgs?.tokenStandard };
+   }
+
    const graphqlQuery = {
      query: GetTokenOwnedFromCollection,
      variables: {
-       collection_id: collectionHexAddress,
-       owner_address: ownerHexAddress,
+       where_condition: whereCondition,
        offset: extraArgs?.options?.offset,
        limit: extraArgs?.options?.limit,
      },
@@ -457,4 +489,39 @@ export class IndexerClient {
    return (await this.getCollectionData(creatorAddress, collectionName, extraArgs)).current_collections_v2[0]
      .collection_id;
  }
+
+  /**
+   * Queries for all collections that an account has tokens for.
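+   *
+   * A usage sketch (the client instance and owner address are placeholders; `tokenStandard` is optional):
+   * @example
+   * ```
+   * const collections = await indexerClient.getCollectionsWithOwnedTokens("0x123", { tokenStandard: "v2" });
+   * ```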
+   *
+   * @param ownerAddress the account address that owns the tokens
+   * @returns GetCollectionsWithOwnedTokensQuery response type
+   */
+  async getCollectionsWithOwnedTokens(
+    ownerAddress: MaybeHexString,
+    extraArgs?: {
+      tokenStandard?: TokenStandard;
+      options?: PaginationArgs;
+    },
+  ): Promise<GetCollectionsWithOwnedTokensQuery> {
+    const ownerHexAddress = HexString.ensure(ownerAddress).hex();
+    IndexerClient.validateAddress(ownerHexAddress);
+
+    const whereCondition: any = {
+      owner_address: { _eq: ownerHexAddress },
+    };
+
+    if (extraArgs?.tokenStandard) {
+      whereCondition.current_collection = { token_standard: { _eq: extraArgs?.tokenStandard } };
+    }
+
+    const graphqlQuery = {
+      query: GetCollectionsWithOwnedTokens,
+      variables: {
+        where_condition: whereCondition,
+        offset: extraArgs?.options?.offset,
+        limit: extraArgs?.options?.limit,
+      },
+    };
+    return this.queryIndexer(graphqlQuery);
+  }
+}
diff --git a/ecosystem/typescript/sdk/src/tests/e2e/ans_client.test.ts b/ecosystem/typescript/sdk/src/tests/e2e/ans_client.test.ts
index 641058e51562b..15945e78e2021 100644
--- a/ecosystem/typescript/sdk/src/tests/e2e/ans_client.test.ts
+++ b/ecosystem/typescript/sdk/src/tests/e2e/ans_client.test.ts
@@ -1,11 +1,12 @@
 import { AptosAccount } from "../../account";
+import { AccountAddress } from "../../aptos_types";
 import { AnsClient } from "../../plugins/ans_client";
 import { Provider } from "../../providers";
 import { HexString, Network } from "../../utils";
 import { ANS_OWNER_ADDRESS, ANS_OWNER_PK, getFaucetClient, longTestTimeout, NODE_URL } from "../unit/test_helper.test";
 
 const alice = new AptosAccount();
-const ACCOUNT_ADDRESS = alice.address().hex();
+const ACCOUNT_ADDRESS = AccountAddress.standardizeAddress(alice.address().hex());
 // generate a random name so we can run the test against a local testnet without the need to re-run it each time.
// This will produce a string anywhere between zero and 12 characters long, usually 11 characters, only lower-case and numbers const DOMAIN_NAME = Math.random().toString(36).slice(2); @@ -82,7 +83,8 @@ describe("ANS", () => { const ans = new AnsClient(provider, ANS_OWNER_ADDRESS); const address = await ans.getAddressByName(DOMAIN_NAME); - expect(address).toEqual(ACCOUNT_ADDRESS); + const standardizeAddress = AccountAddress.standardizeAddress(address as string); + expect(standardizeAddress).toEqual(ACCOUNT_ADDRESS); }, longTestTimeout, ); @@ -94,7 +96,8 @@ describe("ANS", () => { const ans = new AnsClient(provider, ANS_OWNER_ADDRESS); const address = await ans.getAddressByName(`${DOMAIN_NAME}.apt`); - expect(address).toEqual(ACCOUNT_ADDRESS); + const standardizeAddress = AccountAddress.standardizeAddress(address as string); + expect(standardizeAddress).toEqual(ACCOUNT_ADDRESS); }, longTestTimeout, ); diff --git a/ecosystem/typescript/sdk/src/tests/e2e/fungible_asset_client.test.ts b/ecosystem/typescript/sdk/src/tests/e2e/fungible_asset_client.test.ts new file mode 100644 index 0000000000000..541a23f254ece --- /dev/null +++ b/ecosystem/typescript/sdk/src/tests/e2e/fungible_asset_client.test.ts @@ -0,0 +1,128 @@ +import * as Gen from "../../generated/index"; +import { AptosAccount } from "../../account"; +import { AptosClient, Provider } from "../../providers"; +import { TxnBuilderTypes } from "../../transaction_builder"; +import { HexString } from "../../utils"; +import { getFaucetClient, longTestTimeout, PROVIDER_LOCAL_NETWORK_CONFIG } from "../unit/test_helper.test"; +import { CoinClient, FungibleAssetClient } from "../../plugins"; +import { RawTransaction } from "../../aptos_types"; + +const provider = new Provider(PROVIDER_LOCAL_NETWORK_CONFIG); +const faucetClient = getFaucetClient(); +const publisher = new AptosAccount( + new HexString("0x1c2b344cdc1ca1cc33d5810cf93278fd3c2a8e8ba9cd78240c1193766b06a724").toUint8Array(), +); +const alice = new AptosAccount(); +const bob = new AptosAccount(); +let fungibleAssetMetadataAddress = ""; +/** + * Since there is no ready-to-use fungible asset contract/module on an aptos framework address + * we pre compiled ../../../aptos-move/move-examples/fungible_token contract and publish + * it here to local testnet so we can interact with it to mint a fungible asset and then + * test FungibleAssetClient class + */ +describe("fungible asset", () => { + /** + * Publish the fungible_token module + * Mint 5 amount of fungible assets to Alice account + * Get the asset address and store it to a later use + */ + beforeAll(async () => { + await faucetClient.fundAccount(publisher.address(), 100_000_000); + await faucetClient.fundAccount(alice.address(), 100_000_000); + await faucetClient.fundAccount(bob.address(), 100_000_000); + + // Publish contract + const txnHash = await provider.publishPackage( + publisher, + new HexString( + // eslint-disable-next-line max-len + 
"0d46756e6769626c65546f6b656e0100000000000000004045334431344231344134414439413146423742463233424534344546313232313739303138453736304544413330463346384344373435423338383138314237b6011f8b08000000000002ff858fbb0ec3200c4577be02b12769a5ae1dba64edd22d8a22024e94260504f42155fdf76240a85b652ff6b5cfb53bc3c5ca67e889e237a047cadabb9a9771838b5e4131f200eb16ad50d9d52118211d97d28273e07ac28dd76e986c587e6abbc6b1d79ee5be47c6a0c72b08ef92766064ca0e49c6f68054090694042516049f10d0fe70df74d3826f385ed74dc862da44b3aad48c7ed27a7ce15cdcff12e23d553e17295f4b33167a1e01000001166d616e616765645f66756e6769626c655f746f6b656eee0e1f8b08000000000002ffed59eb6edb3614fe9fa7e052c0930b217686b6ebd4a668bba55b81a5019a0cc5500c0a2d51361749f4482a4e1af8dd7778d18d92eca4698a025b7e04327578782edfb95193c904bdca11b9c4d932252862d98ce6349fa3a4c8e774064b580822055a51b940929d931c56eab77a650fbdcd915c5051f2f111b6b46cf637892482578520316cdd99c089199138c65211a188132c49cdb1c8a914c0e01cfbce31626f27637191d6c4a15e0f820ce7784ee2b0bd8eae7710fcc1c1082f251361c27146568c9f074145a9d50b82eb1392263e3aa2b97c4fe0e194e35c2484eb1faf0b9eeb87232bb78fded8fdafd4f6f5b3e1838c01aa038ef5cf4d1b969c66985fd5ba08c938a93708190701e19c71674dd0794e3a8b928337e1f842264f7d74a27fae1d22b69494e5ae4cda86a1915f048135f5b5f1579883c0b1a1f1ad0fed0e4148bcdec22c62690a4f702a3c9bcd09bd0486f58b673b9a8582cb719e5e3980ac21c456a0358a708e327c0e085ee07c4e002a6a73c47221d1e1bbe3d3f0f8c3bbc3f7012a9e3c420768df3237ef5f9d9c1c9e86277f1ebd3efe3d4017703ce3cf8ba72f8070b67b7a7872ba6bc91f7ce444b08247249c73562cc38c6433c23dfd03a8079d6fbcfeab221bff55a9f51b4b63c449227418b05c729642181194010ac14d3e92168508e7319a010a5564b2c40d4ea32cb8ba80503b32b1d002285a40cc9e932b1b11ea4f9d11c2e14185f9ea5579aa79dd8c848a44c9625e97b1a15fad6b9fbd8530a638a59f48c3552619286534a8b5b2ca004601d00a41f291a189720fc72064804606d8e386f029a85423456331b0d8063728a87bb35d4944930a29aaddf1b3161303cb0dfb4d2271b70e40d6ab08d49f16df6f2d0d4816131171aa837077dcdeb1dffee928dd7e69c238087296136fdc7bf242ca65309968a0a67826f620e14f6aa6cdd31d4be9505100631a1760a6513713783d3a6f14b96bea0db6a89df565f4d6fc1c95abe7fe345c65abf2b55e0d498e81a4517f745c7a8e1d5a06bcb50ada464e1234b8f4d1e4a17e440f27dd9dcdec6648c55536834ce310ffa0dfc52402c552d1cb6ac08e09bea0a0dd1efcdbbd85f977c7bd8687dcf1b36909548a9aa84c33a9f260992d719ab2950943c6d582e900744269db68af85e232eb017cdd0e604e20c928d796349ee3312720ca14b8895549b3855533e16e62d7a4dbc2b2ccb9b6e486268702f3b224554ccd9b0dec3276a14a7b1bcda3fe03dadeef2d45d79513fc96e27e6dd1750d8c665979f0f18292555d3fdf13093bb4cf711c436916aa36ea026a0e76c3452eb0fc5ed8ec1da3d582d8b6d5b695f0149365caae486c60b32c66298d74619a13a84b56676f1cd846ee79d911bef886aa93daaa150e4bb3d46eb7e9cb3acdbe775cfbb2dd463be9dc6df5bc919be1d1a8167cdc574f4a61ecf1c0cc0a549bd36b293076bb0bd5b3a8394439db3480e079a7cf68b9904077a57b488d3ea7b580692363452e757b08a8644109a831c2d13f0585c70130778d0ec66e63c5094c77542937e1422e18877e290e670cdafb950a05612baa61dd8147b88244a8770f952b989a0ade2957975448e149d6cf37c13d49489b6dd42ffd5e1dd1c6900d862e1f083026a0c753f3a4ce63433cdbb9a1d21586426cd937f0705a35ca9b3181c0df4c07d2992e21219ca97241955acf5042491a0f60a7a4eae027e12cab40d342500b5b5f1e4e4ee918dd04452dd33aae073dee062ac5e1ebc1d54557a55905af368c1afab530d5826d03576abae95c857c5ede51d56d1b6eee152a8d86e5663029377c1d88b8aed4a72b52af94c371df90cfde704260ea840b011c458a040986a854570439ab230631d54e46845e90fe79bae3c144330e2dd76e0d31ebf7583bbe70b0dfc98956dbed7e54651cfcf6091a8224c5732720ab58e405e9baf28f3c1976e61d3c59e4fffbf2fe7c99c0fcd8e3cc0f9092638e57f752a3955757f680cddd5d2be54223df76e47fb43ebb8e2e4d79a33a6a13b1ebef5f4ca7776feeb69d64c7dbade62bc18e87bff948
fe1a6d52a709bf55b3fd5a6ba59d4ab3ac90eaee4bddcc104ef2889433b8beaa81e7b3d26e677b75bbae86ee0bb81d867522f45caef7d85b0a6a10e3000526778d21c386e629cd75be1f9a9cea7153edaa0152ad1b6374267943009961d40b909bc24771e7f23baf1c76a908b52066b2f5adaef510cc124fbf1fc3259dfebc039f8208cfa8106ab08e494e61d6ae3f658c1beeb55acf5336c3e9f33eb95e54623893ffc840c5b96751970b5e79bb76809c0b01fb09430f444018ceb0a0111404b6f2da37e4ccb5facd83aff91dc0f2722fa4cd6ae38ea3c7a2fd5b31e660d27ae3cbe9658223d2b884d4d3aeddeca3fde9d477cfebbb6fd990284a340c4534380e43f078ce296528a3830323c5a36654b77b984adc6149b74941852def4372f8e871835d350e0f1dedb74dad2cf919367178342de2a327cdbbe34e5f770b9b7c7707a3fcd80c4635e70d1be4a7693ba16e0b366bc21aa536fa1e7c24974b8864757182690ae5c0c39007247c8e8a09504f2f1f4fa7d37d1fa52cc2fa86f100a94fd06ef07673cc9618d636532255ebdf4a689b9b3d25dac6905defac77fe05f79b78dcf720000000000400000000000000000000000000000000000000000000000000000000000000010e4170746f734672616d65776f726b00000000000000000000000000000000000000000000000000000000000000010b4170746f735374646c696200000000000000000000000000000000000000000000000000000000000000010a4d6f76655374646c69620000000000000000000000000000000000000000000000000000000000000004114170746f73546f6b656e4f626a6563747300", + ).toUint8Array(), + [ + new TxnBuilderTypes.Module( + new HexString( + // eslint-disable-next-line max-len + "a11ceb0b060000000c010016021634034aaa0104f40116058a028c03079605fb0508910b6006f10bb20210a30ea1010ac40f0c0cd00fef040dbf14060000010101020103010401050106010702080209020a000b0800020d00000310070100010211080002190600021b0600021d0600021e080007270700032c0200092d0b00042e07010000000c000100000e020100000f030100001201040000130501000014060100001507010000160301000017060800061f050a0003200c0d010801210e0e0003220f0a01080523101101080224130101080225150101080226170101080728191a000a291b1900032a1c0a00032b0a1d0108042f0120010008302122000a3123220005322501000233262700023426280002352629000336262a0002142c080002372e0101080238300801080a0b0c0b0d0b0e120f121012140b151f15241e121f1203060c05030003060c05080102060c05010b0201080301060c03060c030504060c050503010801050b020108030b020108030608060b02010807060c0105010803020b02010900050101010301060b0201090002050b02010900010b02010807010807030608060b0201090003050b020108030b02010803060c0b02010807060805030608050b020109000801050b020108030b02010803060c0608050b02010807030608050b0201090001030508080808010a02010808020608080608080206050a02010b02010900080809080608080608090c08040808080501080a010b0b01090006060c08080308080b0b01080a080801080906060c0808080808080b0b01080a08080104070608090b0b010408080808020808080801060809010804010806010805010c060b020108030b020108030801060800060c0b020108070206080403060b020108030b020108030b02010807060c0b02010807060805040608050b020109000b0201090003050b020108030b020108030b02010807060c060805030608050b0201090003166d616e616765645f66756e6769626c655f746f6b656e056572726f720e66756e6769626c655f6173736574066f626a656374066f7074696f6e167072696d6172795f66756e6769626c655f73746f7265067369676e657206737472696e670a636f6c6c656374696f6e07726f79616c747905746f6b656e144d616e6167656446756e6769626c654173736574046275726e0d46756e6769626c654173736574076465706f7369740e667265657a655f6163636f756e74064f626a656374084d657461646174610c6765745f6d657461646174610b696e69745f6d6f64756c65046d696e74087472616e7366657210756e667265657a655f6163636f756e74087769746864726177086d696e745f726566074d696e745265660c7472616e736665725f7265660b5472616e73666572526566086275726e5f726566074275726e5265660d46756e6769626c6553746f72650a616464726573735f6f660869735f6f776e6572117065726d697373696f6e5f64656e6965640e6f626a6563745f61646472
6573731b656e737572655f7072696d6172795f73746f72655f657869737473096275726e5f66726f6d106465706f7369745f776974685f7265660f7365745f66726f7a656e5f666c616706537472696e670475746638116372656174655f746f6b656e5f73656564156372656174655f6f626a6563745f6164647265737311616464726573735f746f5f6f626a6563740e436f6e7374727563746f7252656607526f79616c7479064f7074696f6e046e6f6e65176372656174655f66697865645f636f6c6c656374696f6e126372656174655f6e616d65645f746f6b656e2b6372656174655f7072696d6172795f73746f72655f656e61626c65645f66756e6769626c655f61737365741167656e65726174655f6d696e745f7265661167656e65726174655f6275726e5f7265661567656e65726174655f7472616e736665725f7265660f67656e65726174655f7369676e6572117472616e736665725f776974685f7265661177697468647261775f776974685f726566d6921a4cfe909980a4012c004e13e5ae6a9e535dbe177b52f24f7fc64b36cb52000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000040a02050454455354030801000000000000000a0215147465737420636f6c6c656374696f6e206e616d650a02100f7465737420746f6b656e206e616d650520d6921a4cfe909980a4012c004e13e5ae6a9e535dbe177b52f24f7fc64b36cb520a021c1b7465737420636f6c6c656374696f6e206465736372697074696f6e0a02201f687474703a2f2f6170746f736c6162732e636f6d2f636f6c6c656374696f6e0a0217167465737420746f6b656e206465736372697074696f6e0a021b1a687474703a2f2f6170746f736c6162732e636f6d2f746f6b656e0a021918746573742066756e6769626c65206173736574206e616d650a022120687474703a2f2f6170746f736c6162732e636f6d2f66617669636f6e2e69636f0a021615687474703a2f2f6170746f736c6162732e636f6d2f126170746f733a3a6d657461646174615f76318c010101000000000000000a454e4f545f4f574e4552344f6e6c792066756e6769626c65206173736574206d65746164617461206f776e65722063616e206d616b65206368616e6765732e01144d616e6167656446756e6769626c654173736574010301183078313a3a6f626a6563743a3a4f626a65637447726f7570010c6765745f6d657461646174610101000002031808041a08051c08060001040100091d11030c030b000a030c040c070a040b0711093800040c050f0701110b270e0438012b0010000c050b010b0338020c060b050b060b023803020101000100141d11030c030b000a030c040c050a040b0511093800040c050f0701110b270e0438012b0010010c070b010b0338020c060b070b060b023804020201040100161d11030c020b000a020c030c040a030b0411093800040c050f0701110b270e0338012b0010010c050b010b0238020c060b050b060838050203010000180f070211110c01070311110c0207040c000e000e010e0211121113380602040000001e3b070211110c03070311110c070a00070511110601000000000000000a033807070611111116010b000b03070711110b0738070708111111170c010e010c040a04380807091111070011113102070a1111070b111111180a0411190c060a04111a0c020a04111b0c080b04111c0c050e050b060b080b0212002d000205010401002b2211030c030b000a030c040c070a040b0711093800040c050f0701110b270e0438012b000c060b020b0338020c080a0610020b01111d0c050b0610010b080b0538040206010401002d2211030c040b000a040c050c070a050b0711093800040c050f0701110b270e0538012b0010010c090b010a0438020c060b020b0438020c080b090b060b080b033809020701040100161d11030c020b000a020c030c040a030b0411093800040c050f0701110b270e0338012b0010010c050b010b0238020c060b050b060938050208010001002f1d11030c030b000a030c040c060a040b0611093800040c050f0701110b270e0438012b0010010c070b020b0338020c050b070b050b01380a0200020001000000", + ).toUint8Array(), + ), + ], + ); + await provider.waitForTransaction(txnHash); + + // Mint 5 fungible assets to Alice + const payload: Gen.EntryFunctionPayload = { + function: `${publisher.address().hex()}::managed_fungible_token::mint`, + type_arguments: [], + arguments: [5, alice.address().hex()], + }; + const rawTxn = await provider.generateTransaction(publisher.address(), 
payload); + const bcsTxn = AptosClient.generateBCSTransaction(publisher, rawTxn); + const transactionRes = await provider.submitSignedBCSTransaction(bcsTxn); + await provider.waitForTransaction(transactionRes.hash); + + // Get the asset address + const viewPayload: Gen.ViewRequest = { + function: `${publisher.address().hex()}::managed_fungible_token::get_metadata`, + type_arguments: [], + arguments: [], + }; + const metadata = await provider.view(viewPayload); + fungibleAssetMetadataAddress = (metadata as any)[0].inner; + }, longTestTimeout); + + /** + * Test `transferFromPrimaryFungibleStore` and `balance` functions in FungibleAssetClient class + */ + test( + "it trasfers amount of fungible asset and gets the correct balance", + async () => { + const fungibleAsset = new FungibleAssetClient(provider); + // Alice has 5 amounts of the fungible asset + const aliceInitialBalance = await fungibleAsset.getPrimaryBalance(alice.address(), fungibleAssetMetadataAddress); + expect(aliceInitialBalance).toEqual(BigInt(5)); + + // Alice transfers 2 amounts of the fungible asset to Bob + const transactionHash = await fungibleAsset.transfer(alice, fungibleAssetMetadataAddress, bob.address(), 2); + await provider.waitForTransaction(transactionHash); + + // Alice has 3 amounts of the fungible asset + const aliceCurrentBalance = await fungibleAsset.getPrimaryBalance(alice.address(), fungibleAssetMetadataAddress); + expect(aliceCurrentBalance).toEqual(BigInt(3)); + + // Bob has 2 amounts of the fungible asset + const bobBalance = await fungibleAsset.getPrimaryBalance(bob.address(), fungibleAssetMetadataAddress); + expect(bobBalance).toEqual(BigInt(2)); + }, + longTestTimeout, + ); + + /** + * Test `transferFromPrimaryFungibleStore` and `checkBalance` functions in `CoinClient` class + */ + test("coin client supports fungible assets operations", async () => { + const coinClient = new CoinClient(provider.aptosClient); + // Test `transferFromPrimaryFungibleStore` and `checkBalance` + + // Alice transfers 2 more amount of fungible asset to Bob + await provider.waitForTransaction( + await coinClient.transfer(alice, bob, 2, { + coinType: fungibleAssetMetadataAddress, + }), + { checkSuccess: true }, + ); + // Bob balance is now 4 + expect( + await coinClient.checkBalance(bob, { + coinType: fungibleAssetMetadataAddress, + }), + ).toEqual(BigInt(4)); + }); + + test("it generates and returns a transferFromPrimaryFungibleStore raw transaction", async () => { + const fungibleAsset = new FungibleAssetClient(provider); + const rawTxn = await fungibleAsset.generateTransfer(alice, fungibleAssetMetadataAddress, bob.address(), 2); + expect(rawTxn instanceof RawTransaction).toBeTruthy(); + expect(rawTxn.sender.toHexString()).toEqual(alice.address().hex()); + }); +}); diff --git a/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts b/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts index a448492f8f94a..64d6399951363 100644 --- a/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts +++ b/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts @@ -5,7 +5,7 @@ import { FaucetClient } from "../../plugins/faucet_client"; import { IndexerClient } from "../../providers/indexer"; import { TokenClient } from "../../plugins/token_client"; import { FAUCET_AUTH_TOKEN, longTestTimeout } from "../unit/test_helper.test"; -import { Network, NetworkToIndexerAPI, NetworkToNodeAPI, sleep } from "../../utils"; +import { Network, NetworkToIndexerAPI, sleep } from "../../utils"; import { Provider } from "../../providers"; import { AptosToken 
} from "../../plugins"; @@ -25,19 +25,22 @@ const indexerClient = new IndexerClient(NetworkToIndexerAPI[Network.TESTNET]); describe("Indexer", () => { it("should throw an error when account address is not valid", async () => { + const address1 = "702ca08576f66393140967fef983bb6bf160dafeb73de9c4ddac4d2dc"; expect(async () => { - await indexerClient.getAccountNFTs("702ca08576f66393140967fef983bb6bf160dafeb73de9c4ddac4d2dc"); - }).rejects.toThrow("Address needs to be 66 chars long."); + await indexerClient.getAccountNFTs(address1); + }).rejects.toThrow(`${address1} is less than 66 chars long.`); + const address2 = "0x702ca08576f66393140967fef983bb6bf160dafeb73de9c4ddac4d2dc"; expect(async () => { - await indexerClient.getAccountNFTs("0x702ca08576f66393140967fef983bb6bf160dafeb73de9c4ddac4d2dc"); - }).rejects.toThrow("Address needs to be 66 chars long."); + await indexerClient.getAccountNFTs(address2); + }).rejects.toThrow(`${address2} is less than 66 chars long.`); }); it("should not throw an error when account address is missing 0x", async () => { + const address = "790a34c702ca08576f66393140967fef983bb6bf160dafeb73de9c4ddac4d2dc"; expect(async () => { - await indexerClient.getAccountNFTs("790a34c702ca08576f66393140967fef983bb6bf160dafeb73de9c4ddac4d2dc"); - }).not.toThrow("Address needs to be 66 chars long."); + await indexerClient.getAccountNFTs(address); + }).not.toThrow(); }); beforeAll(async () => { @@ -232,6 +235,11 @@ describe("Indexer", () => { expect(tokens.current_token_ownerships_v2).toHaveLength(2); }); + it("gets account current tokens from a specified token standard", async () => { + const tokens = await indexerClient.getOwnedTokens(alice.address().hex(), { tokenStandard: "v2" }); + expect(tokens.current_token_ownerships_v2).toHaveLength(1); + }); + it("gets the collection data", async () => { const collectionData = await indexerClient.getCollectionData(alice.address().hex(), collectionName); expect(collectionData.current_collections_v2).toHaveLength(1); @@ -281,5 +289,25 @@ describe("Indexer", () => { }, longTestTimeout, ); + + it( + "queries for all collections that an account has tokens for", + async () => { + const collections = await indexerClient.getCollectionsWithOwnedTokens(alice.address().hex()); + expect(collections.current_collection_ownership_v2_view.length).toEqual(2); + }, + longTestTimeout, + ); + + it( + "queries for all v2 collections that an account has tokens for", + async () => { + const collections = await indexerClient.getCollectionsWithOwnedTokens(alice.address().hex(), { + tokenStandard: "v2", + }); + expect(collections.current_collection_ownership_v2_view.length).toEqual(1); + }, + longTestTimeout, + ); }); }); diff --git a/ecosystem/typescript/sdk/src/tests/e2e/provider.test.ts b/ecosystem/typescript/sdk/src/tests/e2e/provider.test.ts index 0c77cd67dc4fe..f3064e0be12d5 100644 --- a/ecosystem/typescript/sdk/src/tests/e2e/provider.test.ts +++ b/ecosystem/typescript/sdk/src/tests/e2e/provider.test.ts @@ -38,67 +38,13 @@ describe("Provider", () => { }).toThrow("network is not provided"); }); - describe("requests", () => { - beforeAll(async () => { - await faucetClient.fundAccount(alice.address(), 100000000); - }); - - describe("query full node", () => { - it("gets genesis account from fullnode", async () => { - const provider = new Provider(Network.TESTNET); - const genesisAccount = await provider.getAccount("0x1"); - expect(genesisAccount.authentication_key.length).toBe(66); - expect(genesisAccount.sequence_number).not.toBeNull(); - }); - }); - - 
describe("query indexer", () => { - const aptosClient = new AptosClient("https://fullnode.testnet.aptoslabs.com"); - const tokenClient = new TokenClient(aptosClient); - const collectionName = "AliceCollection"; - const tokenName = "Alice Token"; - - beforeAll(async () => { - // Create collection and token on Alice's account - await aptosClient.waitForTransaction( - await tokenClient.createCollection(alice, collectionName, "Alice's simple collection", "https://aptos.dev"), - { checkSuccess: true }, - ); - - await aptosClient.waitForTransaction( - await tokenClient.createTokenWithMutabilityConfig( - alice, - collectionName, - tokenName, - "Alice's simple token", - 1, - "https://aptos.dev/img/nyan.jpeg", - 1000, - alice.address(), - 1, - 0, - ["TOKEN_BURNABLE_BY_OWNER"], - [bcsSerializeBool(true)], - ["bool"], - [false, false, false, false, true], - ), - { checkSuccess: true }, - ); - }, longTestTimeout); - - jest.retryTimes(5); - beforeEach(async () => { - await sleep(1000); - }); + it("has AptosClient method defined", () => { + const provider = new Provider(Network.TESTNET); + expect(provider.getAccount).toBeDefined(); + }); - it("gets account NFTs from indexer", async () => { - let provider = new Provider(Network.TESTNET); - const accountNFTs = await provider.getAccountNFTs(alice.address().hex(), { limit: 20, offset: 0 }); - expect(accountNFTs.current_token_ownerships).toHaveLength(1); - expect(accountNFTs.current_token_ownerships[0]).toHaveProperty("current_token_data"); - expect(accountNFTs.current_token_ownerships[0]).toHaveProperty("current_collection_data"); - expect(accountNFTs.current_token_ownerships[0].current_token_data?.name).toBe("Alice Token"); - }); - }); + it("has IndexerClient method defined", () => { + const provider = new Provider(Network.TESTNET); + expect(provider.getAccountNFTs).toBeDefined(); }); }); diff --git a/ecosystem/typescript/sdk/src/tests/unit/account_address.test.ts b/ecosystem/typescript/sdk/src/tests/unit/account_address.test.ts index 687c531f8212b..3284f7ec1f736 100644 --- a/ecosystem/typescript/sdk/src/tests/unit/account_address.test.ts +++ b/ecosystem/typescript/sdk/src/tests/unit/account_address.test.ts @@ -77,4 +77,12 @@ describe("AccountAddress", () => { it("not isValid too long with 0x", async () => { expect(AccountAddress.isValid(`0x00${ADDRESS_LONG}`)).toBe(false); }); + + it.only("standardize address", () => { + const validAddress = "0x08743724fea179336994e9a66cff08676e3be6f8b227450cb3148288ba20a2e5"; + expect(AccountAddress.standardizeAddress(validAddress)).toBe(validAddress); + + const invalidAddress = "0x8743724fea179336994e9a66cff08676e3be6f8b227450cb3148288ba20a2e5"; + expect(AccountAddress.standardizeAddress(invalidAddress)).toBe(validAddress); + }); }); diff --git a/ecosystem/typescript/sdk/src/utils/api-endpoints.ts b/ecosystem/typescript/sdk/src/utils/api-endpoints.ts index 7645b82783f60..3b317b3c98fb5 100644 --- a/ecosystem/typescript/sdk/src/utils/api-endpoints.ts +++ b/ecosystem/typescript/sdk/src/utils/api-endpoints.ts @@ -10,6 +10,12 @@ export const NetworkToNodeAPI: Record = { devnet: "https://fullnode.devnet.aptoslabs.com/v1", }; +export const NodeAPIToNetwork: Record = { + "https://fullnode.mainnet.aptoslabs.com/v1": "mainnet", + "https://fullnode.testnet.aptoslabs.com/v1": "testnet", + "https://fullnode.devnet.aptoslabs.com/v1": "devnet", +}; + export enum Network { MAINNET = "mainnet", TESTNET = "testnet", @@ -18,5 +24,5 @@ export enum Network { export interface CustomEndpoints { fullnodeUrl: string; - indexerUrl: string; + 
indexerUrl?: string; } diff --git a/ecosystem/typescript/sdk/src/version.ts b/ecosystem/typescript/sdk/src/version.ts index ab7311f69fa54..db143ad4badcd 100644 --- a/ecosystem/typescript/sdk/src/version.ts +++ b/ecosystem/typescript/sdk/src/version.ts @@ -1,2 +1,2 @@ // hardcoded for now, we would want to have it injected dynamically -export const VERSION = "1.9.1"; +export const VERSION = "1.10.0"; diff --git a/execution/block-partitioner/Cargo.toml b/execution/block-partitioner/Cargo.toml new file mode 100644 index 0000000000000..9a800231d3f8c --- /dev/null +++ b/execution/block-partitioner/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "aptos-block-partitioner" +description = "A tool to partition a block store into smaller chunks based on graph partitioning." + +version = "0.1.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +anyhow = { workspace = true } +aptos-crypto = { workspace = true } +aptos-logger = { workspace = true } +aptos-metrics-core = { workspace = true } +aptos-types = { workspace = true } +bcs = { workspace = true } +clap = { workspace = true } +dashmap = { workspace = true } +itertools = { workspace = true } +move-core-types = { workspace = true } +rand = { workspace = true } +rayon = { workspace = true } + +[features] +default = [] diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/block_partitioner.rs b/execution/block-partitioner/src/lib.rs similarity index 94% rename from aptos-move/aptos-vm/src/sharded_block_executor/block_partitioner.rs rename to execution/block-partitioner/src/lib.rs index b05dd053ac463..000f232ca6686 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/block_partitioner.rs +++ b/execution/block-partitioner/src/lib.rs @@ -1,6 +1,10 @@ // Copyright © Aptos Foundation // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 + +pub mod sharded_block_partitioner; +pub mod test_utils; + use aptos_types::transaction::Transaction; pub trait BlockPartitioner: Send + Sync { diff --git a/execution/block-partitioner/src/main.rs b/execution/block-partitioner/src/main.rs new file mode 100644 index 0000000000000..5dade2bc352af --- /dev/null +++ b/execution/block-partitioner/src/main.rs @@ -0,0 +1,60 @@ +// Copyright © Aptos Foundation + +use aptos_block_partitioner::{ + sharded_block_partitioner::ShardedBlockPartitioner, + test_utils::{create_signed_p2p_transaction, generate_test_account, TestAccount}, +}; +use aptos_types::transaction::analyzed_transaction::AnalyzedTransaction; +use clap::Parser; +use rand::rngs::OsRng; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; +use std::{sync::Mutex, time::Instant}; + +#[derive(Debug, Parser)] +struct Args { + #[clap(long, default_value = "2000000")] + pub num_accounts: usize, + + #[clap(long, default_value = "100000")] + pub block_size: usize, + + #[clap(long, default_value = "10")] + pub num_blocks: usize, + + #[clap(long, default_value = "12")] + pub num_shards: usize, +} + +fn main() { + println!("Starting the block partitioning benchmark"); + let args = Args::parse(); + let num_accounts = args.num_accounts; + println!("Creating {} accounts", num_accounts); + let accounts: Vec> = (0..num_accounts) + .into_par_iter() + .map(|_i| Mutex::new(generate_test_account())) + .collect(); + println!("Created {} accounts", num_accounts); + println!("Creating {} transactions", args.block_size); + let transactions: Vec = (0..args.block_size) + .map(|_| { + // randomly select a sender and receiver from accounts + let mut rng = OsRng; + + let indices = rand::seq::index::sample(&mut rng, num_accounts, 2); + let receiver = accounts[indices.index(1)].lock().unwrap(); + let mut sender = accounts[indices.index(0)].lock().unwrap(); + create_signed_p2p_transaction(&mut sender, vec![&receiver]).remove(0) + }) + .collect(); + + let partitioner = ShardedBlockPartitioner::new(args.num_shards); + for _ in 0..args.num_blocks { + let transactions = transactions.clone(); + println!("Starting to partition"); + let now = Instant::now(); + partitioner.partition(transactions, 1); + let elapsed = now.elapsed(); + println!("Time taken to partition: {:?}", elapsed); + } +} diff --git a/execution/block-partitioner/src/sharded_block_partitioner/conflict_detector.rs b/execution/block-partitioner/src/sharded_block_partitioner/conflict_detector.rs new file mode 100644 index 0000000000000..e760b77d4fe85 --- /dev/null +++ b/execution/block-partitioner/src/sharded_block_partitioner/conflict_detector.rs @@ -0,0 +1,208 @@ +// Copyright © Aptos Foundation + +use crate::sharded_block_partitioner::dependency_analysis::{RWSet, WriteSetWithTxnIndex}; +use aptos_types::{ + block_executor::partitioner::{ + CrossShardDependencies, ShardId, SubBlock, TransactionWithDependencies, TxnIdxWithShardId, + TxnIndex, + }, + transaction::{ + analyzed_transaction::{AnalyzedTransaction, StorageLocation}, + Transaction, + }, +}; +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + sync::Arc, +}; + +pub struct CrossShardConflictDetector { + shard_id: ShardId, + num_shards: usize, +} + +impl CrossShardConflictDetector { + pub fn new(shard_id: ShardId, num_shards: usize) -> Self { + Self { + shard_id, + num_shards, + } + } + + pub fn discard_txns_with_cross_shard_deps( + &mut self, + txns: Vec, + cross_shard_rw_set: &[RWSet], + prev_rounds_rw_set_with_index: Arc>, + ) -> 
( + Vec<AnalyzedTransaction>, + Vec<CrossShardDependencies>, + Vec<AnalyzedTransaction>, + ) { + // Iterate through all the transactions; if any shard has taken a read/write lock on the storage location + // and has a higher priority than this shard id, then this transaction needs to be moved to the end of the block. + let mut accepted_txns = Vec::new(); + let mut accepted_txn_dependencies = Vec::new(); + let mut rejected_txns = Vec::new(); + for (_, txn) in txns.into_iter().enumerate() { + if self.check_for_cross_shard_conflict(self.shard_id, &txn, cross_shard_rw_set) { + rejected_txns.push(txn); + } else { + accepted_txn_dependencies.push(self.get_deps_for_frozen_txn( + &txn, + Arc::new(vec![]), + prev_rounds_rw_set_with_index.clone(), + )); + accepted_txns.push(txn); + } + } + (accepted_txns, accepted_txn_dependencies, rejected_txns) + } + + /// Adds a cross shard dependency for a transaction. This can be done by finding the maximum transaction index + /// that has taken a read/write lock on the storage location the current transaction is trying to read/write. + /// We traverse the current round's read/write set in reverse order starting from shard_id - 1 and look for the first + /// txn index that has taken a read/write lock on the storage location. If we can't find any such txn index, we + /// traverse the previous rounds' read/write set in reverse order and look for the first txn index that has taken + /// a read/write lock on the storage location. + fn get_deps_for_frozen_txn( + &self, + frozen_txn: &AnalyzedTransaction, + current_round_rw_set_with_index: Arc<Vec<WriteSetWithTxnIndex>>, + prev_rounds_rw_set_with_index: Arc<Vec<WriteSetWithTxnIndex>>, + ) -> CrossShardDependencies { + if current_round_rw_set_with_index.is_empty() && prev_rounds_rw_set_with_index.is_empty() { + return CrossShardDependencies::default(); + } + // Iterate through the frozen dependencies and add the max transaction index for each storage location + let mut cross_shard_dependencies = CrossShardDependencies::default(); + for storage_location in frozen_txn + .read_hints() + .iter() + .chain(frozen_txn.write_hints().iter()) + { + // For the current round, iterate through all shards less than the current shard in reverse order, and for previous rounds iterate through all shards in reverse order, + // and find the first shard id that has taken a write lock on the storage location. This ensures that we find the highest txn index that is conflicting + // with the current transaction. Please note that since we use a multi-version database, there is no conflict if any previous txn index has taken + // a read lock on the storage location.
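+ // Editor's illustration (hypothetical values, not part of the original patch): with num_shards = 4 and
+ // self.shard_id = 2, the scan below visits the current round's shard 1 and then shard 0, and only
+ // afterwards falls back to the previous rounds' write sets, also in reverse shard order.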
+ let mut current_shard_id = (self.shard_id + self.num_shards - 1) % self.num_shards; // current shard id - 1 in a wrapping fashion + for rw_set_with_index in current_round_rw_set_with_index + .iter() + .take(self.shard_id) + .rev() + .chain(prev_rounds_rw_set_with_index.iter().rev()) + { + if rw_set_with_index.has_write_lock(storage_location) { + cross_shard_dependencies.add_required_edge( + TxnIdxWithShardId::new( + rw_set_with_index.get_write_lock_txn_index(storage_location), + current_shard_id, + ), + storage_location.clone(), + ); + break; + } + // perform a wrapping subtraction + current_shard_id = (current_shard_id + self.num_shards - 1) % self.num_shards; + } + } + + cross_shard_dependencies + } + + pub fn add_deps_for_frozen_sub_block( + &self, + txns: Vec<AnalyzedTransaction>, + current_round_rw_set_with_index: Arc<Vec<WriteSetWithTxnIndex>>, + prev_round_rw_set_with_index: Arc<Vec<WriteSetWithTxnIndex>>, + index_offset: TxnIndex, + ) -> (SubBlock, Vec<CrossShardDependencies>) { + let mut frozen_txns = Vec::new(); + let mut cross_shard_dependencies = Vec::new(); + for txn in txns.into_iter() { + let dependency = self.get_deps_for_frozen_txn( + &txn, + current_round_rw_set_with_index.clone(), + prev_round_rw_set_with_index.clone(), + ); + cross_shard_dependencies.push(dependency.clone()); + frozen_txns.push(TransactionWithDependencies::new(txn.into_txn(), dependency)); + } + ( + SubBlock::new(index_offset, frozen_txns), + cross_shard_dependencies, + ) + } + + fn check_for_cross_shard_conflict( + &self, + current_shard_id: ShardId, + txn: &AnalyzedTransaction, + cross_shard_rw_set: &[RWSet], + ) -> bool { + if self.check_for_read_conflict(current_shard_id, txn, cross_shard_rw_set) { + return true; + } + if self.check_for_write_conflict(current_shard_id, txn, cross_shard_rw_set) { + return true; + } + false + } + + fn get_anchor_shard_id(&self, storage_location: &StorageLocation) -> ShardId { + let mut hasher = DefaultHasher::new(); + storage_location.hash(&mut hasher); + (hasher.finish() % self.num_shards as u64) as usize + } + + fn check_for_read_conflict( + &self, + current_shard_id: ShardId, + txn: &AnalyzedTransaction, + cross_shard_rw_set: &[RWSet], + ) -> bool { + for read_location in txn.read_hints().iter() { + // Each storage location is allocated an anchor shard id, which is used to resolve conflicts deterministically across shards. + // During conflict resolution, shards start scanning from the anchor shard id, and the + // first shard id that has taken a read/write lock on this storage location is the owner of this storage location. + // Please note that another alternative is to scan from the first shard id, but this would result in non-uniform load across shards in case of conflicts.
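+ // Editor's sketch (hypothetical values): with num_shards = 3 and an anchor_shard_id of 2, the loop below
+ // probes shard 2, then shard 0, then shard 1, stopping early once it reaches current_shard_id, because only
+ // shards visited before the current one can own the location and force a conflict here.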
+ let anchor_shard_id = self.get_anchor_shard_id(read_location); + for offset in 0..self.num_shards { + let shard_id = (anchor_shard_id + offset) % self.num_shards; + // Ignore if this is from the same shard + if shard_id == current_shard_id { + // We only need to check if any shard id < current shard id has taken a write lock on the storage location + break; + } + if cross_shard_rw_set[shard_id].has_write_lock(read_location) { + return true; + } + } + } + false + } + + fn check_for_write_conflict( + &self, + current_shard_id: usize, + txn: &AnalyzedTransaction, + cross_shard_rw_set: &[RWSet], + ) -> bool { + for write_location in txn.write_hints().iter() { + let anchor_shard_id = self.get_anchor_shard_id(write_location); + for offset in 0..self.num_shards { + let shard_id = (anchor_shard_id + offset) % self.num_shards; + // Ignore if this is from the same shard + if shard_id == current_shard_id { + // We only need to check if any shard id < current shard id has taken a write lock on the storage location + break; + } + if cross_shard_rw_set[shard_id].has_read_or_write_lock(write_location) { + return true; + } + } + } + false + } +} diff --git a/execution/block-partitioner/src/sharded_block_partitioner/cross_shard_messages.rs b/execution/block-partitioner/src/sharded_block_partitioner/cross_shard_messages.rs new file mode 100644 index 0000000000000..b7234522069f8 --- /dev/null +++ b/execution/block-partitioner/src/sharded_block_partitioner/cross_shard_messages.rs @@ -0,0 +1,196 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::sharded_block_partitioner::{ + cross_shard_messages::CrossShardMsg::CrossShardDependentEdgesMsg, + dependency_analysis::{RWSet, WriteSetWithTxnIndex}, +}; +use aptos_types::block_executor::partitioner::{CrossShardEdges, ShardId, TxnIndex}; +use std::sync::mpsc::{Receiver, Sender}; + +#[derive(Clone, Debug)] +pub enum CrossShardMsg { + WriteSetWithTxnIndexMsg(WriteSetWithTxnIndex), + RWSetMsg(RWSet), + // Number of accepted transactions in the shard for the current round. 
+ AcceptedTxnsMsg(usize), + CrossShardDependentEdgesMsg(Vec), +} + +#[derive(Clone, Debug, Default)] +pub struct CrossShardDependentEdges { + pub source_txn_index: TxnIndex, + pub dependent_edges: CrossShardEdges, +} + +impl CrossShardDependentEdges { + pub fn new(source_txn_index: TxnIndex, dependent_edges: CrossShardEdges) -> Self { + Self { + source_txn_index, + dependent_edges, + } + } +} + +// Define the interface for CrossShardClient +pub trait CrossShardClientInterface { + fn broadcast_and_collect_rw_set(&self, rw_set: RWSet) -> Vec; + fn broadcast_and_collect_write_set_with_index( + &self, + rw_set_with_index: WriteSetWithTxnIndex, + ) -> Vec; + fn broadcast_and_collect_num_accepted_txns(&self, num_accepted_txns: usize) -> Vec; + fn broadcast_and_collect_dependent_edges( + &self, + dependent_edges: Vec>, + ) -> Vec>; +} + +pub struct CrossShardClient { + shard_id: ShardId, + message_rxs: Vec>, + message_txs: Vec>, +} + +impl CrossShardClient { + pub fn new( + shard_id: ShardId, + message_rxs: Vec>, + message_txs: Vec>, + ) -> Self { + Self { + shard_id, + message_rxs, + message_txs, + } + } + + fn broadcast_and_collect(&self, f: F, g: G) -> Vec + where + F: Fn() -> CrossShardMsg, + G: Fn(CrossShardMsg) -> Option, + T: Default + Clone, + { + let num_shards = self.message_txs.len(); + let mut vec = vec![T::default(); num_shards]; + + for i in 0..num_shards { + if i != self.shard_id { + self.message_txs[i].send(f()).unwrap(); + } + } + + for (i, msg_rx) in self.message_rxs.iter().enumerate() { + if i == self.shard_id { + continue; + } + let msg = msg_rx.recv().unwrap(); + vec[i] = g(msg).expect("Unexpected message"); + } + vec + } +} + +impl CrossShardClientInterface for CrossShardClient { + fn broadcast_and_collect_rw_set(&self, rw_set: RWSet) -> Vec { + self.broadcast_and_collect( + || CrossShardMsg::RWSetMsg(rw_set.clone()), + |msg| match msg { + CrossShardMsg::RWSetMsg(rw_set) => Some(rw_set), + _ => None, + }, + ) + } + + fn broadcast_and_collect_write_set_with_index( + &self, + rw_set_with_index: WriteSetWithTxnIndex, + ) -> Vec { + self.broadcast_and_collect( + || CrossShardMsg::WriteSetWithTxnIndexMsg(rw_set_with_index.clone()), + |msg| match msg { + CrossShardMsg::WriteSetWithTxnIndexMsg(rw_set_with_index) => { + Some(rw_set_with_index) + }, + _ => None, + }, + ) + } + + fn broadcast_and_collect_num_accepted_txns(&self, num_accepted_txns: usize) -> Vec { + self.broadcast_and_collect( + || CrossShardMsg::AcceptedTxnsMsg(num_accepted_txns), + |msg| match msg { + CrossShardMsg::AcceptedTxnsMsg(num_accepted_txns) => Some(num_accepted_txns), + _ => None, + }, + ) + } + + fn broadcast_and_collect_dependent_edges( + &self, + dependent_edges: Vec>, + ) -> Vec> { + let num_shards = self.message_txs.len(); + + for (shard_id, dependent_edges) in dependent_edges.into_iter().enumerate() { + if shard_id != self.shard_id { + self.message_txs[shard_id] + .send(CrossShardDependentEdgesMsg(dependent_edges)) + .unwrap(); + } + } + + let mut cross_shard_dependent_edges = vec![vec![]; num_shards]; + + for (i, msg_rx) in self.message_rxs.iter().enumerate() { + if i == self.shard_id { + continue; + } + let msg = msg_rx.recv().unwrap(); + match msg { + CrossShardDependentEdgesMsg(dependent_edges) => { + cross_shard_dependent_edges[i] = dependent_edges; + }, + _ => panic!("Unexpected message"), + } + } + + cross_shard_dependent_edges + } +} + +// Create a mock implementation of CrossShardClientInterface for testing +#[cfg(test)] +pub struct MockCrossShardClient { + pub rw_set_results: Vec, + pub 
write_set_with_index_results: Vec, + pub num_accepted_txns_results: Vec, + pub dependent_edges_results: Vec>, +} + +// Mock CrossShardClient used for testing purposes +#[cfg(test)] +impl CrossShardClientInterface for MockCrossShardClient { + fn broadcast_and_collect_rw_set(&self, _rw_set: RWSet) -> Vec { + self.rw_set_results.clone() + } + + fn broadcast_and_collect_write_set_with_index( + &self, + _rw_set_with_index: WriteSetWithTxnIndex, + ) -> Vec { + self.write_set_with_index_results.clone() + } + + fn broadcast_and_collect_num_accepted_txns(&self, _num_accepted_txns: usize) -> Vec { + self.num_accepted_txns_results.clone() + } + + fn broadcast_and_collect_dependent_edges( + &self, + _dependent_edges: Vec>, + ) -> Vec> { + self.dependent_edges_results.clone() + } +} diff --git a/execution/block-partitioner/src/sharded_block_partitioner/dependency_analysis.rs b/execution/block-partitioner/src/sharded_block_partitioner/dependency_analysis.rs new file mode 100644 index 0000000000000..2fea666162fc3 --- /dev/null +++ b/execution/block-partitioner/src/sharded_block_partitioner/dependency_analysis.rs @@ -0,0 +1,90 @@ +// Copyright © Aptos Foundation + +use aptos_types::{ + block_executor::partitioner::TxnIndex, + transaction::analyzed_transaction::{AnalyzedTransaction, StorageLocation}, +}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +#[derive(Default, Clone, Debug)] +pub struct RWSet { + // Represents a set of storage locations that are read by the transactions in this shard. + read_set: Arc>, + // Represents a set of storage locations that are written by the transactions in this shard. + write_set: Arc>, +} + +impl RWSet { + pub fn new(txns: &[AnalyzedTransaction]) -> Self { + let mut read_set = HashSet::new(); + let mut write_set = HashSet::new(); + for analyzed_txn in txns { + for write_location in analyzed_txn.write_hints().iter() { + write_set.insert(write_location.clone()); + } + for read_location in analyzed_txn.read_hints().iter() { + read_set.insert(read_location.clone()); + } + } + + Self { + read_set: Arc::new(read_set), + write_set: Arc::new(write_set), + } + } + + pub fn has_write_lock(&self, location: &StorageLocation) -> bool { + self.write_set.contains(location) + } + + pub fn has_read_lock(&self, location: &StorageLocation) -> bool { + self.read_set.contains(location) + } + + pub fn has_read_or_write_lock(&self, location: &StorageLocation) -> bool { + self.has_read_lock(location) || self.has_write_lock(location) + } +} + +#[derive(Default, Clone, Debug)] +/// Contains a list of storage location along with the maximum transaction index in this shard +/// that has taken a read/write lock on this storage location. For example, if the chunk contains 3 +/// transactions with read/write set as follows: +/// Txn 0: Write set: [A, B, C] +/// Txn 1: Write set: [A, B] +/// Txn 2: Write set: [A] +/// Then the WriteSetWithTxnIndex will be: +/// Write set: {A: 2, B: 1, C: 0} +/// Please note that the index is the global index which includes the offset of the shard. +pub struct WriteSetWithTxnIndex { + write_set: Arc>, +} + +impl WriteSetWithTxnIndex { + // Creates a new dependency analysis object from a list of transactions. In this case, since the + // transactions are frozen, we can set the maximum transaction index to the index of the last + // transaction in the list. 
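+ // Editor's illustration (hypothetical numbers): if txn_index_offset is 10 and three transactions in this
+ // chunk write to location A, the map built below ends up with A -> 12, the highest global index that wrote to A.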
+ pub fn new(txns: &[AnalyzedTransaction], txn_index_offset: TxnIndex) -> Self { + let mut write_set = HashMap::new(); + for (index, txn) in txns.iter().enumerate() { + for write_location in txn.write_hints().iter() { + write_set.insert(write_location.clone(), txn_index_offset + index); + } + } + + Self { + write_set: Arc::new(write_set), + } + } + + pub fn has_write_lock(&self, location: &StorageLocation) -> bool { + self.write_set.contains_key(location) + } + + pub fn get_write_lock_txn_index(&self, location: &StorageLocation) -> TxnIndex { + *self.write_set.get(location).unwrap() + } +} diff --git a/execution/block-partitioner/src/sharded_block_partitioner/dependent_edges.rs b/execution/block-partitioner/src/sharded_block_partitioner/dependent_edges.rs new file mode 100644 index 0000000000000..01f283cdce4d1 --- /dev/null +++ b/execution/block-partitioner/src/sharded_block_partitioner/dependent_edges.rs @@ -0,0 +1,296 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::sharded_block_partitioner::cross_shard_messages::{ + CrossShardClientInterface, CrossShardDependentEdges, +}; +use aptos_types::{ + block_executor::partitioner::{ + CrossShardDependencies, CrossShardEdges, ShardId, SubBlocksForShard, TxnIdxWithShardId, + TxnIndex, + }, + transaction::Transaction, +}; +use itertools::Itertools; +use std::{collections::HashMap, sync::Arc}; + +pub struct DependentEdgeCreator { + shard_id: ShardId, + cross_shard_client: Arc, + froze_sub_blocks: SubBlocksForShard, + num_shards: usize, +} + +/// Creates a list of dependent edges for each sub block in the current round. It works in the following steps: +/// 1. For the current block, it creates a dependent edge list by txn index based on newly required edges in cross shard +/// dependencies. A dependent edge is the reverse of a required edge; for example, if txn 20 in shard 2 requires txn 10 in shard 1, +/// then txn 10 in shard 1 will have a dependent edge to txn 20 in shard 2. +/// 2. It sends the dependent edge list to all shards and collects the dependent edge list from all shards. +/// 3. It groups the dependent edge list by source txn index. +/// 4. It adds the dependent edge list to the sub blocks in the current round.
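+/// As an illustration (hypothetical indices, mirroring the unit test below): if shard 1 reports that its txn 11
+/// requires this shard's txn 4, the resulting dependent edge (txn 4 -> txn 11 @ shard 1) is grouped together with
+/// edges reported by other shards for txn 4 and attached to the frozen sub block that contains txn 4.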
+/// +impl DependentEdgeCreator { + pub fn new( + shard_id: ShardId, + cross_shard_client: Arc, + froze_sub_blocks: SubBlocksForShard, + num_shards: usize, + ) -> Self { + Self { + shard_id, + cross_shard_client, + froze_sub_blocks, + num_shards, + } + } + + pub fn create_dependent_edges( + &mut self, + curr_cross_shard_deps: &[CrossShardDependencies], + index_offset: usize, + ) { + if self.froze_sub_blocks.is_empty() { + // early return in case this is the first round (no previous sub blocks, so no back edges) + return; + } + // List of dependent edges for each shard and by source txn index + let mut dependent_edges: Vec> = + vec![HashMap::new(); self.num_shards]; + for (index, cross_shard_deps) in curr_cross_shard_deps.iter().enumerate() { + let dependent_index = index + index_offset; + self.insert_dependent_edges_for_txn( + dependent_index, + cross_shard_deps, + &mut dependent_edges, + ); + } + let dep_edges_vec = self.send_and_collect_dependent_edges(dependent_edges); + let dep_edges = self.group_dependent_edges_by_source_idx(dep_edges_vec); + self.add_dependent_edges_to_sub_blocks(dep_edges); + } + + fn insert_dependent_edges_for_txn( + &mut self, + dependent_index: TxnIndex, + cross_shard_deps: &CrossShardDependencies, + back_edges: &mut [HashMap], + ) { + for (index_with_shard, storage_locations) in cross_shard_deps.required_edges_iter() { + let back_edges_for_shard = back_edges.get_mut(index_with_shard.shard_id).unwrap(); + let back_edges = back_edges_for_shard + .entry(index_with_shard.txn_index) + .or_insert_with(CrossShardEdges::default); + back_edges.add_edge( + TxnIdxWithShardId::new(dependent_index, self.shard_id), + storage_locations.clone(), + ); + } + } + + fn send_and_collect_dependent_edges( + &self, + dependent_edges: Vec>, + ) -> Vec> { + let mut back_edges_vec = Vec::new(); + for (_, back_edges_for_shard) in dependent_edges.into_iter().enumerate() { + let mut back_edges = Vec::new(); + for (source_index, dependent_indices) in back_edges_for_shard { + back_edges.push(CrossShardDependentEdges::new( + source_index, + dependent_indices, + )); + } + back_edges_vec.push(back_edges); + } + self.cross_shard_client + .broadcast_and_collect_dependent_edges(back_edges_vec) + } + + fn group_dependent_edges_by_source_idx( + &self, + dependent_edges_vec: Vec>, + ) -> Vec<(TxnIndex, CrossShardEdges)> { + // combine the back edges from different shards by source txn index + let mut dependent_edges_by_source_index = HashMap::new(); + for (_, dependent_edges) in dependent_edges_vec.into_iter().enumerate() { + for dependent_edge in dependent_edges { + let source_index = dependent_edge.source_txn_index; + let dep_edges_for_idx = dependent_edges_by_source_index + .entry(source_index) + .or_insert_with(CrossShardEdges::default); + for (dependent_idx, storage_locations) in dependent_edge.dependent_edges.into_iter() + { + dep_edges_for_idx.add_edge(dependent_idx, storage_locations); + } + } + } + // sort the back edges by source txn index and return a vector + let mut dep_edges_vec = dependent_edges_by_source_index.into_iter().collect_vec(); + dep_edges_vec.sort_by_key(|(source_index, _)| *source_index); + dep_edges_vec + } + + fn add_dependent_edges_to_sub_blocks( + &mut self, + dependent_edges: Vec<(TxnIndex, CrossShardEdges)>, + ) { + let mut current_sub_block_index = 0; + let mut current_sub_block = self.froze_sub_blocks.get_sub_block_mut(0).unwrap(); + // Since the dependent edges are sorted by source txn index, we can iterate through the sub blocks and add the back edges to the sub 
blocks + for (source_index, dependent_edges) in dependent_edges.into_iter() { + while source_index >= current_sub_block.end_index() { + current_sub_block_index += 1; + current_sub_block = self + .froze_sub_blocks + .get_sub_block_mut(current_sub_block_index) + .unwrap(); + } + + for (dependent_idx, storage_locations) in dependent_edges.into_iter() { + current_sub_block.add_dependent_edge( + source_index, + dependent_idx, + storage_locations, + ); + } + } + } + + pub fn into_frozen_sub_blocks(self) -> SubBlocksForShard { + self.froze_sub_blocks + } +} + +#[cfg(test)] +mod tests { + use crate::{ + sharded_block_partitioner::{ + cross_shard_messages::{CrossShardDependentEdges, MockCrossShardClient}, + dependent_edges::DependentEdgeCreator, + }, + test_utils::create_non_conflicting_p2p_transaction, + }; + use aptos_types::{ + block_executor::partitioner::{ + CrossShardDependencies, CrossShardEdges, SubBlock, SubBlocksForShard, + TransactionWithDependencies, TxnIdxWithShardId, + }, + transaction::analyzed_transaction::StorageLocation, + }; + use itertools::Itertools; + use std::sync::Arc; + + #[test] + fn test_create_dependent_edges() { + let shard_id = 0; + let start_index = 0; + let num_shards = 3; + + let mut transactions_with_deps = Vec::new(); + for _ in 0..10 { + transactions_with_deps.push(TransactionWithDependencies::new( + create_non_conflicting_p2p_transaction(), + CrossShardDependencies::default(), + )); + } + + // cross shard dependent edges from shard 1 + let mut dependent_edges_from_shard_1 = vec![]; + let txn_4_storgae_location: Vec = + transactions_with_deps[4].txn.write_hints().to_vec(); + let txn_5_storgae_location: Vec = + transactions_with_deps[5].txn.write_hints().to_vec(); + // Txn 11 is dependent on Txn 4 + dependent_edges_from_shard_1.push(CrossShardDependentEdges::new( + 4, + CrossShardEdges::new( + TxnIdxWithShardId::new(11, 1), + txn_4_storgae_location.clone(), + ), + )); + // Txn 12 is dependent on Txn 5 + dependent_edges_from_shard_1.push(CrossShardDependentEdges::new( + 5, + CrossShardEdges::new( + TxnIdxWithShardId::new(12, 1), + txn_5_storgae_location.clone(), + ), + )); + + // cross shard dependent edges from shard 2 + let dependent_edges_shard_2 = vec![ + // Txn 21 is dependent on Txn 4 + CrossShardDependentEdges::new( + 4, + CrossShardEdges::new( + TxnIdxWithShardId::new(21, 2), + txn_4_storgae_location.clone(), + ), + ), + // Txn 22 is dependent on Txn 5 + CrossShardDependentEdges::new( + 5, + CrossShardEdges::new( + TxnIdxWithShardId::new(22, 2), + txn_5_storgae_location.clone(), + ), + ), + ]; + + let cross_shard_client = Arc::new(MockCrossShardClient { + rw_set_results: vec![], + write_set_with_index_results: vec![], + num_accepted_txns_results: vec![], + dependent_edges_results: vec![dependent_edges_from_shard_1, dependent_edges_shard_2], + }); + + let mut sub_blocks = SubBlocksForShard::empty(shard_id); + let sub_block = SubBlock::new( + start_index, + transactions_with_deps + .iter() + .map(|txn_with_deps| { + TransactionWithDependencies::new( + txn_with_deps.txn.transaction().clone(), + txn_with_deps.cross_shard_dependencies.clone(), + ) + }) + .collect_vec(), + ); + sub_blocks.add_sub_block(sub_block); + + let mut dependent_edge_creator = + DependentEdgeCreator::new(shard_id, cross_shard_client, sub_blocks, num_shards); + + dependent_edge_creator.create_dependent_edges(&[], 0); + + let sub_blocks_with_dependent_edges = dependent_edge_creator.into_frozen_sub_blocks(); + assert_eq!(sub_blocks_with_dependent_edges.num_sub_blocks(), 1); + let sub_block = 
sub_blocks_with_dependent_edges.get_sub_block(0).unwrap(); + assert_eq!(sub_block.num_txns(), 10); + + let dependent_storage_locs = sub_block.transactions_with_deps()[4] + .cross_shard_dependencies + .get_dependent_edge_for(TxnIdxWithShardId::new(11, 1)) + .unwrap(); + assert_eq!(dependent_storage_locs, &txn_4_storgae_location); + + let dependent_storage_locs = sub_block.transactions_with_deps()[5] + .cross_shard_dependencies + .get_dependent_edge_for(TxnIdxWithShardId::new(12, 1)) + .unwrap(); + assert_eq!(dependent_storage_locs, &txn_5_storgae_location); + + let dependent_storage_locs = sub_block.transactions_with_deps()[4] + .cross_shard_dependencies + .get_dependent_edge_for(TxnIdxWithShardId::new(21, 2)) + .unwrap(); + assert_eq!(dependent_storage_locs, &txn_4_storgae_location); + + let dependent_storage_locs = sub_block.transactions_with_deps()[5] + .cross_shard_dependencies + .get_dependent_edge_for(TxnIdxWithShardId::new(22, 2)) + .unwrap(); + assert_eq!(dependent_storage_locs, &txn_5_storgae_location); + } +} diff --git a/execution/block-partitioner/src/sharded_block_partitioner/messages.rs b/execution/block-partitioner/src/sharded_block_partitioner/messages.rs new file mode 100644 index 0000000000000..c4f0b60a0a982 --- /dev/null +++ b/execution/block-partitioner/src/sharded_block_partitioner/messages.rs @@ -0,0 +1,84 @@ +// Copyright © Aptos Foundation + +use crate::sharded_block_partitioner::dependency_analysis::WriteSetWithTxnIndex; +use aptos_types::{ + block_executor::partitioner::{SubBlocksForShard, TxnIndex}, + transaction::{analyzed_transaction::AnalyzedTransaction, Transaction}, +}; +use std::sync::Arc; + +pub struct DiscardCrossShardDep { + pub transactions: Vec, + // The frozen dependencies in previous chunks. + pub prev_rounds_write_set_with_index: Arc>, + pub current_round_start_index: TxnIndex, + // This is the frozen sub block for the current shard and is passed because we want to modify + // it to add dependency back edges. + pub frozen_sub_blocks: SubBlocksForShard, +} + +impl DiscardCrossShardDep { + pub fn new( + transactions: Vec, + prev_rounds_write_set_with_index: Arc>, + current_round_start_index: TxnIndex, + frozen_sub_blocks: SubBlocksForShard, + ) -> Self { + Self { + transactions, + prev_rounds_write_set_with_index, + current_round_start_index, + frozen_sub_blocks, + } + } +} + +pub struct AddWithCrossShardDep { + pub transactions: Vec, + pub index_offset: TxnIndex, + // The frozen dependencies in previous chunks. 
+ pub prev_rounds_write_set_with_index: Arc>, + pub frozen_sub_blocks: SubBlocksForShard, +} + +impl AddWithCrossShardDep { + pub fn new( + transactions: Vec, + index_offset: TxnIndex, + prev_rounds_write_set_with_index: Arc>, + frozen_sub_blocks: SubBlocksForShard, + ) -> Self { + Self { + transactions, + index_offset, + prev_rounds_write_set_with_index, + frozen_sub_blocks, + } + } +} + +pub struct PartitioningResp { + pub frozen_sub_blocks: SubBlocksForShard, + pub write_set_with_index: WriteSetWithTxnIndex, + pub discarded_txns: Vec, +} + +impl PartitioningResp { + pub fn new( + frozen_sub_blocks: SubBlocksForShard, + write_set_with_index: WriteSetWithTxnIndex, + discarded_txns: Vec, + ) -> Self { + Self { + frozen_sub_blocks, + write_set_with_index, + discarded_txns, + } + } +} + +pub enum ControlMsg { + DiscardCrossShardDepReq(DiscardCrossShardDep), + AddCrossShardDepReq(AddWithCrossShardDep), + Stop, +} diff --git a/execution/block-partitioner/src/sharded_block_partitioner/mod.rs b/execution/block-partitioner/src/sharded_block_partitioner/mod.rs new file mode 100644 index 0000000000000..fe54d2de4d3ab --- /dev/null +++ b/execution/block-partitioner/src/sharded_block_partitioner/mod.rs @@ -0,0 +1,770 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::sharded_block_partitioner::{ + cross_shard_messages::CrossShardMsg, + dependency_analysis::WriteSetWithTxnIndex, + messages::{ + AddWithCrossShardDep, ControlMsg, + ControlMsg::{AddCrossShardDepReq, DiscardCrossShardDepReq}, + DiscardCrossShardDep, PartitioningResp, + }, + partitioning_shard::PartitioningShard, +}; +use aptos_logger::{error, info}; +use aptos_types::{ + block_executor::partitioner::{ShardId, SubBlocksForShard, TxnIndex}, + transaction::{analyzed_transaction::AnalyzedTransaction, Transaction}, +}; +use itertools::Itertools; +use std::{ + collections::HashMap, + sync::{ + mpsc::{Receiver, Sender}, + Arc, + }, + thread, +}; + +mod conflict_detector; +mod cross_shard_messages; +mod dependency_analysis; +mod dependent_edges; +mod messages; +mod partitioning_shard; + +/// A sharded block partitioner that partitions a block into multiple transaction chunks. +/// On a high level, the partitioning process is as follows: +/// ```plaintext +/// 1. A block is partitioned into equally sized transaction chunks and sent to each shard. +/// +/// Block: +/// +/// T1 {write set: A, B} +/// T2 {write set: B, C} +/// T3 {write set: C, D} +/// T4 {write set: D, E} +/// T5 {write set: E, F} +/// T6 {write set: F, G} +/// T7 {write set: G, H} +/// T8 {write set: H, I} +/// T9 {write set: I, J} +/// +/// 2. Discard a bunch of transactions from the chunks and create new chunks so that +/// there is no cross-shard dependency between transactions in a chunk. +/// 2.1 Following information is passed to each shard: +/// - candidate transaction chunks to be partitioned +/// - previously frozen transaction chunks (if any) +/// - read-write set index mapping from previous iteration (if any) - this contains the maximum absolute index +/// of the transaction that read/wrote to a storage location indexed by the storage location. +/// 2.2 Each shard creates a read-write set for all transactions in the chunk and broadcasts it to all other shards. 
+/// Shard 0 Shard 1 Shard 2 +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// | Read-Write Set | | Read-Write Set | | Read-Write Set | +/// | | | | | | +/// | T1 {A, B} | | T4 {D, E} | | T7 {G, H} | +/// | T2 {B, C} | | T5 {E, F} | | T8 {H, I} | +/// | T3 {C, D} | | T6 {F, G} | | T9 {I, J} | +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// 2.3 Each shard collects read-write sets from all other shards and discards transactions that have cross-shard dependencies. +/// Shard 0 Shard 1 Shard 2 +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// | Discarded Txns | | Discarded Txns | | Discarded Txns | +/// | | | | | | +/// | - T3 (cross-shard dependency with T4) | | - T6 (cross-shard dependency with T7) | | No discard | +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// 2.4 Each shard broadcasts the number of transactions that it plans to put in the current chunk. +/// Shard 0 Shard 1 Shard 2 +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// | Chunk Count | | Chunk Count | | Chunk Count | +/// | | | | | | +/// | 2 | | 2 | | 3 | +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// 2.5 Each shard collects the number of transactions that all other shards plan to put in the current chunk and based +/// on that, it finalizes the absolute index offset of the current chunk. It uses this information to create a read-write set +/// index, which is a mapping of all the storage location to the maximum absolute index of the transaction that read/wrote to that location. +/// Shard 0 Shard 1 Shard 2 +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// | Index Offset | | Index Offset | | Index Offset | +/// | | | | | | +/// | 0 | | 2 | | 4 | +/// +----------------------------+ +-------------------------------+ +-------------------------------+ +/// 2.6 It also uses the read-write set index mapping passed in previous iteration to add cross-shard dependencies to the transactions. This is +/// done by looking up the read-write set index for each storage location that a transaction reads/writes to and adding a cross-shard dependency +/// 2.7 Returns two lists of transactions: one list of transactions that are discarded and another list of transactions that are kept. +/// 3. Use the discarded transactions to create new chunks and repeat the step 2 until N iterations. +/// 4. For remaining transaction chunks, add cross-shard dependencies to the transactions. This is done as follows: +/// 4.1 Create a read-write set with index mapping for all the transactions in the remaining chunks. +/// 4.2 Broadcast and collect read-write set with index mapping from all shards. +/// 4.3 Add cross-shard dependencies to the transactions in the remaining chunks by looking up the read-write set index +/// for each storage location that a transaction reads/writes to. The idea is to find the maximum transaction index +/// that reads/writes to the same location and add that as a dependency. This can be done as follows: First look up the read-write set index +/// mapping received from other shards in current iteration in descending order of shard id. 
If the read-write set index is not found, +/// look up the read-write set index mapping received from other shards in previous iteration(s) in descending order of shard id. +/// ``` +/// +pub struct ShardedBlockPartitioner { + num_shards: usize, + control_txs: Vec>, + result_rxs: Vec>, + shard_threads: Vec>, +} + +impl ShardedBlockPartitioner { + pub fn new(num_shards: usize) -> Self { + info!( + "Creating a new sharded block partitioner with {} shards", + num_shards + ); + assert!(num_shards > 0, "num_partitioning_shards must be > 0"); + // create channels for cross shard messages across all shards. This is a full mesh connection. + // Each shard has a vector of channels for sending messages to other shards and + // a vector of channels for receiving messages from other shards. + let mut messages_txs = vec![]; + let mut messages_rxs = vec![]; + for _ in 0..num_shards { + messages_txs.push(vec![]); + messages_rxs.push(vec![]); + for _ in 0..num_shards { + let (messages_tx, messages_rx) = std::sync::mpsc::channel(); + messages_txs.last_mut().unwrap().push(messages_tx); + messages_rxs.last_mut().unwrap().push(messages_rx); + } + } + let mut control_txs = vec![]; + let mut result_rxs = vec![]; + let mut shard_join_handles = vec![]; + for (i, message_rxs) in messages_rxs.into_iter().enumerate() { + let (control_tx, control_rx) = std::sync::mpsc::channel(); + let (result_tx, result_rx) = std::sync::mpsc::channel(); + control_txs.push(control_tx); + result_rxs.push(result_rx); + shard_join_handles.push(spawn_partitioning_shard( + i, + control_rx, + result_tx, + message_rxs, + messages_txs.iter().map(|txs| txs[i].clone()).collect(), + )); + } + Self { + num_shards, + control_txs, + result_rxs, + shard_threads: shard_join_handles, + } + } + + // reorders the transactions so that transactions from the same sender always go to the same shard. + // This places transactions from the same sender next to each other, which is not optimal for parallelism. 
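+ // Editor's illustration (hypothetical numbers): given 8 transactions from senders A, A, A, B, B, C, C, C and
+ // num_shards = 2, approx_txns_per_shard is 4, so A's and B's transactions fill the first chunk (shard 0) and
+ // C's transactions start a new chunk (shard 1); a single sender's transactions are never split across shards.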
+ // TODO(skedia): Improve this logic to shuffle senders + fn partition_by_senders( + &self, + txns: Vec, + ) -> Vec> { + let approx_txns_per_shard = (txns.len() as f64 / self.num_shards as f64).ceil() as usize; + let mut sender_to_txns = HashMap::new(); + let mut sender_order = Vec::new(); // Track sender ordering + + for txn in txns { + let sender = txn.sender().unwrap(); + let entry = sender_to_txns.entry(sender).or_insert_with(Vec::new); + entry.push(txn); + if entry.len() == 1 { + sender_order.push(sender); // Add sender to the order vector + } + } + + let mut result = Vec::new(); + result.push(Vec::new()); + + for sender in sender_order { + let txns = sender_to_txns.remove(&sender).unwrap(); + let txns_in_shard = result.last().unwrap().len(); + + if txns_in_shard < approx_txns_per_shard { + result.last_mut().unwrap().extend(txns); + } else { + result.push(txns); + } + } + + // pad the rest of the shard with empty txns + for _ in result.len()..self.num_shards { + result.push(Vec::new()); + } + + result + } + + fn send_partition_msgs(&self, partition_msg: Vec) { + for (i, msg) in partition_msg.into_iter().enumerate() { + self.control_txs[i].send(msg).unwrap(); + } + } + + fn collect_partition_block_response( + &self, + ) -> ( + Vec>, + Vec, + Vec>, + ) { + let mut frozen_sub_blocks = Vec::new(); + let mut frozen_write_set_with_index = Vec::new(); + let mut rejected_txns_vec = Vec::new(); + for rx in &self.result_rxs { + let PartitioningResp { + frozen_sub_blocks: frozen_chunk, + write_set_with_index, + discarded_txns: rejected_txns, + } = rx.recv().unwrap(); + frozen_sub_blocks.push(frozen_chunk); + frozen_write_set_with_index.push(write_set_with_index); + rejected_txns_vec.push(rejected_txns); + } + ( + frozen_sub_blocks, + frozen_write_set_with_index, + rejected_txns_vec, + ) + } + + fn discard_txns_with_cross_shard_dependencies( + &self, + txns_to_partition: Vec>, + current_round_start_index: TxnIndex, + frozen_sub_blocks: Vec>, + frozen_write_set_with_index: Arc>, + ) -> ( + Vec>, + Vec, + Vec>, + ) { + let partition_block_msgs = txns_to_partition + .into_iter() + .zip_eq(frozen_sub_blocks.into_iter()) + .map(|(txns, sub_blocks)| { + DiscardCrossShardDepReq(DiscardCrossShardDep::new( + txns, + frozen_write_set_with_index.clone(), + current_round_start_index, + sub_blocks, + )) + }) + .collect(); + self.send_partition_msgs(partition_block_msgs); + self.collect_partition_block_response() + } + + fn add_cross_shard_dependencies( + &self, + index_offset: usize, + remaining_txns_vec: Vec>, + frozen_sub_blocks_by_shard: Vec>, + frozen_write_set_with_index: Arc>, + ) -> ( + Vec>, + Vec, + Vec>, + ) { + let mut index_offset = index_offset; + let partition_block_msgs = remaining_txns_vec + .into_iter() + .zip_eq(frozen_sub_blocks_by_shard.into_iter()) + .map(|(remaining_txns, frozen_sub_blocks)| { + let remaining_txns_len = remaining_txns.len(); + let partitioning_msg = AddCrossShardDepReq(AddWithCrossShardDep::new( + remaining_txns, + index_offset, + frozen_write_set_with_index.clone(), + frozen_sub_blocks, + )); + index_offset += remaining_txns_len; + partitioning_msg + }) + .collect::>(); + self.send_partition_msgs(partition_block_msgs); + self.collect_partition_block_response() + } + + /// We repeatedly partition chunks, discarding a bunch of transactions with cross-shard dependencies. The set of discarded + /// transactions are used as candidate chunks in the next round. This process is repeated until num_partitioning_rounds. 
+ /// The remaining transactions are then added to the chunks with cross-shard dependencies. + pub fn partition( + &self, + transactions: Vec, + num_partitioning_round: usize, + ) -> Vec> { + let total_txns = transactions.len(); + if total_txns == 0 { + return vec![]; + } + + // First round, we filter all transactions with cross-shard dependencies + let mut txns_to_partition = self.partition_by_senders(transactions); + let mut frozen_write_set_with_index = Arc::new(Vec::new()); + let mut current_round_start_index = 0; + let mut frozen_sub_blocks: Vec> = vec![]; + for shard_id in 0..self.num_shards { + frozen_sub_blocks.push(SubBlocksForShard::empty(shard_id)) + } + + for _ in 0..num_partitioning_round { + let ( + updated_frozen_sub_blocks, + current_frozen_rw_set_with_index_vec, + discarded_txns_to_partition, + ) = self.discard_txns_with_cross_shard_dependencies( + txns_to_partition, + current_round_start_index, + frozen_sub_blocks, + frozen_write_set_with_index.clone(), + ); + // Current round start index is the sum of the number of transactions in the frozen sub-blocks + current_round_start_index = updated_frozen_sub_blocks + .iter() + .map(|sub_blocks| sub_blocks.num_txns()) + .sum::(); + let mut prev_frozen_write_set_with_index = + Arc::try_unwrap(frozen_write_set_with_index).unwrap(); + frozen_sub_blocks = updated_frozen_sub_blocks; + prev_frozen_write_set_with_index.extend(current_frozen_rw_set_with_index_vec); + frozen_write_set_with_index = Arc::new(prev_frozen_write_set_with_index); + txns_to_partition = discarded_txns_to_partition; + if txns_to_partition + .iter() + .map(|txns| txns.len()) + .sum::() + == 0 + { + return frozen_sub_blocks; + } + } + + // We just add cross shard dependencies for remaining transactions. + let (frozen_sub_blocks, _, rejected_txns) = self.add_cross_shard_dependencies( + current_round_start_index, + txns_to_partition, + frozen_sub_blocks, + frozen_write_set_with_index, + ); + + // Assert rejected transactions are empty + assert!(rejected_txns.iter().all(|txns| txns.is_empty())); + frozen_sub_blocks + } +} + +impl Drop for ShardedBlockPartitioner { + /// Best effort stops all the executor shards and waits for the thread to finish. + fn drop(&mut self) { + // send stop command to all executor shards + for control_tx in self.control_txs.iter() { + if let Err(e) = control_tx.send(ControlMsg::Stop) { + error!("Failed to send stop command to executor shard: {:?}", e); + } + } + + // wait for all executor shards to stop + for shard_thread in self.shard_threads.drain(..) 
{ + shard_thread.join().unwrap_or_else(|e| { + error!("Failed to join executor shard thread: {:?}", e); + }); + } + } +} + +fn spawn_partitioning_shard( + shard_id: ShardId, + control_rx: Receiver, + result_tx: Sender, + message_rxs: Vec>, + messages_txs: Vec>, +) -> thread::JoinHandle<()> { + // create and start a new executor shard in a separate thread + thread::Builder::new() + .name(format!("partitioning-shard-{}", shard_id)) + .spawn(move || { + let partitioning_shard = + PartitioningShard::new(shard_id, control_rx, result_tx, message_rxs, messages_txs); + partitioning_shard.start(); + }) + .unwrap() +} + +#[cfg(test)] +mod tests { + use crate::{ + sharded_block_partitioner::ShardedBlockPartitioner, + test_utils::{ + create_non_conflicting_p2p_transaction, create_signed_p2p_transaction, + generate_test_account, generate_test_account_for_address, TestAccount, + }, + }; + use aptos_crypto::hash::CryptoHash; + use aptos_types::{ + block_executor::partitioner::{SubBlock, TxnIdxWithShardId}, + transaction::{analyzed_transaction::AnalyzedTransaction, Transaction}, + }; + use move_core_types::account_address::AccountAddress; + use rand::{rngs::OsRng, Rng}; + use std::collections::HashMap; + + fn verify_no_cross_shard_dependency(sub_blocks_for_shards: Vec>) { + for sub_blocks in sub_blocks_for_shards { + for txn in sub_blocks.iter() { + assert_eq!(txn.cross_shard_dependencies().num_required_edges(), 0); + } + } + } + + #[test] + // Test that the partitioner works correctly for a single sender and multiple receivers. + // In this case the expectation is that only the first shard will contain transactions and all + // other shards will be empty. + fn test_single_sender_txns() { + let mut sender = generate_test_account(); + let mut receivers = Vec::new(); + let num_txns = 10; + for _ in 0..num_txns { + receivers.push(generate_test_account()); + } + let transactions = create_signed_p2p_transaction( + &mut sender, + receivers.iter().collect::>(), + ); + let partitioner = ShardedBlockPartitioner::new(4); + let sub_blocks = partitioner.partition(transactions.clone(), 1); + assert_eq!(sub_blocks.len(), 4); + // The first shard should contain all the transactions + assert_eq!(sub_blocks[0].num_txns(), num_txns); + // The rest of the shards should be empty + for sub_blocks in sub_blocks.iter().take(4).skip(1) { + assert_eq!(sub_blocks.num_txns(), 0); + } + // Verify that the transactions are in the same order as the original transactions and cross shard + // dependencies are empty. + for (i, txn) in sub_blocks[0].iter().enumerate() { + assert_eq!(txn.txn(), transactions[i].transaction()); + assert_eq!(txn.cross_shard_dependencies().num_required_edges(), 0); + } + } + + #[test] + // Test that the partitioner works correctly for no conflict transactions. In this case, the + // expectation is that no transaction is reordered. + fn test_non_conflicting_txns() { + let num_txns = 4; + let num_shards = 2; + let mut transactions = Vec::new(); + for _ in 0..num_txns { + transactions.push(create_non_conflicting_p2p_transaction()) + } + let partitioner = ShardedBlockPartitioner::new(num_shards); + let partitioned_txns = partitioner.partition(transactions.clone(), 1); + assert_eq!(partitioned_txns.len(), num_shards); + // Verify that the transactions are in the same order as the original transactions and cross shard + // dependencies are empty. 
+ let mut current_index = 0; + for sub_blocks_for_shard in partitioned_txns.into_iter() { + assert_eq!(sub_blocks_for_shard.num_txns(), num_txns / num_shards); + for txn in sub_blocks_for_shard.iter() { + assert_eq!(txn.txn(), transactions[current_index].transaction()); + assert_eq!(txn.cross_shard_dependencies().num_required_edges(), 0); + current_index += 1; + } + } + } + + #[test] + fn test_same_sender_in_one_shard() { + let num_shards = 3; + let mut sender = generate_test_account(); + let mut txns_from_sender = Vec::new(); + for _ in 0..5 { + txns_from_sender.push( + create_signed_p2p_transaction(&mut sender, vec![&generate_test_account()]) + .remove(0), + ); + } + let mut non_conflicting_transactions = Vec::new(); + for _ in 0..5 { + non_conflicting_transactions.push(create_non_conflicting_p2p_transaction()); + } + + let mut transactions = Vec::new(); + let mut txn_from_sender_index = 0; + let mut non_conflicting_txn_index = 0; + transactions.push(non_conflicting_transactions[non_conflicting_txn_index].clone()); + non_conflicting_txn_index += 1; + transactions.push(txns_from_sender[txn_from_sender_index].clone()); + txn_from_sender_index += 1; + transactions.push(txns_from_sender[txn_from_sender_index].clone()); + txn_from_sender_index += 1; + transactions.push(non_conflicting_transactions[non_conflicting_txn_index].clone()); + non_conflicting_txn_index += 1; + transactions.push(txns_from_sender[txn_from_sender_index].clone()); + txn_from_sender_index += 1; + transactions.push(txns_from_sender[txn_from_sender_index].clone()); + txn_from_sender_index += 1; + transactions.push(non_conflicting_transactions[non_conflicting_txn_index].clone()); + transactions.push(txns_from_sender[txn_from_sender_index].clone()); + + let partitioner = ShardedBlockPartitioner::new(num_shards); + let sub_blocks = partitioner.partition(transactions.clone(), 1); + assert_eq!(sub_blocks.len(), num_shards); + assert_eq!(sub_blocks[0].num_sub_blocks(), 1); + assert_eq!(sub_blocks[1].num_sub_blocks(), 1); + assert_eq!(sub_blocks[2].num_sub_blocks(), 1); + assert_eq!(sub_blocks[0].num_txns(), 6); + assert_eq!(sub_blocks[1].num_txns(), 2); + assert_eq!(sub_blocks[2].num_txns(), 0); + + // verify that all transactions from the sender end up in shard 0 + for (txn_from_sender, txn) in txns_from_sender.iter().zip(sub_blocks[0].iter().skip(1)) { + assert_eq!(txn.txn(), txn_from_sender.transaction()); + } + verify_no_cross_shard_dependency( + sub_blocks + .iter() + .flat_map(|sub_blocks| sub_blocks.sub_block_iter()) + .cloned() + .collect(), + ); + } + + #[test] + fn test_cross_shard_dependencies() { + let num_shards = 3; + let mut account1 = generate_test_account_for_address(AccountAddress::new([0; 32])); + let mut account2 = generate_test_account_for_address(AccountAddress::new([1; 32])); + let account3 = generate_test_account_for_address(AccountAddress::new([2; 32])); + let mut account4 = generate_test_account_for_address(AccountAddress::new([4; 32])); + let account5 = generate_test_account_for_address(AccountAddress::new([5; 32])); + let account6 = generate_test_account_for_address(AccountAddress::new([6; 32])); + let mut account7 = generate_test_account_for_address(AccountAddress::new([7; 32])); + let account8 = generate_test_account_for_address(AccountAddress::new([8; 32])); + let account9 = generate_test_account_for_address(AccountAddress::new([9; 32])); + + let txn0 = create_signed_p2p_transaction(&mut account1, vec![&account2]).remove(0); // txn 0 + let txn1 = create_signed_p2p_transaction(&mut account1, 
vec![&account3]).remove(0); // txn 1 + let txn2 = create_signed_p2p_transaction(&mut account2, vec![&account3]).remove(0); // txn 2 + // Should go in shard 1 + let txn3 = create_signed_p2p_transaction(&mut account4, vec![&account5]).remove(0); // txn 3 + let txn4 = create_signed_p2p_transaction(&mut account4, vec![&account6]).remove(0); // txn 4 + let txn5 = create_signed_p2p_transaction(&mut account4, vec![&account6]).remove(0); // txn 5 + // Should go in shard 2 + let txn6 = create_signed_p2p_transaction(&mut account7, vec![&account8]).remove(0); // txn 6 + let txn7 = create_signed_p2p_transaction(&mut account7, vec![&account9]).remove(0); // txn 7 + let txn8 = create_signed_p2p_transaction(&mut account4, vec![&account7]).remove(0); // txn 8 + + let transactions = vec![ + txn0.clone(), + txn1.clone(), + txn2.clone(), + txn3.clone(), + txn4.clone(), + txn5.clone(), + txn6.clone(), + txn7.clone(), + txn8.clone(), + ]; + + let partitioner = ShardedBlockPartitioner::new(num_shards); + let partitioned_sub_blocks = partitioner.partition(transactions, 1); + assert_eq!(partitioned_sub_blocks.len(), num_shards); + + // In first round of the partitioning, we should have txn0, txn1 and txn2 in shard 0 and + // txn3, txn4, txn5 and txn8 in shard 1 and 0 in shard 2. Please note that txn8 is moved to + // shard 1 because of sender based reordering. + assert_eq!( + partitioned_sub_blocks[0] + .get_sub_block(0) + .unwrap() + .num_txns(), + 3 + ); + assert_eq!( + partitioned_sub_blocks[1] + .get_sub_block(0) + .unwrap() + .num_txns(), + 4 + ); + assert_eq!( + partitioned_sub_blocks[2] + .get_sub_block(0) + .unwrap() + .num_txns(), + 0 + ); + + assert_eq!( + partitioned_sub_blocks[0] + .get_sub_block(0) + .unwrap() + .iter() + .map(|x| x.txn.clone()) + .collect::>(), + vec![txn0.into_txn(), txn1.into_txn(), txn2.into_txn()] + ); + assert_eq!( + partitioned_sub_blocks[1] + .get_sub_block(0) + .unwrap() + .iter() + .map(|x| x.txn.clone()) + .collect::>(), + vec![ + txn3.into_txn(), + txn4.into_txn(), + txn5.into_txn(), + txn8.into_txn() + ] + ); + // + // // Rest of the transactions will be added in round 2 along with their dependencies + assert_eq!( + partitioned_sub_blocks[0] + .get_sub_block(1) + .unwrap() + .num_txns(), + 0 + ); + assert_eq!( + partitioned_sub_blocks[1] + .get_sub_block(1) + .unwrap() + .num_txns(), + 0 + ); + assert_eq!( + partitioned_sub_blocks[2] + .get_sub_block(1) + .unwrap() + .num_txns(), + 2 + ); + + assert_eq!( + partitioned_sub_blocks[2] + .get_sub_block(1) + .unwrap() + .iter() + .map(|x| x.txn.clone()) + .collect::>(), + vec![txn6.into_txn(), txn7.into_txn()] + ); + + // Verify transaction dependencies + verify_no_cross_shard_dependency(vec![ + partitioned_sub_blocks[0].get_sub_block(0).unwrap().clone(), + partitioned_sub_blocks[1].get_sub_block(0).unwrap().clone(), + partitioned_sub_blocks[2].get_sub_block(0).unwrap().clone(), + ]); + // Verify transaction depends_on and dependency list + + // txn6 (index 7) and txn7 (index 8) depends on txn8 (index 6) + partitioned_sub_blocks[2] + .get_sub_block(1) + .unwrap() + .iter() + .for_each(|txn| { + let required_deps = txn + .cross_shard_dependencies + .get_required_edge_for(TxnIdxWithShardId::new(6, 1)) + .unwrap(); + // txn (6, 7) and 8 has conflict only on the coin store of account 7 as txn (6,7) are sending + // from account 7 and txn 8 is receiving in account 7 + assert_eq!(required_deps.len(), 1); + assert_eq!( + required_deps[0], + AnalyzedTransaction::coin_store_location(account7.account_address) + ); + }); + + // Verify 
the dependent edges, again the conflict is only on the coin store of account 7 + let required_deps = partitioned_sub_blocks[1] + .get_sub_block(0) + .unwrap() + .transactions[3] + .cross_shard_dependencies + .get_dependent_edge_for(TxnIdxWithShardId::new(7, 2)) + .unwrap(); + assert_eq!(required_deps.len(), 1); + assert_eq!( + required_deps[0], + AnalyzedTransaction::coin_store_location(account7.account_address) + ); + + let required_deps = partitioned_sub_blocks[1] + .get_sub_block(0) + .unwrap() + .transactions[3] + .cross_shard_dependencies + .get_dependent_edge_for(TxnIdxWithShardId::new(8, 2)) + .unwrap(); + assert_eq!(required_deps.len(), 1); + assert_eq!( + required_deps[0], + AnalyzedTransaction::coin_store_location(account7.account_address) + ); + } + + #[test] + // Generates a bunch of random transactions and ensures that after the partitioning, there is + // no conflict across shards. + fn test_no_conflict_across_shards_in_first_round() { + let mut rng = OsRng; + let max_accounts = 500; + let max_txns = 2000; + let max_num_shards = 64; + let num_accounts = rng.gen_range(1, max_accounts); + let mut accounts = Vec::new(); + for _ in 0..num_accounts { + accounts.push(generate_test_account()); + } + let num_txns = rng.gen_range(1, max_txns); + let mut transactions = Vec::new(); + let mut txns_by_hash = HashMap::new(); + let num_shards = rng.gen_range(1, max_num_shards); + + for _ in 0..num_txns { + // randomly select a sender and receiver from accounts + let sender_index = rng.gen_range(0, accounts.len()); + let mut sender = accounts.swap_remove(sender_index); + let receiver_index = rng.gen_range(0, accounts.len()); + let receiver = accounts.get(receiver_index).unwrap(); + let analyzed_txn = create_signed_p2p_transaction(&mut sender, vec![receiver]).remove(0); + txns_by_hash.insert(analyzed_txn.transaction().hash(), analyzed_txn.clone()); + transactions.push(analyzed_txn); + accounts.push(sender) + } + let partitioner = ShardedBlockPartitioner::new(num_shards); + let partitioned_txns = partitioner.partition(transactions, 1); + // Build a map of storage location to corresponding shards in first round + // and ensure that no storage location is present in more than one shard. 
+ let mut storage_location_to_shard_map = HashMap::new(); + for (shard_id, txns) in partitioned_txns.iter().enumerate() { + let first_round_sub_block = txns.get_sub_block(0).unwrap(); + for txn in first_round_sub_block.iter() { + let analyzed_txn = txns_by_hash.get(&txn.txn.hash()).unwrap(); + let storage_locations = analyzed_txn + .read_hints() + .iter() + .chain(analyzed_txn.write_hints().iter()); + for storage_location in storage_locations { + if storage_location_to_shard_map.contains_key(storage_location) { + assert_eq!( + storage_location_to_shard_map.get(storage_location).unwrap(), + &shard_id + ); + } else { + storage_location_to_shard_map.insert(storage_location, shard_id); + } + } + } + } + } +} diff --git a/execution/block-partitioner/src/sharded_block_partitioner/partitioning_shard.rs b/execution/block-partitioner/src/sharded_block_partitioner/partitioning_shard.rs new file mode 100644 index 0000000000000..9827dc92de5bf --- /dev/null +++ b/execution/block-partitioner/src/sharded_block_partitioner/partitioning_shard.rs @@ -0,0 +1,175 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +use crate::sharded_block_partitioner::{ + conflict_detector::CrossShardConflictDetector, + cross_shard_messages::{CrossShardClient, CrossShardClientInterface, CrossShardMsg}, + dependency_analysis::{RWSet, WriteSetWithTxnIndex}, + dependent_edges::DependentEdgeCreator, + messages::{AddWithCrossShardDep, ControlMsg, DiscardCrossShardDep, PartitioningResp}, +}; +use aptos_logger::trace; +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +use aptos_types::block_executor::partitioner::{ShardId, SubBlock, TransactionWithDependencies}; +use aptos_types::transaction::Transaction; +use std::sync::{ + mpsc::{Receiver, Sender}, + Arc, +}; + +pub struct PartitioningShard { + num_shards: usize, + shard_id: ShardId, + control_rx: Receiver, + result_tx: Sender, + cross_shard_client: Arc, +} + +impl PartitioningShard { + pub fn new( + shard_id: ShardId, + control_rx: Receiver, + result_tx: Sender, + message_rxs: Vec>, + message_txs: Vec>, + ) -> Self { + let num_shards = message_txs.len(); + let cross_shard_client = + Arc::new(CrossShardClient::new(shard_id, message_rxs, message_txs)); + Self { + num_shards, + shard_id, + control_rx, + result_tx, + cross_shard_client, + } + } + + fn discard_txns_with_cross_shard_deps(&self, partition_msg: DiscardCrossShardDep) { + let DiscardCrossShardDep { + transactions, + prev_rounds_write_set_with_index, + current_round_start_index, + frozen_sub_blocks, + } = partition_msg; + let mut conflict_detector = CrossShardConflictDetector::new(self.shard_id, self.num_shards); + // If transaction filtering is allowed, we need to prepare the dependency analysis and broadcast it to other shards + // Based on the dependency analysis received from other shards, we will reject transactions that are conflicting with + // transactions in other shards + let read_write_set = RWSet::new(&transactions); + let cross_shard_rw_set = self + .cross_shard_client + .broadcast_and_collect_rw_set(read_write_set); + let (accepted_txns, accepted_cross_shard_dependencies, rejected_txns) = conflict_detector + .discard_txns_with_cross_shard_deps( + transactions, + &cross_shard_rw_set, + prev_rounds_write_set_with_index, + ); + + // Broadcast and collect the stats around number of accepted and rejected transactions from other shards + // this will be used to determine the absolute index of accepted transactions in this shard. 
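// Illustrative note, not part of this patch: a worked example of the index calculation
// below. If current_round_start_index is 10 and the collected per-shard accepted counts
// are [4, 2, 3], then shard 0 places its accepted transactions starting at global index
// 10, shard 1 at 10 + 4 = 14, and shard 2 at 10 + 4 + 2 = 16, since each shard only sums
// the accepted counts of shards with a smaller shard id.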
+ let accepted_txns_vec = self + .cross_shard_client + .broadcast_and_collect_num_accepted_txns(accepted_txns.len()); + // Calculate the absolute index of accepted transactions in this shard, which is the sum of all accepted transactions + // from other shards whose shard id is smaller than the current shard id and the current round start index + let num_accepted_txns = accepted_txns_vec.iter().take(self.shard_id).sum::(); + let index_offset = current_round_start_index + num_accepted_txns; + + // Now that we have finalized the global transaction index, we can add the dependent txn edges. + let mut dependent_edge_creator = DependentEdgeCreator::new( + self.shard_id, + self.cross_shard_client.clone(), + frozen_sub_blocks, + self.num_shards, + ); + dependent_edge_creator + .create_dependent_edges(&accepted_cross_shard_dependencies, index_offset); + + // Calculate the RWSetWithTxnIndex for the accepted transactions + let current_rw_set_with_index = WriteSetWithTxnIndex::new(&accepted_txns, index_offset); + + let accepted_txns_with_dependencies = accepted_txns + .into_iter() + .zip(accepted_cross_shard_dependencies.into_iter()) + .map(|(txn, dependencies)| { + TransactionWithDependencies::new(txn.into_txn(), dependencies) + }) + .collect::>>(); + + let mut frozen_sub_blocks = dependent_edge_creator.into_frozen_sub_blocks(); + let current_frozen_sub_block = SubBlock::new(index_offset, accepted_txns_with_dependencies); + frozen_sub_blocks.add_sub_block(current_frozen_sub_block); + // send the result back to the controller + self.result_tx + .send(PartitioningResp::new( + frozen_sub_blocks, + current_rw_set_with_index, + rejected_txns, + )) + .unwrap(); + } + + fn add_txns_with_cross_shard_deps(&self, partition_msg: AddWithCrossShardDep) { + let AddWithCrossShardDep { + transactions, + index_offset, + // The frozen dependencies in previous chunks. + prev_rounds_write_set_with_index, + mut frozen_sub_blocks, + } = partition_msg; + let conflict_detector = CrossShardConflictDetector::new(self.shard_id, self.num_shards); + + // Since txn filtering is not allowed, we can create the RW set with maximum txn + // index with the index offset passed. 
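// Illustrative note, not part of this patch: unlike the discard path above, nothing can be
// rejected in this phase, so every transaction's final global index is already known as
// index_offset plus its position in `transactions`. That is why the write set with
// transaction indices can be built here, before any cross-shard information is exchanged.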
+ let write_set_with_index_for_shard = WriteSetWithTxnIndex::new(&transactions, index_offset); + + let current_round_rw_set_with_index = self + .cross_shard_client + .broadcast_and_collect_write_set_with_index(write_set_with_index_for_shard.clone()); + let (current_frozen_sub_block, current_cross_shard_deps) = conflict_detector + .add_deps_for_frozen_sub_block( + transactions, + Arc::new(current_round_rw_set_with_index), + prev_rounds_write_set_with_index, + index_offset, + ); + + frozen_sub_blocks.add_sub_block(current_frozen_sub_block); + + let mut dependent_edge_creator = DependentEdgeCreator::new( + self.shard_id, + self.cross_shard_client.clone(), + frozen_sub_blocks, + self.num_shards, + ); + dependent_edge_creator.create_dependent_edges(¤t_cross_shard_deps, index_offset); + + self.result_tx + .send(PartitioningResp::new( + dependent_edge_creator.into_frozen_sub_blocks(), + write_set_with_index_for_shard, + vec![], + )) + .unwrap(); + } + + pub fn start(&self) { + loop { + let command = self.control_rx.recv().unwrap(); + match command { + ControlMsg::DiscardCrossShardDepReq(msg) => { + self.discard_txns_with_cross_shard_deps(msg); + }, + ControlMsg::AddCrossShardDepReq(msg) => { + self.add_txns_with_cross_shard_deps(msg); + }, + ControlMsg::Stop => { + break; + }, + } + } + trace!("Shard {} is shutting down", self.shard_id); + } +} diff --git a/execution/block-partitioner/src/test_utils.rs b/execution/block-partitioner/src/test_utils.rs new file mode 100644 index 0000000000000..b1766e7f73566 --- /dev/null +++ b/execution/block-partitioner/src/test_utils.rs @@ -0,0 +1,81 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_crypto::{ed25519::ed25519_keys::Ed25519PrivateKey, PrivateKey, SigningKey, Uniform}; +use aptos_types::{ + chain_id::ChainId, + transaction::{ + analyzed_transaction::AnalyzedTransaction, EntryFunction, RawTransaction, + SignedTransaction, Transaction, TransactionPayload, + }, + utility_coin::APTOS_COIN_TYPE, +}; +use move_core_types::{ + account_address::AccountAddress, identifier::Identifier, language_storage::ModuleId, +}; + +#[derive(Debug)] +pub struct TestAccount { + pub account_address: AccountAddress, + pub private_key: Ed25519PrivateKey, + pub sequence_number: u64, +} + +pub fn generate_test_account() -> TestAccount { + TestAccount { + account_address: AccountAddress::random(), + private_key: Ed25519PrivateKey::generate_for_testing(), + sequence_number: 0, + } +} + +pub fn generate_test_account_for_address(account_address: AccountAddress) -> TestAccount { + TestAccount { + account_address, + private_key: Ed25519PrivateKey::generate_for_testing(), + sequence_number: 0, + } +} + +pub fn create_non_conflicting_p2p_transaction() -> AnalyzedTransaction { + // create unique sender and receiver accounts so that there is no conflict + let mut sender = generate_test_account(); + let receiver = generate_test_account(); + create_signed_p2p_transaction(&mut sender, vec![&receiver]).remove(0) +} + +pub fn create_signed_p2p_transaction( + sender: &mut TestAccount, + receivers: Vec<&TestAccount>, +) -> Vec { + let mut transactions = Vec::new(); + for (_, receiver) in receivers.iter().enumerate() { + let transaction_payload = TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new(AccountAddress::ONE, Identifier::new("coin").unwrap()), + Identifier::new("transfer").unwrap(), + vec![APTOS_COIN_TYPE.clone()], + vec![ + bcs::to_bytes(&receiver.account_address).unwrap(), + bcs::to_bytes(&1u64).unwrap(), + ], + )); + + let 
raw_transaction = RawTransaction::new( + sender.account_address, + sender.sequence_number, + transaction_payload, + 0, + 0, + 0, + ChainId::new(10), + ); + sender.sequence_number += 1; + let txn = Transaction::UserTransaction(SignedTransaction::new( + raw_transaction.clone(), + sender.private_key.public_key().clone(), + sender.private_key.sign(&raw_transaction).unwrap(), + )); + transactions.push(txn.into()) + } + transactions +} diff --git a/execution/executor-benchmark/src/account_generator.rs b/execution/executor-benchmark/src/account_generator.rs index 426e32e468d0a..83367d254b0e0 100644 --- a/execution/executor-benchmark/src/account_generator.rs +++ b/execution/executor-benchmark/src/account_generator.rs @@ -62,7 +62,7 @@ impl AccountGenerator { pub struct AccountCache { generator: AccountGenerator, pub accounts: VecDeque, - rng: StdRng, + pub rng: StdRng, } impl AccountCache { @@ -113,7 +113,6 @@ impl AccountCache { .map(|i| self.accounts[i].address()) .collect(); let sender = &mut self.accounts[sender_idx]; - (sender, receivers) } } diff --git a/execution/executor-benchmark/src/db_generator.rs b/execution/executor-benchmark/src/db_generator.rs index 1faf1d8d1c8f3..1cd8c43a032af 100644 --- a/execution/executor-benchmark/src/db_generator.rs +++ b/execution/executor-benchmark/src/db_generator.rs @@ -26,7 +26,7 @@ pub fn create_db_with_accounts( db_dir: impl AsRef, storage_pruner_config: PrunerConfig, verify_sequence_numbers: bool, - use_state_kv_db: bool, + split_ledger_db: bool, use_sharded_state_merkle_db: bool, pipeline_config: PipelineConfig, ) where @@ -40,7 +40,7 @@ pub fn create_db_with_accounts( // create if not exists fs::create_dir_all(db_dir.as_ref()).unwrap(); - bootstrap_with_genesis(&db_dir, use_state_kv_db); + bootstrap_with_genesis(&db_dir, split_ledger_db, use_sharded_state_merkle_db); println!( "Finished empty DB creation, DB dir: {}. 
Creating accounts now...", @@ -55,18 +55,23 @@ pub fn create_db_with_accounts( &db_dir, storage_pruner_config, verify_sequence_numbers, - use_state_kv_db, + split_ledger_db, use_sharded_state_merkle_db, pipeline_config, ); } -fn bootstrap_with_genesis(db_dir: impl AsRef, use_state_kv_db: bool) { +fn bootstrap_with_genesis( + db_dir: impl AsRef, + split_ledger_db: bool, + use_sharded_state_merkle_db: bool, +) { let (config, _genesis_key) = aptos_genesis::test_utils::test_config(); let mut rocksdb_configs = RocksdbConfigs::default(); rocksdb_configs.state_merkle_db_config.max_open_files = -1; - rocksdb_configs.use_state_kv_db = use_state_kv_db; + rocksdb_configs.split_ledger_db = split_ledger_db; + rocksdb_configs.use_sharded_state_merkle_db = use_sharded_state_merkle_db; let (_db, db_rw) = DbReaderWriter::wrap( AptosDB::open( &db_dir, diff --git a/execution/executor-benchmark/src/lib.rs b/execution/executor-benchmark/src/lib.rs index b854b1a0a9c32..143a93d6a58df 100644 --- a/execution/executor-benchmark/src/lib.rs +++ b/execution/executor-benchmark/src/lib.rs @@ -29,10 +29,12 @@ use aptos_executor::{ use aptos_jellyfish_merkle::metrics::{ APTOS_JELLYFISH_INTERNAL_ENCODED_BYTES, APTOS_JELLYFISH_LEAF_ENCODED_BYTES, }; -use aptos_logger::info; +use aptos_logger::{info, warn}; +use aptos_sdk::types::LocalAccount; use aptos_storage_interface::DbReaderWriter; use aptos_transaction_generator_lib::{ create_txn_generator_creator, TransactionGeneratorCreator, TransactionType, + TransactionType::NonConflictingCoinTransfer, }; use aptos_vm::counters::TXN_GAS_USAGE; use db_reliable_submitter::DbReliableTransactionSubmitter; @@ -71,6 +73,7 @@ where fn create_checkpoint( source_dir: impl AsRef, checkpoint_dir: impl AsRef, + split_ledger_db: bool, use_sharded_state_merkle_db: bool, ) { // Create rocksdb checkpoint. @@ -79,23 +82,29 @@ fn create_checkpoint( } std::fs::create_dir_all(checkpoint_dir.as_ref()).unwrap(); - AptosDB::create_checkpoint(source_dir, checkpoint_dir, use_sharded_state_merkle_db) - .expect("db checkpoint creation fails."); + AptosDB::create_checkpoint( + source_dir, + checkpoint_dir, + split_ledger_db, + use_sharded_state_merkle_db, + ) + .expect("db checkpoint creation fails."); } /// Runs the benchmark with given parameters. 
+#[allow(clippy::too_many_arguments)] pub fn run_benchmark( block_size: usize, num_blocks: usize, transaction_type: Option, - transactions_per_sender: usize, + mut transactions_per_sender: usize, num_main_signer_accounts: usize, num_additional_dst_pool_accounts: usize, source_dir: impl AsRef, checkpoint_dir: impl AsRef, verify_sequence_numbers: bool, pruner_config: PrunerConfig, - use_state_kv_db: bool, + split_ledger_db: bool, use_sharded_state_merkle_db: bool, pipeline_config: PipelineConfig, ) where @@ -104,24 +113,45 @@ pub fn run_benchmark( create_checkpoint( source_dir.as_ref(), checkpoint_dir.as_ref(), + split_ledger_db, use_sharded_state_merkle_db, ); let (mut config, genesis_key) = aptos_genesis::test_utils::test_config(); config.storage.dir = checkpoint_dir.as_ref().to_path_buf(); config.storage.storage_pruner_config = pruner_config; - config.storage.rocksdb_configs.use_state_kv_db = use_state_kv_db; + config.storage.rocksdb_configs.split_ledger_db = split_ledger_db; config.storage.rocksdb_configs.use_sharded_state_merkle_db = use_sharded_state_merkle_db; let (db, executor) = init_db_and_executor::(&config); - let transaction_generator_creator = transaction_type.map(|transaction_type| { - init_workload::( + let num_existing_accounts = TransactionGenerator::read_meta(&source_dir); + let num_accounts_to_be_loaded = std::cmp::min( + num_existing_accounts, + num_main_signer_accounts + num_additional_dst_pool_accounts, + ); + + let mut num_accounts_to_skip = 0; + if let NonConflictingCoinTransfer{..} = transaction_type { + // In case of random non-conflicting coin transfer using `P2PTransactionGenerator`, + // `3*block_size` addresses is required: + // `block_size` number of signers, and 2 groups of burn-n-recycle recipients used alternatively. + if num_accounts_to_be_loaded < block_size * 3 { + panic!("Cannot guarantee random non-conflicting coin transfer using `P2PTransactionGenerator`."); + } + num_accounts_to_skip = block_size; + } + + let accounts_cache = + TransactionGenerator::gen_user_account_cache(db.reader.clone(), num_accounts_to_be_loaded, num_accounts_to_skip); + let (main_signer_accounts, burner_accounts) = + accounts_cache.split(num_main_signer_accounts); + + init_workload::( transaction_type, - num_main_signer_accounts, - num_additional_dst_pool_accounts, + main_signer_accounts, + burner_accounts, db.clone(), - &source_dir, // Initialization pipeline is temporary, so needs to be fully committed. // No discards/aborts allowed during initialization, even if they are allowed later. PipelineConfig { @@ -138,13 +168,28 @@ pub fn run_benchmark( let (pipeline, block_sender) = Pipeline::new(executor, version, pipeline_config.clone(), Some(num_blocks)); + + let mut num_accounts_to_load = num_main_signer_accounts; + if let Some(NonConflictingCoinTransfer { .. }) = transaction_type { + // In case of non-conflicting coin transfer, + // `aptos_executor_benchmark::transaction_generator::TransactionGenerator` needs to hold + // at least `block_size` number of accounts, all as signer only. 
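// Illustrative note, not part of this patch: with block_size = 1000, the earlier check
// requires at least 3 * 1000 = 3000 existing accounts (1000 signers plus two recipient
// groups), the generator below is given 1000 signer accounts, and transactions_per_sender
// is forced to 1 so that every transaction in a block comes from a distinct sender.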
+ num_accounts_to_load = block_size; + if transactions_per_sender > 1 { + warn!( + "Overriding transactions_per_sender to 1 for non_conflicting_txns_per_block workload" + ); + transactions_per_sender = 1; + } + } + let mut generator = TransactionGenerator::new_with_existing_db( db.clone(), genesis_key, block_sender, source_dir, version, - Some(num_main_signer_accounts), + Some(num_accounts_to_load), ); let mut start_time = Instant::now(); @@ -180,6 +225,7 @@ pub fn run_benchmark( .collect::>(); let start_commit_total = APTOS_EXECUTOR_COMMIT_BLOCKS_SECONDS.get_sample_sum(); + let start_vm_time = APTOS_EXECUTOR_VM_EXECUTE_BLOCK_SECONDS.get_sample_sum(); if let Some(transaction_generator_creator) = transaction_generator_creator { generator.run_workload( block_size, @@ -200,6 +246,11 @@ pub fn run_benchmark( let elapsed = start_time.elapsed().as_secs_f64(); let delta_v = (db.reader.get_latest_version().unwrap() - version) as f64; let delta_gas = TXN_GAS_USAGE.get_sample_sum() - start_gas; + let delta_vm_time = APTOS_EXECUTOR_VM_EXECUTE_BLOCK_SECONDS.get_sample_sum() - start_vm_time; + info!( + "VM execution TPS {} txn/s", + (delta_v / delta_vm_time) as usize + ); info!( "Executed workload {}", if let Some(ttype) = transaction_type { @@ -251,12 +302,11 @@ pub fn run_benchmark( } } -fn init_workload>( +fn init_workload( transaction_type: TransactionType, - num_main_signer_accounts: usize, - num_additional_dst_pool_accounts: usize, + mut main_signer_accounts: Vec, + burner_accounts: Vec, db: DbReaderWriter, - db_dir: &P, pipeline_config: PipelineConfig, ) -> Box where @@ -271,17 +321,6 @@ where ); let runtime = Runtime::new().unwrap(); - - let num_existing_accounts = TransactionGenerator::read_meta(db_dir); - let num_cached_accounts = std::cmp::min( - num_existing_accounts, - num_main_signer_accounts + num_additional_dst_pool_accounts, - ); - let accounts_cache = - TransactionGenerator::gen_user_account_cache(db.reader.clone(), num_cached_accounts); - - let (mut main_signer_accounts, burner_accounts) = - accounts_cache.split(num_main_signer_accounts); let transaction_factory = TransactionGenerator::create_transaction_factory(); let (txn_generator_creator, _address_pool, _account_pool) = runtime.block_on(async { @@ -317,7 +356,7 @@ pub fn add_accounts( checkpoint_dir: impl AsRef, pruner_config: PrunerConfig, verify_sequence_numbers: bool, - use_state_kv_db: bool, + split_ledger_db: bool, use_sharded_state_merkle_db: bool, pipeline_config: PipelineConfig, ) where @@ -327,6 +366,7 @@ pub fn add_accounts( create_checkpoint( source_dir.as_ref(), checkpoint_dir.as_ref(), + split_ledger_db, use_sharded_state_merkle_db, ); add_accounts_impl::( @@ -337,7 +377,7 @@ pub fn add_accounts( checkpoint_dir, pruner_config, verify_sequence_numbers, - use_state_kv_db, + split_ledger_db, use_sharded_state_merkle_db, pipeline_config, ); @@ -351,7 +391,7 @@ fn add_accounts_impl( output_dir: impl AsRef, pruner_config: PrunerConfig, verify_sequence_numbers: bool, - use_state_kv_db: bool, + split_ledger_db: bool, use_sharded_state_merkle_db: bool, pipeline_config: PipelineConfig, ) where @@ -360,7 +400,7 @@ fn add_accounts_impl( let (mut config, genesis_key) = aptos_genesis::test_utils::test_config(); config.storage.dir = output_dir.as_ref().to_path_buf(); config.storage.storage_pruner_config = pruner_config; - config.storage.rocksdb_configs.use_state_kv_db = use_state_kv_db; + config.storage.rocksdb_configs.split_ledger_db = split_ledger_db; config.storage.rocksdb_configs.use_sharded_state_merkle_db = 
use_sharded_state_merkle_db; let (db, executor) = init_db_and_executor::(&config); diff --git a/execution/executor-benchmark/src/main.rs b/execution/executor-benchmark/src/main.rs index 489f47ada9c32..4fa2e9cc25373 100644 --- a/execution/executor-benchmark/src/main.rs +++ b/execution/executor-benchmark/src/main.rs @@ -117,11 +117,14 @@ struct Opt { #[clap(long)] concurrency_level: Option, + #[clap(long, default_value = "1")] + num_executor_shards: usize, + #[clap(flatten)] pruner_opt: PrunerOpt, #[clap(long)] - use_state_kv_db: bool, + split_ledger_db: bool, #[clap(long)] use_sharded_state_merkle_db: bool, @@ -144,10 +147,11 @@ impl Opt { fn concurrency_level(&self) -> usize { match self.concurrency_level { None => { - let level = num_cpus::get(); + let level = + (num_cpus::get() as f64 / self.num_executor_shards as f64).ceil() as usize; println!( - "\nVM concurrency level defaults to num of cpus: {}\n", - level + "\nVM concurrency level defaults to {} for number of shards {} \n", + level, self.num_executor_shards ); level }, @@ -225,7 +229,7 @@ where data_dir, opt.pruner_opt.pruner_config(), opt.verify_sequence_numbers, - opt.use_state_kv_db, + opt.split_ledger_db, opt.use_sharded_state_merkle_db, opt.pipeline_opt.pipeline_config(), ); @@ -250,7 +254,7 @@ where checkpoint_dir, opt.verify_sequence_numbers, opt.pruner_opt.pruner_config(), - opt.use_state_kv_db, + opt.split_ledger_db, opt.use_sharded_state_merkle_db, opt.pipeline_opt.pipeline_config(), ); @@ -269,7 +273,7 @@ where checkpoint_dir, opt.pruner_opt.pruner_config(), opt.verify_sequence_numbers, - opt.use_state_kv_db, + opt.split_ledger_db, opt.use_sharded_state_merkle_db, opt.pipeline_opt.pipeline_config(), ); @@ -293,6 +297,7 @@ fn main() { .build_global() .expect("Failed to build rayon global thread pool."); AptosVM::set_concurrency_level_once(opt.concurrency_level()); + AptosVM::set_num_shards_once(opt.num_executor_shards); NativeExecutor::set_concurrency_level_once(opt.concurrency_level()); if opt.use_native_executor { diff --git a/execution/executor-benchmark/src/native_executor.rs b/execution/executor-benchmark/src/native_executor.rs index 118fa4eb8b510..11d05fd4ce92c 100644 --- a/execution/executor-benchmark/src/native_executor.rs +++ b/execution/executor-benchmark/src/native_executor.rs @@ -13,6 +13,7 @@ use aptos_storage_interface::cached_state_view::CachedStateView; use aptos_types::{ account_address::AccountAddress, account_config::{deposit::DepositEvent, withdraw::WithdrawEvent}, + block_executor::partitioner::ExecutableTransactions, contract_event::ContractEvent, event::EventKey, state_store::state_key::StateKey, @@ -336,9 +337,14 @@ impl NativeExecutor { impl TransactionBlockExecutor for NativeExecutor { fn execute_transaction_block( - transactions: Vec, + transactions: ExecutableTransactions, state_view: CachedStateView, + _maybe_block_gas_limit: Option, ) -> Result { + let transactions = match transactions { + ExecutableTransactions::Unsharded(txns) => txns, + _ => todo!("sharded execution not yet supported"), + }; let transaction_outputs = NATIVE_EXECUTOR_POOL.install(|| { transactions .par_iter() @@ -411,17 +417,4 @@ impl TransactionBlockExecutor for NativeExecutor { state_cache: state_view.into_state_cache(), }) } - - // Dummy function that is not supposed to be used - fn execute_transaction_block_with_gas_limit( - _transactions: Vec, - state_view: CachedStateView, - _maybe_gas_limit: Option, - ) -> Result { - Ok(ChunkOutput { - transactions: vec![], - transaction_outputs: vec![], - state_cache: 
state_view.into_state_cache(), - }) - } } diff --git a/execution/executor-benchmark/src/transaction_executor.rs b/execution/executor-benchmark/src/transaction_executor.rs index f47b7c20c25cc..6cdaf2c0a72e3 100644 --- a/execution/executor-benchmark/src/transaction_executor.rs +++ b/execution/executor-benchmark/src/transaction_executor.rs @@ -61,7 +61,7 @@ where let block_id = HashValue::random(); let output = self .executor - .execute_block((block_id, transactions), self.parent_block_id) + .execute_block((block_id, transactions).into(), self.parent_block_id, None) .unwrap(); assert_eq!(output.compute_status().len(), num_txns); diff --git a/execution/executor-benchmark/src/transaction_generator.rs b/execution/executor-benchmark/src/transaction_generator.rs index 0a5481cb0a7a5..a46bb4038eb22 100644 --- a/execution/executor-benchmark/src/transaction_generator.rs +++ b/execution/executor-benchmark/src/transaction_generator.rs @@ -18,6 +18,7 @@ use aptos_types::{ use chrono::Local; use indicatif::{ProgressBar, ProgressStyle}; use itertools::Itertools; +use rand::thread_rng; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; use std::{ @@ -141,11 +142,15 @@ impl TransactionGenerator { accounts } - pub fn gen_user_account_cache(reader: Arc, num_accounts: usize) -> AccountCache { + pub fn gen_user_account_cache( + reader: Arc, + num_accounts: usize, + num_to_skip: usize, + ) -> AccountCache { Self::resync_sequence_numbers( reader, Self::gen_account_cache( - AccountGenerator::new_for_user_accounts(0), + AccountGenerator::new_for_user_accounts(num_to_skip as u64), num_accounts, "user", ), @@ -185,7 +190,7 @@ impl TransactionGenerator { main_signer_accounts: num_main_signer_accounts.map(|num_main_signer_accounts| { let num_cached_accounts = std::cmp::min(num_existing_accounts, num_main_signer_accounts); - Self::gen_user_account_cache(db.reader.clone(), num_cached_accounts) + Self::gen_user_account_cache(db.reader.clone(), num_cached_accounts, 0) }), num_existing_accounts, version, @@ -275,23 +280,25 @@ impl TransactionGenerator { transactions_per_sender: usize, ) { assert!(self.block_sender.is_some()); + let num_senders_per_block = + (block_size + transactions_per_sender - 1) / transactions_per_sender; + let account_pool_size = self.main_signer_accounts.as_ref().unwrap().accounts.len(); let mut transaction_generator = transaction_generator_creator.create_transaction_generator(); - for _ in 0..num_blocks { - // TODO: handle when block_size isn't divisible by transactions_per_sender - let transactions: Vec<_> = (0..(block_size / transactions_per_sender)) - .into_iter() - .flat_map(|_| { - let sender = self.main_signer_accounts.as_mut().unwrap().get_random(); - transaction_generator - .generate_transactions(sender, transactions_per_sender) - .into_iter() - .map(Transaction::UserTransaction) - .collect::>() - }) - .chain(once(Transaction::StateCheckpoint(HashValue::random()))) - .collect(); + let transactions: Vec<_> = rand::seq::index::sample( + &mut thread_rng(), + account_pool_size, + num_senders_per_block, + ) + .into_iter() + .flat_map(|idx| { + let sender = &mut self.main_signer_accounts.as_mut().unwrap().accounts[idx]; + transaction_generator.generate_transactions(sender, transactions_per_sender) + }) + .map(Transaction::UserTransaction) + .chain(once(Transaction::StateCheckpoint(HashValue::random()))) + .collect(); self.version += transactions.len() as Version; if let Some(sender) = &self.block_sender { @@ -405,7 +412,6 @@ impl TransactionGenerator { 
for _ in 0..num_blocks { // TODO: handle when block_size isn't divisible by transactions_per_sender let transactions: Vec<_> = (0..(block_size / transactions_per_sender)) - .into_iter() .flat_map(|_| { let (sender, receivers) = self .main_signer_accounts diff --git a/execution/executor-service/Cargo.toml b/execution/executor-service/Cargo.toml new file mode 100644 index 0000000000000..7fcddb11b8db9 --- /dev/null +++ b/execution/executor-service/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "aptos-executor-service" +description = "Aptos executor service" +version = "0.1.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +anyhow = { workspace = true } +aptos-config = { workspace = true } +aptos-crypto = { workspace = true } +aptos-executor-types = { workspace = true } +aptos-language-e2e-tests = { workspace = true } +aptos-logger = { workspace = true } +aptos-retrier = { workspace = true } +aptos-secure-net = { workspace = true } +aptos-state-view = { workspace = true } +aptos-types = { workspace = true } +aptos-vm = { workspace = true } +bcs = { workspace = true } +clap = { workspace = true } +itertools = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +thiserror = { workspace = true } diff --git a/execution/executor-service/src/error.rs b/execution/executor-service/src/error.rs new file mode 100644 index 0000000000000..6901536f863cf --- /dev/null +++ b/execution/executor-service/src/error.rs @@ -0,0 +1,27 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Clone, Debug, Deserialize, Error, PartialEq, Eq, Serialize)] +/// Different reasons for executor service fails to execute a block. 
+pub enum Error { + #[error("Internal error: {0}")] + InternalError(String), + #[error("Serialization error: {0}")] + SerializationError(String), +} + +impl From for Error { + fn from(error: bcs::Error) -> Self { + Self::SerializationError(format!("{}", error)) + } +} + +impl From for Error { + fn from(error: aptos_secure_net::Error) -> Self { + Self::InternalError(error.to_string()) + } +} diff --git a/execution/executor-service/src/lib.rs b/execution/executor-service/src/lib.rs new file mode 100644 index 0000000000000..1d7988e54760e --- /dev/null +++ b/execution/executor-service/src/lib.rs @@ -0,0 +1,39 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +use aptos_state_view::in_memory_state_view::InMemoryStateView; +use aptos_types::{ + block_executor::partitioner::SubBlocksForShard, + transaction::{Transaction, TransactionOutput}, + vm_status::VMStatus, +}; +use serde::{Deserialize, Serialize}; + +mod error; +pub mod process_executor_service; +pub mod remote_executor_client; +pub mod remote_executor_service; +#[cfg(test)] +mod thread_executor_service; + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct BlockExecutionResult { + pub inner: Result, VMStatus>, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum BlockExecutionRequest { + ExecuteBlock(ExecuteBlockCommand), +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ExecuteBlockCommand { + pub(crate) sub_blocks: SubBlocksForShard, + // Currently we only support the state view backed by in-memory hashmap, which means that + // the controller needs to pre-read all the KV pairs from the storage and pass them to the + // executor service. In the future, we will support other types of state view, e.g., the + // state view backed by remote storage service, which will allow the executor service to read the KV pairs + // directly from the storage. 
+ pub(crate) state_view: InMemoryStateView, + pub(crate) concurrency_level: usize, + pub(crate) maybe_block_gas_limit: Option, +} diff --git a/execution/executor-service/src/main.rs b/execution/executor-service/src/main.rs new file mode 100644 index 0000000000000..fd978cfaf9734 --- /dev/null +++ b/execution/executor-service/src/main.rs @@ -0,0 +1,25 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_executor_service::process_executor_service::ProcessExecutorService; +use clap::Parser; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +#[derive(Debug, Parser)] +struct Args { + #[clap(long, default_value = "8080")] + pub server_port: u16, + + #[clap(long, default_value = "8")] + pub num_executor_threads: usize, +} + +fn main() { + let args = Args::parse(); + aptos_logger::Logger::new().init(); + + let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), args.server_port); + let executor_service = + ProcessExecutorService::new(server_addr, 1000, args.num_executor_threads); + executor_service.run(); +} diff --git a/execution/executor-service/src/process_executor_service.rs b/execution/executor-service/src/process_executor_service.rs new file mode 100644 index 0000000000000..f2c888be0b209 --- /dev/null +++ b/execution/executor-service/src/process_executor_service.rs @@ -0,0 +1,54 @@ +// Copyright © Aptos Foundation + +use crate::{ + remote_executor_service, + remote_executor_service::{ExecutorService, RemoteExecutorService}, +}; +use aptos_logger::info; +use aptos_secure_net::NetworkServer; +use std::net::SocketAddr; + +/// An implementation of the remote executor service that runs in a standalone process. +pub struct ProcessExecutorService { + server_addr: SocketAddr, + network_timeout_ms: u64, + num_executor_threads: usize, +} + +impl ProcessExecutorService { + pub fn new(server_addr: SocketAddr, network_timeout: u64, num_executor_threads: usize) -> Self { + Self { + server_addr, + network_timeout_ms: network_timeout, + num_executor_threads, + } + } + + pub fn run(&self) { + info!( + "Starting process remote executor service on {}", + self.server_addr + ); + let network_server = NetworkServer::new( + "process-executor-service", + self.server_addr, + self.network_timeout_ms, + ); + let executor_service = ExecutorService::new(self.num_executor_threads); + remote_executor_service::execute(network_server, executor_service); + } +} + +impl RemoteExecutorService for ProcessExecutorService { + fn server_address(&self) -> SocketAddr { + self.server_addr + } + + fn network_timeout_ms(&self) -> u64 { + self.network_timeout_ms + } + + fn executor_threads(&self) -> usize { + self.num_executor_threads + } +} diff --git a/execution/executor-service/src/remote_executor_client.rs b/execution/executor-service/src/remote_executor_client.rs new file mode 100644 index 0000000000000..1b8eff64333f9 --- /dev/null +++ b/execution/executor-service/src/remote_executor_client.rs @@ -0,0 +1,76 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{error::Error, BlockExecutionRequest, BlockExecutionResult, ExecuteBlockCommand}; +use aptos_logger::error; +use aptos_retrier::{fixed_retry_strategy, retry}; +use aptos_secure_net::NetworkClient; +use aptos_state_view::StateView; +use aptos_types::{ + block_executor::partitioner::SubBlocksForShard, + transaction::{Transaction, TransactionOutput}, + vm_status::VMStatus, +}; +use aptos_vm::sharded_block_executor::block_executor_client::BlockExecutorClient; +use std::{net::SocketAddr, sync::Mutex}; 
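// Illustrative note, not part of this patch: the client/service exchange is a plain
// request/response over aptos-secure-net. The client bcs-serializes a BlockExecutionRequest
// and writes it on the NetworkClient; the service deserializes it, runs the sub-blocks
// through its LocalExecutorClient, and writes back a bcs-serialized BlockExecutionResult,
// which execute_block_inner below deserializes and returns.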
+ +/// An implementation of [`BlockExecutorClient`] that supports executing blocks remotely. +pub struct RemoteExecutorClient { + network_client: Mutex, +} + +impl RemoteExecutorClient { + pub fn new(server_address: SocketAddr, network_timeout_ms: u64) -> Self { + let network_client = NetworkClient::new( + "remote-executor-service", + server_address, + network_timeout_ms, + ); + Self { + network_client: Mutex::new(network_client), + } + } + + fn execute_block_inner( + &self, + execution_request: BlockExecutionRequest, + ) -> Result { + let input_message = bcs::to_bytes(&execution_request)?; + let mut network_client = self.network_client.lock().unwrap(); + network_client.write(&input_message)?; + let bytes = network_client.read()?; + Ok(bcs::from_bytes(&bytes)?) + } + + fn execute_block_with_retry( + &self, + execution_request: BlockExecutionRequest, + ) -> BlockExecutionResult { + retry(fixed_retry_strategy(5, 20), || { + let res = self.execute_block_inner(execution_request.clone()); + if let Err(e) = &res { + error!("Failed to execute block: {:?}", e); + } + res + }) + .unwrap() + } +} + +impl BlockExecutorClient for RemoteExecutorClient { + fn execute_block( + &self, + sub_blocks: SubBlocksForShard, + state_view: &S, + concurrency_level: usize, + maybe_block_gas_limit: Option, + ) -> Result, VMStatus> { + let input = BlockExecutionRequest::ExecuteBlock(ExecuteBlockCommand { + sub_blocks, + state_view: S::as_in_memory_state_view(state_view), + concurrency_level, + maybe_block_gas_limit, + }); + self.execute_block_with_retry(input).inner + } +} diff --git a/execution/executor-service/src/remote_executor_service.rs b/execution/executor-service/src/remote_executor_service.rs new file mode 100644 index 0000000000000..5e1389fec4387 --- /dev/null +++ b/execution/executor-service/src/remote_executor_service.rs @@ -0,0 +1,244 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + error::Error, remote_executor_client::RemoteExecutorClient, BlockExecutionRequest, + BlockExecutionResult, +}; +use aptos_logger::{error, info}; +use aptos_secure_net::NetworkServer; +use aptos_vm::sharded_block_executor::block_executor_client::{ + BlockExecutorClient, LocalExecutorClient, +}; +use std::net::SocketAddr; + +/// A service that provides support for remote execution. Essentially, it reads a request from +/// the remote executor client and executes the block locally and returns the result. +pub struct ExecutorService { + client: LocalExecutorClient, +} + +impl ExecutorService { + pub fn new(num_executor_threads: usize) -> Self { + Self { + client: LocalExecutorClient::new(num_executor_threads), + } + } + + pub fn handle_message(&self, execution_message: Vec) -> Result, Error> { + let input = bcs::from_bytes(&execution_message)?; + let result = self.handle_execution_request(input)?; + Ok(bcs::to_bytes(&result)?) + } + + pub fn handle_execution_request( + &self, + execution_request: BlockExecutionRequest, + ) -> Result { + let result = match execution_request { + BlockExecutionRequest::ExecuteBlock(command) => self.client.execute_block( + command.sub_blocks, + &command.state_view, + command.concurrency_level, + command.maybe_block_gas_limit, + ), + }; + Ok(BlockExecutionResult { inner: result }) + } +} + +pub trait RemoteExecutorService { + fn client(&self) -> RemoteExecutorClient { + RemoteExecutorClient::new(self.server_address(), self.network_timeout_ms()) + } + + fn server_address(&self) -> SocketAddr; + + /// Network Timeout in milliseconds. 
+ fn network_timeout_ms(&self) -> u64; + + fn executor_threads(&self) -> usize; +} + +pub fn execute(mut network_server: NetworkServer, executor_service: ExecutorService) { + loop { + if let Err(e) = process_one_message(&mut network_server, &executor_service) { + error!("Failed to process message: {}", e); + } + } +} + +fn process_one_message( + network_server: &mut NetworkServer, + executor_service: &ExecutorService, +) -> Result<(), Error> { + let request = network_server.read()?; + let response = executor_service.handle_message(request)?; + info!("server sending response"); + network_server.write(&response)?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::{ + remote_executor_service::RemoteExecutorService, + thread_executor_service::ThreadExecutorService, + }; + use aptos_language_e2e_tests::{ + account::AccountData, common_transactions::peer_to_peer_txn, executor::FakeExecutor, + }; + use aptos_types::{ + account_config::{DepositEvent, WithdrawEvent}, + block_executor::partitioner::{ + CrossShardDependencies, SubBlock, SubBlocksForShard, TransactionWithDependencies, + }, + transaction::{ExecutionStatus, Transaction, TransactionOutput, TransactionStatus}, + }; + use aptos_vm::sharded_block_executor::{ + block_executor_client::BlockExecutorClient, ShardedBlockExecutor, + }; + use std::sync::Arc; + + fn generate_transactions(executor: &mut FakeExecutor) -> (Vec, AccountData) { + let sender = executor.create_raw_account_data(3_000_000_000, 10); + let receiver = executor.create_raw_account_data(3_000_000_000, 10); + executor.add_account_data(&sender); + executor.add_account_data(&receiver); + + let transfer_amount = 1_000; + + // execute transaction + let txns: Vec = vec![ + Transaction::UserTransaction(peer_to_peer_txn( + sender.account(), + receiver.account(), + 10, + transfer_amount, + 100, + )), + Transaction::UserTransaction(peer_to_peer_txn( + sender.account(), + receiver.account(), + 11, + transfer_amount, + 100, + )), + Transaction::UserTransaction(peer_to_peer_txn( + sender.account(), + receiver.account(), + 12, + transfer_amount, + 100, + )), + Transaction::UserTransaction(peer_to_peer_txn( + sender.account(), + receiver.account(), + 13, + transfer_amount, + 100, + )), + ]; + (txns, receiver) + } + + fn verify_txn_output( + transfer_amount: u64, + output: &[TransactionOutput], + executor: &mut FakeExecutor, + receiver: &AccountData, + ) { + for (idx, txn_output) in output.iter().enumerate() { + assert_eq!( + txn_output.status(), + &TransactionStatus::Keep(ExecutionStatus::Success) + ); + + // check events + for event in txn_output.events() { + if let Ok(payload) = WithdrawEvent::try_from(event) { + assert_eq!(transfer_amount, payload.amount()); + } else if let Ok(payload) = DepositEvent::try_from(event) { + if payload.amount() == 0 { + continue; + } + assert_eq!(transfer_amount, payload.amount()); + } else { + panic!("Unexpected Event Type") + } + } + + let original_receiver_balance = executor + .read_coin_store_resource(receiver.account()) + .expect("receiver balcne must exist"); + executor.apply_write_set(txn_output.write_set()); + + // check that numbers in stored DB are correct + let receiver_balance = original_receiver_balance.coin() + transfer_amount; + let updated_receiver_balance = executor + .read_coin_store_resource(receiver.account()) + .expect("receiver balance must exist"); + assert_eq!(receiver_balance, updated_receiver_balance.coin()); + assert_eq!( + idx as u64 + 1, + updated_receiver_balance.deposit_events().count() + ); + } + } + + #[test] + fn 
test_remote_block_execute() { + let executor_service = ThreadExecutorService::new(5000, 2); + // Uncomment for testing with a real server + // let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080); + // let client = RemoteExecutorClient::new(server_addr, 1000); + + let client = executor_service.client(); + let mut executor = FakeExecutor::from_head_genesis(); + for _ in 0..5 { + let (txns, receiver) = generate_transactions(&mut executor); + let txns_with_deps = txns + .into_iter() + .map(|txn| TransactionWithDependencies::new(txn, CrossShardDependencies::default())) + .collect::>(); + let sub_block = SubBlock::new(0, txns_with_deps); + let sub_blocks_for_shard = SubBlocksForShard::new(0, vec![sub_block]); + + let output = client + .execute_block(sub_blocks_for_shard, executor.data_store(), 2, None) + .unwrap(); + verify_txn_output(1_000, &output, &mut executor, &receiver); + } + } + + #[test] + fn test_sharded_remote_block_executor() { + let executor_service = ThreadExecutorService::new(5000, 2); + let client = executor_service.client(); + // Uncomment for testing with a real server + // let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080); + // let client = RemoteExecutorClient::new(server_addr, 1000); + + let sharded_block_executor = ShardedBlockExecutor::new(vec![client]); + let mut executor = FakeExecutor::from_head_genesis(); + for _ in 0..5 { + let (txns, receiver) = generate_transactions(&mut executor); + let txns_with_deps = txns + .into_iter() + .map(|txn| TransactionWithDependencies::new(txn, CrossShardDependencies::default())) + .collect::>(); + let sub_block = SubBlock::new(0, txns_with_deps); + let sub_blocks_for_shard = SubBlocksForShard::new(0, vec![sub_block]); + + let output = sharded_block_executor + .execute_block( + Arc::new(executor.data_store().clone()), + vec![sub_blocks_for_shard], + 2, + None, + ) + .unwrap(); + verify_txn_output(1_000, &output, &mut executor, &receiver); + } + } +} diff --git a/execution/executor-service/src/thread_executor_service.rs b/execution/executor-service/src/thread_executor_service.rs new file mode 100644 index 0000000000000..92e4672192d76 --- /dev/null +++ b/execution/executor-service/src/thread_executor_service.rs @@ -0,0 +1,62 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +use crate::{ + remote_executor_service, + remote_executor_service::{ExecutorService, RemoteExecutorService}, +}; +use aptos_config::utils; +use aptos_logger::info; +use aptos_secure_net::NetworkServer; +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + thread, + thread::JoinHandle, +}; + +/// This is a simple implementation of RemoteExecutorService that runs the executor service in a +/// separate thread. This should be used for testing only. 
+pub struct ThreadExecutorService { + _child: JoinHandle<()>, + server_addr: SocketAddr, + network_timeout_ms: u64, + num_executor_threads: usize, +} + +impl ThreadExecutorService { + pub fn new(network_timeout_ms: u64, num_executor_threads: usize) -> Self { + let listen_port = utils::get_available_port(); + let listen_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), listen_port); + let server_addr = listen_addr; + info!("Starting thread remote executor service on {}", listen_addr); + + let network_server = + NetworkServer::new("thread-executor-service", listen_addr, network_timeout_ms); + + let executor_service = ExecutorService::new(num_executor_threads); + + let child = thread::spawn(move || { + remote_executor_service::execute(network_server, executor_service); + }); + + Self { + _child: child, + server_addr, + network_timeout_ms, + num_executor_threads, + } + } +} + +impl RemoteExecutorService for ThreadExecutorService { + fn server_address(&self) -> SocketAddr { + self.server_addr + } + + fn network_timeout_ms(&self) -> u64 { + self.network_timeout_ms + } + + fn executor_threads(&self) -> usize { + self.num_executor_threads + } +} diff --git a/execution/executor-test-helpers/src/integration_test_impl.rs b/execution/executor-test-helpers/src/integration_test_impl.rs index fad11604db79a..42f879d008170 100644 --- a/execution/executor-test-helpers/src/integration_test_impl.rs +++ b/execution/executor-test-helpers/src/integration_test_impl.rs @@ -21,7 +21,7 @@ use aptos_types::{ block_metadata::BlockMetadata, chain_id::ChainId, event::EventKey, - test_helpers::transaction_test_helpers::block, + test_helpers::transaction_test_helpers::{block, BLOCK_GAS_LIMIT}, transaction::{ Transaction, Transaction::UserTransaction, TransactionListWithProof, TransactionWithProof, WriteSetPayload, @@ -160,10 +160,14 @@ pub fn test_execution_with_storage_impl() -> Arc { txn_factory.transfer(account3.address(), 10 * B), ))); } - let block3 = block(block3, executor.get_block_gas_limit()); // append state checkpoint txn + let block3 = block(block3, BLOCK_GAS_LIMIT); // append state checkpoint txn let output1 = executor - .execute_block((block1_id, block1.clone()), parent_block_id) + .execute_block( + (block1_id, block1.clone()).into(), + parent_block_id, + BLOCK_GAS_LIMIT, + ) .unwrap(); let li1 = gen_ledger_info_with_sigs(1, &output1, block1_id, &[signer.clone()]); let epoch2_genesis_id = Block::make_genesis_block_from_ledger_info(li1.ledger_info()).id(); @@ -371,7 +375,11 @@ pub fn test_execution_with_storage_impl() -> Arc { // Execute block 2, 3, 4 let output2 = executor - .execute_block((block2_id, block2), epoch2_genesis_id) + .execute_block( + (block2_id, block2).into(), + epoch2_genesis_id, + BLOCK_GAS_LIMIT, + ) .unwrap(); let li2 = gen_ledger_info_with_sigs(2, &output2, block2_id, &[signer.clone()]); let epoch3_genesis_id = Block::make_genesis_block_from_ledger_info(li2.ledger_info()).id(); @@ -386,7 +394,11 @@ pub fn test_execution_with_storage_impl() -> Arc { assert_eq!(current_version, 13); let output3 = executor - .execute_block((block3_id, block3.clone()), epoch3_genesis_id) + .execute_block( + (block3_id, block3.clone()).into(), + epoch3_genesis_id, + BLOCK_GAS_LIMIT, + ) .unwrap(); let li3 = gen_ledger_info_with_sigs(3, &output3, block3_id, &[signer]); executor.commit_blocks(vec![block3_id], li3).unwrap(); @@ -430,7 +442,7 @@ pub fn test_execution_with_storage_impl() -> Arc { .unwrap(); // With block gas limit, StateCheckpoint txn is inserted to block after execution. 
- let diff = executor.get_block_gas_limit().map(|_| 0).unwrap_or(1); + let diff = BLOCK_GAS_LIMIT.map(|_| 0).unwrap_or(1); let transaction_list_with_proof = db .reader diff --git a/execution/executor-types/Cargo.toml b/execution/executor-types/Cargo.toml index 4a6100161f235..a77177ce6e03a 100644 --- a/execution/executor-types/Cargo.toml +++ b/execution/executor-types/Cargo.toml @@ -14,6 +14,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } +aptos-block-partitioner = { workspace = true } aptos-crypto = { workspace = true } aptos-scratchpad = { workspace = true } aptos-secure-net = { workspace = true } diff --git a/execution/executor-types/src/in_memory_state_calculator.rs b/execution/executor-types/src/in_memory_state_calculator.rs index 3010ad637432a..2ea0a22f040df 100644 --- a/execution/executor-types/src/in_memory_state_calculator.rs +++ b/execution/executor-types/src/in_memory_state_calculator.rs @@ -82,7 +82,6 @@ impl InMemoryStateCalculator { let state_cache = sharded_state_cache .iter() .flatten() - .into_iter() .map(|entry| (entry.key().clone(), entry.value().1.clone())) .collect(); diff --git a/execution/executor-types/src/lib.rs b/execution/executor-types/src/lib.rs index 69ca00e860fec..9055136c0d6c4 100644 --- a/execution/executor-types/src/lib.rs +++ b/execution/executor-types/src/lib.rs @@ -1,7 +1,6 @@ // Copyright © Aptos Foundation // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 - #![forbid(unsafe_code)] use anyhow::Result; @@ -11,6 +10,7 @@ use aptos_crypto::{ }; use aptos_scratchpad::{ProofRead, SparseMerkleTree}; use aptos_types::{ + block_executor::partitioner::ExecutableBlock, contract_event::ContractEvent, epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, @@ -90,8 +90,9 @@ pub trait BlockExecutorTrait: Send + Sync { /// Executes a block. fn execute_block( &self, - block: (HashValue, Vec), + block: ExecutableBlock, parent_block_id: HashValue, + maybe_block_gas_limit: Option, ) -> Result; /// Saves eligible blocks to persistent storage. @@ -124,10 +125,6 @@ pub trait BlockExecutorTrait: Send + Sync { /// Finishes the block executor by releasing memory held by inner data structures(SMT). 
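
For reference, call sites change mechanically: instead of passing a plain (block_id, txns) pair and relying on the gas limit stored inside the executor, they convert the pair into an ExecutableBlock and pass the limit per call. A minimal before/after sketch, using the BLOCK_GAS_LIMIT test constant that the test updates below import from aptos_types::test_helpers::transaction_test_helpers (production callers pass their own Option value):

// Before: the gas limit was held by the executor (get_block_gas_limit / update_block_gas_limit).
let output = executor
    .execute_block((block_id, txns), parent_block_id)
    .unwrap();

// After: the pair converts into an ExecutableBlock and the limit is an explicit argument.
let output = executor
    .execute_block(
        (block_id, txns).into(), // (HashValue, Vec<Transaction>) -> ExecutableBlock
        parent_block_id,
        BLOCK_GAS_LIMIT,         // Option gas limit; None disables the per-block limit
    )
    .unwrap();
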
fn finish(&self); - - fn get_block_gas_limit(&self) -> Option; - - fn update_block_gas_limit(&self, block_gas_limit: Option); } #[derive(Clone)] diff --git a/execution/executor/Cargo.toml b/execution/executor/Cargo.toml index cb47f7f36d530..8273dbf2ac591 100644 --- a/execution/executor/Cargo.toml +++ b/execution/executor/Cargo.toml @@ -14,6 +14,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } +aptos-block-partitioner = { workspace = true } aptos-consensus-types = { workspace = true } aptos-crypto = { workspace = true } aptos-executor-types = { workspace = true } @@ -32,6 +33,7 @@ dashmap = { workspace = true } fail = { workspace = true } itertools = { workspace = true } move-core-types = { workspace = true } +num_cpus = { workspace = true } once_cell = { workspace = true } rayon = { workspace = true } serde = { workspace = true } diff --git a/execution/executor/src/block_executor.rs b/execution/executor/src/block_executor.rs index b1568a582821c..b43ac2c11325b 100644 --- a/execution/executor/src/block_executor.rs +++ b/execution/executor/src/block_executor.rs @@ -16,7 +16,7 @@ use crate::{ use anyhow::Result; use aptos_crypto::HashValue; use aptos_executor_types::{BlockExecutorTrait, Error, StateComputeResult}; -use aptos_infallible::{Mutex, RwLock}; +use aptos_infallible::RwLock; use aptos_logger::prelude::*; use aptos_scratchpad::SparseMerkleTree; use aptos_state_view::StateViewId; @@ -24,7 +24,9 @@ use aptos_storage_interface::{ async_proof_fetcher::AsyncProofFetcher, cached_state_view::CachedStateView, DbReaderWriter, }; use aptos_types::{ - ledger_info::LedgerInfoWithSignatures, state_store::state_value::StateValue, + block_executor::partitioner::{ExecutableBlock, ExecutableTransactions}, + ledger_info::LedgerInfoWithSignatures, + state_store::state_value::StateValue, transaction::Transaction, }; use aptos_vm::AptosVM; @@ -33,34 +35,22 @@ use std::{marker::PhantomData, sync::Arc}; pub trait TransactionBlockExecutor: Send + Sync { fn execute_transaction_block( - transactions: Vec, + transactions: ExecutableTransactions, state_view: CachedStateView, - ) -> Result; - - fn execute_transaction_block_with_gas_limit( - transactions: Vec, - state_view: CachedStateView, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Result; } impl TransactionBlockExecutor for AptosVM { fn execute_transaction_block( - transactions: Vec, - state_view: CachedStateView, - ) -> Result { - ChunkOutput::by_transaction_execution::(transactions, state_view) - } - - fn execute_transaction_block_with_gas_limit( - transactions: Vec, + transactions: ExecutableTransactions, state_view: CachedStateView, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Result { - ChunkOutput::by_transaction_execution_with_gas_limit::( + ChunkOutput::by_transaction_execution::( transactions, state_view, - maybe_gas_limit, + maybe_block_gas_limit, ) } } @@ -101,24 +91,6 @@ impl BlockExecutorTrait for BlockExecutor where V: TransactionBlockExecutor, { - fn get_block_gas_limit(&self) -> Option { - self.maybe_initialize().expect("Failed to initialize."); - self.inner - .read() - .as_ref() - .expect("BlockExecutor is not reset") - .get_block_gas_limit() - } - - fn update_block_gas_limit(&self, block_gas_limit: Option) { - self.maybe_initialize().expect("Failed to initialize."); - self.inner - .write() - .as_ref() - .expect("BlockExecutor is not reset") - .update_block_gas_limit(block_gas_limit); - } - fn committed_block_id(&self) -> HashValue { 
self.maybe_initialize().expect("Failed to initialize."); self.inner @@ -135,15 +107,16 @@ where fn execute_block( &self, - block: (HashValue, Vec), + block: ExecutableBlock, parent_block_id: HashValue, + maybe_block_gas_limit: Option, ) -> Result { self.maybe_initialize()?; self.inner .read() .as_ref() .expect("BlockExecutor is not reset") - .execute_block(block, parent_block_id) + .execute_block(block, parent_block_id, maybe_block_gas_limit) } fn commit_blocks_ext( @@ -168,7 +141,6 @@ struct BlockExecutorInner { db: DbReaderWriter, block_tree: BlockTree, phantom: PhantomData, - block_gas_limit: Mutex>, } impl BlockExecutorInner @@ -181,7 +153,6 @@ where db, block_tree, phantom: PhantomData, - block_gas_limit: Mutex::new(None), }) } @@ -200,26 +171,21 @@ impl BlockExecutorInner where V: TransactionBlockExecutor, { - fn get_block_gas_limit(&self) -> Option { - self.block_gas_limit.lock().as_ref().copied() - } - - fn update_block_gas_limit(&self, block_gas_limit: Option) { - let mut gas_limit = self.block_gas_limit.lock(); - *gas_limit = block_gas_limit; - } - fn committed_block_id(&self) -> HashValue { self.block_tree.root_block().id } fn execute_block( &self, - block: (HashValue, Vec), + block: ExecutableBlock, parent_block_id: HashValue, + maybe_block_gas_limit: Option, ) -> Result { let _timer = APTOS_EXECUTOR_EXECUTE_BLOCK_SECONDS.start_timer(); - let (block_id, transactions) = block; + let ExecutableBlock { + block_id, + transactions, + } = block; let committed_block = self.block_tree.root_block(); let mut block_vec = self .block_tree @@ -261,8 +227,6 @@ where )? }; - let maybe_gas_limit = self.get_block_gas_limit(); - let chunk_output = { let _timer = APTOS_EXECUTOR_VM_EXECUTE_BLOCK_SECONDS.start_timer(); fail_point!("executor::vm_execute_block", |_| { @@ -270,15 +234,7 @@ where "Injected error in vm_execute_block" ))) }); - if maybe_gas_limit.is_some() { - V::execute_transaction_block_with_gas_limit( - transactions, - state_view, - maybe_gas_limit, - )? - } else { - V::execute_transaction_block(transactions, state_view)? - } + V::execute_transaction_block(transactions, state_view, maybe_block_gas_limit)? }; chunk_output.trace_log_transaction_status(); @@ -287,7 +243,7 @@ where .start_timer(); let (output, _, _) = chunk_output - .apply_to_ledger_for_block(parent_view, maybe_gas_limit.map(|_| block_id))?; + .apply_to_ledger_for_block(parent_view, maybe_block_gas_limit.map(|_| block_id))?; output }; diff --git a/execution/executor/src/chunk_executor.rs b/execution/executor/src/chunk_executor.rs index 3e3724a40b217..cfc62b7e12e74 100644 --- a/execution/executor/src/chunk_executor.rs +++ b/execution/executor/src/chunk_executor.rs @@ -203,7 +203,8 @@ impl ChunkExecutorInner { let state_view = self.state_view(&latest_view)?; let chunk_output = { let _timer = APTOS_EXECUTOR_VM_EXECUTE_CHUNK_SECONDS.start_timer(); - ChunkOutput::by_transaction_execution::(transactions, state_view)? + // State sync executor shouldn't have block gas limit. + ChunkOutput::by_transaction_execution::(transactions.into(), state_view, None)? }; let executed_chunk = Self::apply_chunk_output_for_state_sync( verified_target_li, @@ -527,9 +528,11 @@ impl ChunkExecutorInner { .iter() .take((end_version - begin_version) as usize) .cloned() - .collect(); + .collect::>(); - let chunk_output = ChunkOutput::by_transaction_execution::(txns, state_view)?; + // State sync executor shouldn't have block gas limit. 
+        let chunk_output =
+            ChunkOutput::by_transaction_execution::<V>(txns.into(), state_view, None)?;
         // not `zip_eq`, deliberately
         for (version, txn_out, txn_info, write_set, events) in multizip((
             begin_version..end_version,
diff --git a/execution/executor/src/components/block_tree/mod.rs b/execution/executor/src/components/block_tree/mod.rs
index 8fb05fbca301f..2f1a14fc5b9f3 100644
--- a/execution/executor/src/components/block_tree/mod.rs
+++ b/execution/executor/src/components/block_tree/mod.rs
@@ -7,7 +7,10 @@
 #[cfg(test)]
 mod test;
 
-use crate::logging::{LogEntry, LogSchema};
+use crate::{
+    logging::{LogEntry, LogSchema},
+    metrics::APTOS_EXECUTOR_OTHER_TIMERS_SECONDS,
+};
 use anyhow::{anyhow, ensure, Result};
 use aptos_consensus_types::block::Block as ConsensusBlock;
 use aptos_crypto::HashValue;
@@ -18,7 +21,10 @@ use aptos_storage_interface::DbReader;
 use aptos_types::{ledger_info::LedgerInfo, proof::definition::LeafCount};
 use std::{
     collections::{hash_map::Entry, HashMap},
-    sync::{Arc, Weak},
+    sync::{
+        mpsc::{channel, Receiver},
+        Arc, Weak,
+    },
 };
 
 pub struct Block {
@@ -226,7 +232,12 @@ impl BlockTree {
         block_lookup.fetch_or_add_block(id, ExecutedBlock::new_empty(ledger_view), None)
     }
 
-    pub fn prune(&self, ledger_info: &LedgerInfo) -> Result<()> {
+    // Set the root to be at `ledger_info` and drop blocks that are no longer descendants of
+    // the new root.
+    //
+    // Dropping happens asynchronously in another thread. A receiver is returned to the caller
+    // to wait for the dropping to fully complete (useful for tests).
+    pub fn prune(&self, ledger_info: &LedgerInfo) -> Result<Receiver<()>> {
         let committed_block_id = ledger_info.consensus_block_id();
         let last_committed_block = self.get_block(committed_block_id)?;
 
@@ -250,8 +261,26 @@ impl BlockTree {
             );
             last_committed_block
         };
-        *self.root.lock() = root;
-        Ok(())
+        let old_root = {
+            let mut root_locked = self.root.lock();
+            // send old root to async task to drop it
+            let old_root = root_locked.clone();
+            *root_locked = root;
+            old_root
+        };
+        // This should be the last reference to the old root; spawning the drop onto a different
+        // thread guarantees that it will not happen on the current thread.
+        let (tx, rx) = channel::<()>();
+        rayon::spawn(move || {
+            let _timer = APTOS_EXECUTOR_OTHER_TIMERS_SECONDS
+                .with_label_values(&["drop_old_root"])
+                .start_timer();
+            drop(old_root);
+            // Error is ignored, since the caller might not care about dropping completion and
+            // has discarded the receiver already.
+            tx.send(()).ok();
+        });
+        Ok(rx)
     }
 
     pub fn add_block(
diff --git a/execution/executor/src/components/block_tree/test.rs b/execution/executor/src/components/block_tree/test.rs
index 0aba39ca8fea2..f1c3ac195afcc 100644
--- a/execution/executor/src/components/block_tree/test.rs
+++ b/execution/executor/src/components/block_tree/test.rs
@@ -28,6 +28,7 @@ impl BlockTree {
         }
     }
 
+    #[cfg(test)]
     pub fn size(&self) -> usize {
         self.block_lookup.inner.lock().0.len()
     }
@@ -103,7 +104,11 @@ fn test_branch() {
     // if assertion fails.
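
The prune() change above moves the potentially expensive drop of the old root off the caller's thread and hands back a Receiver so tests (like the ones in this hunk) can wait for it to finish. A dependency-free sketch of the same pattern, with std::thread standing in for rayon::spawn and the metrics timer omitted (hypothetical helper name):

use std::sync::mpsc::{channel, Receiver};
use std::thread;

// Drop `value` on a background thread; the returned receiver fires once the drop has completed.
fn drop_in_background<T: Send + 'static>(value: T) -> Receiver<()> {
    let (tx, rx) = channel::<()>();
    thread::spawn(move || {
        drop(value); // the expensive drop happens here, off the caller's thread
        // Ignore the send error: the caller may have discarded the receiver already.
        tx.send(()).ok();
    });
    rx
}

// Callers that must observe completion (e.g. tests asserting the tree size afterwards)
// block on the receiver; everyone else can simply drop it:
//     drop_in_background(old_root).recv().unwrap();
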
let num_blocks = block_tree.size(); assert_eq!(num_blocks, 12); - block_tree.prune(&gen_ledger_info(id(9), false)).unwrap(); + block_tree + .prune(&gen_ledger_info(id(9), false)) + .unwrap() + .recv() + .unwrap(); let num_blocks = block_tree.size(); assert_eq!(num_blocks, 3); assert_eq!(block_tree.root_block().id, id(9)); @@ -113,7 +118,7 @@ fn test_branch() { fn test_reconfig_id_update() { let block_tree = create_tree(); let ledger_info = gen_ledger_info(id(1), true); - block_tree.prune(&ledger_info).unwrap(); + block_tree.prune(&ledger_info).unwrap().recv().unwrap(); let num_blocks = block_tree.size(); // reconfig suffix blocks are ditched assert_eq!(num_blocks, 1); diff --git a/execution/executor/src/components/chunk_output.rs b/execution/executor/src/components/chunk_output.rs index b4fd421ec6262..f8dd15520f05b 100644 --- a/execution/executor/src/components/chunk_output.rs +++ b/execution/executor/src/components/chunk_output.rs @@ -16,9 +16,13 @@ use aptos_storage_interface::{ }; use aptos_types::{ account_config::CORE_CODE_ADDRESS, + block_executor::partitioner::{ExecutableTransactions, SubBlocksForShard}, transaction::{ExecutionStatus, Transaction, TransactionOutput, TransactionStatus}, }; -use aptos_vm::{sharded_block_executor::ShardedBlockExecutor, AptosVM, VMExecutor}; +use aptos_vm::{ + sharded_block_executor::{block_executor_client::LocalExecutorClient, ShardedBlockExecutor}, + AptosVM, VMExecutor, +}; use fail::fail_point; use move_core_types::vm_status::StatusCode; use once_cell::sync::Lazy; @@ -26,11 +30,9 @@ use std::{ops::Deref, sync::Arc, time::Duration}; pub static SHARDED_BLOCK_EXECUTOR: Lazy>>> = Lazy::new(|| { - Arc::new(Mutex::new(ShardedBlockExecutor::new( - AptosVM::get_num_shards(), - None, // Defaults to num_cpus / num_shards - None, - ))) + let executor_clients = + LocalExecutorClient::create_local_clients(AptosVM::get_num_shards(), None); + Arc::new(Mutex::new(ShardedBlockExecutor::new(executor_clients))) }); pub struct ChunkOutput { @@ -46,33 +48,33 @@ pub struct ChunkOutput { impl ChunkOutput { pub fn by_transaction_execution( - transactions: Vec, + transactions: ExecutableTransactions, state_view: CachedStateView, + maybe_block_gas_limit: Option, ) -> Result { - let transaction_outputs = Self::execute_block::(transactions.clone(), &state_view)?; - - // to print txn output for debugging, uncomment: - // println!("{:?}", transaction_outputs.iter().map(|t| t.status() ).collect::>()); - - update_counters_for_processed_chunk(&transactions, &transaction_outputs, "executed"); - - Ok(Self { - transactions, - transaction_outputs, - state_cache: state_view.into_state_cache(), - }) + match transactions { + ExecutableTransactions::Unsharded(txns) => { + Self::by_transaction_execution_unsharded::( + txns, + state_view, + maybe_block_gas_limit, + ) + }, + ExecutableTransactions::Sharded(_) => { + // TODO(skedia): Change this into sharded once we move partitioner out of the + // sharded block executor. 
+ todo!("sharded execution integration is not yet done") + }, + } } - pub fn by_transaction_execution_with_gas_limit( + fn by_transaction_execution_unsharded( transactions: Vec, state_view: CachedStateView, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Result { - let transaction_outputs = Self::execute_block_with_gas_limit::( - transactions.clone(), - &state_view, - maybe_gas_limit, - )?; + let transaction_outputs = + Self::execute_block::(transactions.clone(), &state_view, maybe_block_gas_limit)?; // to print txn output for debugging, uncomment: // println!("{:?}", transaction_outputs.iter().map(|t| t.status() ).collect::>()); @@ -87,22 +89,26 @@ impl ChunkOutput { } pub fn by_transaction_execution_sharded( - transactions: Vec, + block: Vec>, state_view: CachedStateView, + maybe_block_gas_limit: Option, ) -> Result { let state_view_arc = Arc::new(state_view); - let transaction_outputs = - Self::execute_block_sharded::(transactions.clone(), state_view_arc.clone())?; + let transaction_outputs = Self::execute_block_sharded::( + block.clone(), + state_view_arc.clone(), + maybe_block_gas_limit, + )?; - update_counters_for_processed_chunk(&transactions, &transaction_outputs, "executed"); + // TODO(skedia) add logic to emit counters per shard instead of doing it globally. + // Unwrapping here is safe because the execution has finished and it is guaranteed that + // the state view is not used anymore. let state_view = Arc::try_unwrap(state_view_arc).unwrap(); Ok(Self { - transactions, + transactions: SubBlocksForShard::flatten(block), transaction_outputs, - // Unwrapping here is safe because the execution has finished and it is guaranteed that - // the state view is not used anymore. state_cache: state_view.into_state_cache(), }) } @@ -170,13 +176,15 @@ impl ChunkOutput { } fn execute_block_sharded( - transactions: Vec, + block: Vec>, state_view: Arc, + maybe_block_gas_limit: Option, ) -> Result> { Ok(V::execute_block_sharded( SHARDED_BLOCK_EXECUTOR.lock().deref(), - transactions, + block, state_view, + maybe_block_gas_limit, )?) } @@ -186,22 +194,12 @@ impl ChunkOutput { fn execute_block( transactions: Vec, state_view: &CachedStateView, + maybe_block_gas_limit: Option, ) -> Result> { - Ok(V::execute_block(transactions, &state_view)?) - } - - /// Executes the block of [Transaction]s using the [VMExecutor] and returns - /// a vector of [TransactionOutput]s. - #[cfg(not(feature = "consensus-only-perf-test"))] - fn execute_block_with_gas_limit( - transactions: Vec, - state_view: &CachedStateView, - maybe_gas_limit: Option, - ) -> Result> { - Ok(V::execute_block_with_gas_limit( + Ok(V::execute_block( transactions, &state_view, - maybe_gas_limit, + maybe_block_gas_limit, )?) } @@ -213,13 +211,16 @@ impl ChunkOutput { fn execute_block( transactions: Vec, state_view: &CachedStateView, + maybe_block_gas_limit: Option, ) -> Result> { use aptos_state_view::{StateViewId, TStateView}; use aptos_types::write_set::WriteSet; let transaction_outputs = match state_view.id() { // this state view ID implies a genesis block in non-test cases. - StateViewId::Miscellaneous => V::execute_block(transactions, &state_view)?, + StateViewId::Miscellaneous => { + V::execute_block(transactions, &state_view, maybe_block_gas_limit)? + }, _ => transactions .iter() .map(|_| { @@ -234,16 +235,6 @@ impl ChunkOutput { }; Ok(transaction_outputs) } - - /// In consensus-only mode, we do not care about gas limits. 
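
The net effect of this chunk_output.rs hunk is a single entry point that receives either a flat transaction list or per-shard sub-blocks, plus one Option gas-limit argument, replacing the separate with/without-gas-limit functions. A hypothetical, heavily simplified stand-in showing just that dispatch shape (String stands in for Transaction):

enum ExecutableTxns {
    Unsharded(Vec<String>),    // stand-in for Vec<Transaction>
    Sharded(Vec<Vec<String>>), // stand-in for Vec<SubBlocksForShard<Transaction>>
}

fn by_transaction_execution(txns: ExecutableTxns, maybe_block_gas_limit: Option<u64>) -> usize {
    match txns {
        ExecutableTxns::Unsharded(t) => execute_unsharded(t, maybe_block_gas_limit),
        // Mirrors the todo!() above: the sharded arm waits on the partitioner being moved
        // out of the sharded block executor.
        ExecutableTxns::Sharded(_) => unimplemented!("sharded path not wired up yet"),
    }
}

fn execute_unsharded(txns: Vec<String>, _maybe_block_gas_limit: Option<u64>) -> usize {
    // `None` means no per-block gas limit (e.g. the state sync and genesis paths).
    txns.len()
}
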
- #[cfg(feature = "consensus-only-perf-test")] - fn execute_block_with_gas_limit( - transactions: Vec, - state_view: &CachedStateView, - _maybe_gas_limit: Option, - ) -> Result> { - Self::execute_block::(transactions, state_view) - } } pub fn update_counters_for_processed_chunk( diff --git a/execution/executor/src/db_bootstrapper.rs b/execution/executor/src/db_bootstrapper.rs index 2f8b7ef9123ec..d263d8f264e70 100644 --- a/execution/executor/src/db_bootstrapper.rs +++ b/execution/executor/src/db_bootstrapper.rs @@ -136,9 +136,12 @@ pub fn calculate_genesis( get_state_epoch(&base_state_view)? }; - let (mut output, _, _) = - ChunkOutput::by_transaction_execution::(vec![genesis_txn.clone()], base_state_view)? - .apply_to_ledger(&executed_trees, None)?; + let (mut output, _, _) = ChunkOutput::by_transaction_execution::( + vec![genesis_txn.clone()].into(), + base_state_view, + None, + )? + .apply_to_ledger(&executed_trees, None)?; ensure!( !output.to_commit.is_empty(), "Genesis txn execution failed." diff --git a/execution/executor/src/fuzzing.rs b/execution/executor/src/fuzzing.rs index c24db7ec31894..9b7969fe278cf 100644 --- a/execution/executor/src/fuzzing.rs +++ b/execution/executor/src/fuzzing.rs @@ -14,7 +14,9 @@ use aptos_storage_interface::{ cached_state_view::CachedStateView, state_delta::StateDelta, DbReader, DbReaderWriter, DbWriter, }; use aptos_types::{ + block_executor::partitioner::{ExecutableTransactions, SubBlocksForShard}, ledger_info::LedgerInfoWithSignatures, + test_helpers::transaction_test_helpers::BLOCK_GAS_LIMIT, transaction::{Transaction, TransactionOutput, TransactionToCommit, Version}, vm_status::VMStatus, }; @@ -38,7 +40,8 @@ pub fn fuzz_execute_and_commit_blocks( let mut block_ids = vec![]; for block in blocks { let block_id = block.0; - let _execution_results = executor.execute_block(block, parent_block_id); + let _execution_results = + executor.execute_block(block.into(), parent_block_id, BLOCK_GAS_LIMIT); parent_block_id = block_id; block_ids.push(block_id); } @@ -50,21 +53,14 @@ pub struct FakeVM; impl TransactionBlockExecutor for FakeVM { fn execute_transaction_block( - transactions: Vec, + transactions: ExecutableTransactions, state_view: CachedStateView, + maybe_block_gas_limit: Option, ) -> Result { - ChunkOutput::by_transaction_execution::(transactions, state_view) - } - - fn execute_transaction_block_with_gas_limit( - transactions: Vec, - state_view: CachedStateView, - maybe_gas_limit: Option, - ) -> Result { - ChunkOutput::by_transaction_execution_with_gas_limit::( + ChunkOutput::by_transaction_execution::( transactions, state_view, - maybe_gas_limit, + maybe_block_gas_limit, ) } } @@ -72,8 +68,9 @@ impl TransactionBlockExecutor for FakeVM { impl VMExecutor for FakeVM { fn execute_block_sharded( _sharded_block_executor: &ShardedBlockExecutor, - _transactions: Vec, + _block: Vec>, _state_view: Arc, + _maybe_block_gas_limit: Option, ) -> Result, VMStatus> { Ok(Vec::new()) } @@ -81,14 +78,7 @@ impl VMExecutor for FakeVM { fn execute_block( _transactions: Vec, _state_view: &impl StateView, - ) -> Result, VMStatus> { - Ok(Vec::new()) - } - - fn execute_block_with_gas_limit( - _transactions: Vec, - _state_view: &impl StateView, - _maybe_gas_limit: Option, + _maybe_block_gas_limit: Option, ) -> Result, VMStatus> { Ok(Vec::new()) } diff --git a/execution/executor/src/mock_vm/mock_vm_test.rs b/execution/executor/src/mock_vm/mock_vm_test.rs index 73c24af2e0164..05cbd65739387 100644 --- a/execution/executor/src/mock_vm/mock_vm_test.rs +++ 
b/execution/executor/src/mock_vm/mock_vm_test.rs @@ -45,7 +45,7 @@ fn test_mock_vm_different_senders() { txns.push(encode_mint_transaction(gen_address(i), amount)); } - let outputs = MockVM::execute_block(txns.clone(), &MockStateView) + let outputs = MockVM::execute_block(txns.clone(), &MockStateView, None) .expect("MockVM should not fail to start"); for (output, txn) in itertools::zip_eq(outputs.iter(), txns.iter()) { @@ -82,7 +82,7 @@ fn test_mock_vm_same_sender() { } let outputs = - MockVM::execute_block(txns, &MockStateView).expect("MockVM should not fail to start"); + MockVM::execute_block(txns, &MockStateView, None).expect("MockVM should not fail to start"); for (i, output) in outputs.iter().enumerate() { assert_eq!( @@ -116,7 +116,7 @@ fn test_mock_vm_payment() { ]; let output = - MockVM::execute_block(txns, &MockStateView).expect("MockVM should not fail to start"); + MockVM::execute_block(txns, &MockStateView, None).expect("MockVM should not fail to start"); let mut output_iter = output.iter(); output_iter.next(); diff --git a/execution/executor/src/mock_vm/mod.rs b/execution/executor/src/mock_vm/mod.rs index f69543306d32f..98f5c668649bb 100644 --- a/execution/executor/src/mock_vm/mod.rs +++ b/execution/executor/src/mock_vm/mod.rs @@ -14,6 +14,7 @@ use aptos_types::{ access_path::AccessPath, account_address::AccountAddress, account_config::CORE_CODE_ADDRESS, + block_executor::partitioner::{ExecutableTransactions, SubBlocksForShard}, chain_id::ChainId, contract_event::ContractEvent, event::EventKey, @@ -59,21 +60,14 @@ pub struct MockVM; impl TransactionBlockExecutor for MockVM { fn execute_transaction_block( - transactions: Vec, - state_view: CachedStateView, - ) -> Result { - ChunkOutput::by_transaction_execution::(transactions, state_view) - } - - fn execute_transaction_block_with_gas_limit( - transactions: Vec, + transactions: ExecutableTransactions, state_view: CachedStateView, - maybe_gas_limit: Option, + maybe_block_gas_limit: Option, ) -> Result { - ChunkOutput::by_transaction_execution_with_gas_limit::( + ChunkOutput::by_transaction_execution::( transactions, state_view, - maybe_gas_limit, + maybe_block_gas_limit, ) } } @@ -82,6 +76,7 @@ impl VMExecutor for MockVM { fn execute_block( transactions: Vec, state_view: &impl StateView, + _maybe_block_gas_limit: Option, ) -> Result, VMStatus> { if state_view.is_genesis() { assert_eq!( @@ -212,18 +207,11 @@ impl VMExecutor for MockVM { Ok(outputs) } - fn execute_block_with_gas_limit( - transactions: Vec, - state_view: &(impl StateView + Sync), - _maybe_gas_limit: Option, - ) -> Result, VMStatus> { - MockVM::execute_block(transactions, state_view) - } - fn execute_block_sharded( _sharded_block_executor: &ShardedBlockExecutor, - _transactions: Vec, + _block: Vec>, _state_view: Arc, + _maybe_block_gas_limit: Option, ) -> std::result::Result, VMStatus> { todo!() } diff --git a/execution/executor/src/tests/chunk_executor_tests.rs b/execution/executor/src/tests/chunk_executor_tests.rs index be6a4b19dd14c..058b7d80e06a7 100644 --- a/execution/executor/src/tests/chunk_executor_tests.rs +++ b/execution/executor/src/tests/chunk_executor_tests.rs @@ -17,7 +17,7 @@ use aptos_executor_types::{BlockExecutorTrait, ChunkExecutorTrait}; use aptos_storage_interface::DbReaderWriter; use aptos_types::{ ledger_info::LedgerInfoWithSignatures, - test_helpers::transaction_test_helpers::block, + test_helpers::transaction_test_helpers::{block, BLOCK_GAS_LIMIT}, transaction::{TransactionListWithProof, TransactionOutputListWithProof}, }; use rand::Rng; @@ 
-274,15 +274,12 @@ fn test_executor_execute_and_commit_chunk_local_result_mismatch() { .collect::>(); let output = executor .execute_block( - (block_id, block(txns, executor.get_block_gas_limit())), + (block_id, block(txns, BLOCK_GAS_LIMIT)).into(), parent_block_id, + BLOCK_GAS_LIMIT, ) .unwrap(); - // With no block gas limit, StateCheckpoint txn is inserted to block before execution. - // So the ledger_info version needs to + 1 with no block gas limit. - let maybe_gas_limit = executor.get_block_gas_limit(); - let diff = maybe_gas_limit.map(|_| 0).unwrap_or(1); - let ledger_info = tests::gen_ledger_info(5 + diff, output.root_hash(), block_id, 1); + let ledger_info = tests::gen_ledger_info(5 + 1, output.root_hash(), block_id, 1); executor.commit_blocks(vec![block_id], ledger_info).unwrap(); } @@ -327,7 +324,7 @@ fn test_executor_execute_and_commit_chunk_without_verify() { .map(|_| encode_mint_transaction(tests::gen_address(rng.gen::()), 100)) .collect::>(); let output = executor - .execute_block((block_id, block(txns)), parent_block_id) + .execute_block((block_id, block(txns)).into(), parent_block_id) .unwrap(); let ledger_info = tests::gen_ledger_info(6, output.root_hash(), block_id, 1); executor.commit_blocks(vec![block_id], ledger_info).unwrap(); diff --git a/execution/executor/src/tests/mod.rs b/execution/executor/src/tests/mod.rs index 22ea1df12519e..cdcdac761e9bc 100644 --- a/execution/executor/src/tests/mod.rs +++ b/execution/executor/src/tests/mod.rs @@ -30,7 +30,7 @@ use aptos_types::{ ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, proof::definition::LeafCount, state_store::{state_key::StateKey, state_value::StateValue}, - test_helpers::transaction_test_helpers::block, + test_helpers::transaction_test_helpers::{block, BLOCK_GAS_LIMIT}, transaction::{ ExecutionStatus, RawTransaction, Script, SignedTransaction, Transaction, TransactionListWithProof, TransactionOutput, TransactionPayload, TransactionStatus, @@ -53,8 +53,9 @@ fn execute_and_commit_block( let output = executor .execute_block( - (id, block(vec![txn], executor.get_block_gas_limit())), + (id, block(vec![txn], BLOCK_GAS_LIMIT)).into(), parent_block_id, + BLOCK_GAS_LIMIT, ) .unwrap(); let version = 2 * (txn_index + 1); @@ -80,7 +81,6 @@ impl TestExecutor { let waypoint = generate_waypoint::(&db, &genesis).unwrap(); maybe_bootstrap::(&db, &genesis, waypoint).unwrap(); let executor = BlockExecutor::new(db.clone()); - executor.update_block_gas_limit(Some(1000)); // Can comment out this line to test without gas limit TestExecutor { _path: path, @@ -152,11 +152,9 @@ fn test_executor_status() { let output = executor .execute_block( - ( - block_id, - block(vec![txn0, txn1, txn2], executor.get_block_gas_limit()), - ), + (block_id, block(vec![txn0, txn1, txn2], BLOCK_GAS_LIMIT)).into(), parent_block_id, + BLOCK_GAS_LIMIT, ) .unwrap(); @@ -184,10 +182,7 @@ fn test_executor_status_consensus_only() { let output = executor .execute_block( - ( - block_id, - block(vec![txn0, txn1, txn2], executor.get_block_gas_limit()), - ), + (block_id, block(vec![txn0, txn1, txn2], BLOCK_GAS_LIMIT)).into(), parent_block_id, ) .unwrap(); @@ -216,8 +211,9 @@ fn test_executor_one_block() { .collect::>(); let output = executor .execute_block( - (block_id, block(txns, executor.get_block_gas_limit())), + (block_id, block(txns, BLOCK_GAS_LIMIT)).into(), parent_block_id, + BLOCK_GAS_LIMIT, ) .unwrap(); let version = num_user_txns + 1; @@ -261,20 +257,16 @@ fn test_executor_two_blocks_with_failed_txns() { .collect::>(); let _output1 = executor 
.execute_block( - ( - block1_id, - block(block1_txns, executor.get_block_gas_limit()), - ), + (block1_id, block(block1_txns, BLOCK_GAS_LIMIT)).into(), parent_block_id, + BLOCK_GAS_LIMIT, ) .unwrap(); let output2 = executor .execute_block( - ( - block2_id, - block(block2_txns, executor.get_block_gas_limit()), - ), + (block2_id, block(block2_txns, BLOCK_GAS_LIMIT)).into(), block1_id, + BLOCK_GAS_LIMIT, ) .unwrap(); @@ -294,11 +286,9 @@ fn test_executor_commit_twice() { let block1_id = gen_block_id(1); let output1 = executor .execute_block( - ( - block1_id, - block(block1_txns, executor.get_block_gas_limit()), - ), + (block1_id, block(block1_txns, BLOCK_GAS_LIMIT)).into(), parent_block_id, + BLOCK_GAS_LIMIT, ) .unwrap(); let ledger_info = gen_ledger_info(6, output1.root_hash(), block1_id, 1); @@ -326,11 +316,9 @@ fn test_executor_execute_same_block_multiple_times() { for _i in 0..100 { let output = executor .execute_block( - ( - block_id, - block(txns.clone(), executor.get_block_gas_limit()), - ), + (block_id, block(txns.clone(), BLOCK_GAS_LIMIT)).into(), parent_block_id, + BLOCK_GAS_LIMIT, ) .unwrap(); responses.push(output); @@ -339,10 +327,10 @@ fn test_executor_execute_same_block_multiple_times() { assert_eq!(responses.len(), 1); } -fn ledger_version_from_block_size(block_size: usize, maybe_gas_limit: Option) -> usize { +fn ledger_version_from_block_size(block_size: usize, maybe_block_gas_limit: Option) -> usize { // With block gas limit, StateCheckpoint txn is inserted to block after execution. // So the ledger_info version needs to block_size + 1 with block gas limit. - block_size + maybe_gas_limit.map(|_| 1).unwrap_or(0) + block_size + maybe_block_gas_limit.map(|_| 1).unwrap_or(0) } /// Generates a list of `TransactionListWithProof`s according to the given ranges. 
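
A small worked example of the checkpoint bookkeeping that ledger_version_from_block_size encodes (hypothetical numbers): without a block gas limit the StateCheckpoint transaction is already part of the block before execution, so block_size counts it; with a gas limit the executor appends it after execution, hence the +1. Both branches land on the same committed version:

fn ledger_version(block_size: usize, maybe_block_gas_limit: Option<u64>) -> usize {
    block_size + maybe_block_gas_limit.map(|_| 1).unwrap_or(0)
}

fn main() {
    // 10 user txns, no gas limit: the checkpoint was pushed into the block up front.
    assert_eq!(ledger_version(10 + 1, None), 11);
    // 10 user txns, gas limit set: the checkpoint is appended by the executor afterwards.
    assert_eq!(ledger_version(10, Some(1_000)), 11);
}
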
@@ -368,17 +356,20 @@ fn create_transaction_chunks( let txn = encode_mint_transaction(gen_address(i), 100); txns.push(txn); } - if executor.get_block_gas_limit().is_none() { + if BLOCK_GAS_LIMIT.is_none() { txns.push(Transaction::StateCheckpoint(HashValue::random())); } let id = gen_block_id(1); let output = executor - .execute_block((id, txns.clone()), executor.committed_block_id()) + .execute_block( + (id, txns.clone()).into(), + executor.committed_block_id(), + BLOCK_GAS_LIMIT, + ) .unwrap(); - let ledger_version = - ledger_version_from_block_size(txns.len(), executor.get_block_gas_limit()) as u64; + let ledger_version = ledger_version_from_block_size(txns.len(), BLOCK_GAS_LIMIT) as u64; let ledger_info = gen_ledger_info(ledger_version, output.root_hash(), id, 1); executor .commit_blocks(vec![id], ledger_info.clone()) @@ -411,12 +402,20 @@ fn test_noop_block_after_reconfiguration() { let first_txn = encode_reconfiguration_transaction(); let first_block_id = gen_block_id(1); let output1 = executor - .execute_block((first_block_id, vec![first_txn]), parent_block_id) + .execute_block( + (first_block_id, vec![first_txn]).into(), + parent_block_id, + BLOCK_GAS_LIMIT, + ) .unwrap(); parent_block_id = first_block_id; - let second_block = TestBlock::new(10, 10, gen_block_id(2), executor.get_block_gas_limit()); + let second_block = TestBlock::new(10, 10, gen_block_id(2), BLOCK_GAS_LIMIT); let output2 = executor - .execute_block((second_block.id, second_block.txns), parent_block_id) + .execute_block( + (second_block.id, second_block.txns).into(), + parent_block_id, + BLOCK_GAS_LIMIT, + ) .unwrap(); assert_eq!(output1.root_hash(), output2.root_hash()); } @@ -589,24 +588,40 @@ fn test_reconfig_suffix_empty_blocks() { db: _, executor, } = TestExecutor::new(); - // add gas limit to be consistent with block executor that will add state checkpoint txn - let block_a = TestBlock::new(10000, 1, gen_block_id(1), Some(0)); + let block_a = TestBlock::new(10000, 1, gen_block_id(1), BLOCK_GAS_LIMIT); + // add block gas limit to be consistent with block executor that will add state checkpoint txn let mut block_b = TestBlock::new(10000, 1, gen_block_id(2), Some(0)); - let block_c = TestBlock::new(1, 1, gen_block_id(3), Some(0)); - let block_d = TestBlock::new(1, 1, gen_block_id(4), Some(0)); + let block_c = TestBlock::new(1, 1, gen_block_id(3), BLOCK_GAS_LIMIT); + let block_d = TestBlock::new(1, 1, gen_block_id(4), BLOCK_GAS_LIMIT); block_b.txns.push(encode_reconfiguration_transaction()); let parent_block_id = executor.committed_block_id(); executor - .execute_block((block_a.id, block_a.txns), parent_block_id) + .execute_block( + (block_a.id, block_a.txns).into(), + parent_block_id, + BLOCK_GAS_LIMIT, + ) .unwrap(); let output = executor - .execute_block((block_b.id, block_b.txns), block_a.id) + .execute_block( + (block_b.id, block_b.txns).into(), + block_a.id, + BLOCK_GAS_LIMIT, + ) .unwrap(); executor - .execute_block((block_c.id, block_c.txns), block_b.id) + .execute_block( + (block_c.id, block_c.txns).into(), + block_b.id, + BLOCK_GAS_LIMIT, + ) .unwrap(); executor - .execute_block((block_d.id, block_d.txns), block_c.id) + .execute_block( + (block_d.id, block_d.txns).into(), + block_c.id, + BLOCK_GAS_LIMIT, + ) .unwrap(); let ledger_info = gen_ledger_info(20002, output.root_hash(), block_d.id, 1); @@ -624,7 +639,12 @@ struct TestBlock { } impl TestBlock { - fn new(num_user_txns: u64, amount: u32, id: HashValue, maybe_gas_limit: Option) -> Self { + fn new( + num_user_txns: u64, + amount: u32, + id: HashValue, 
+ maybe_block_gas_limit: Option, + ) -> Self { let txns = if num_user_txns == 0 { Vec::new() } else { @@ -632,7 +652,7 @@ impl TestBlock { (0..num_user_txns) .map(|index| encode_mint_transaction(gen_address(index), u64::from(amount))) .collect(), - maybe_gas_limit, + maybe_block_gas_limit, ) }; TestBlock { txns, id } @@ -641,14 +661,17 @@ impl TestBlock { // Executes a list of transactions by executing and immediately committing one at a time. Returns // the root hash after all transactions are committed. -fn run_transactions_naive(transactions: Vec) -> HashValue { +fn run_transactions_naive( + transactions: Vec, + maybe_block_gas_limit: Option, +) -> HashValue { let executor = TestExecutor::new(); let db = &executor.db; let mut ledger_view: ExecutedTrees = db.reader.get_latest_executed_trees().unwrap(); for txn in transactions { let out = ChunkOutput::by_transaction_execution::( - vec![txn], + vec![txn].into(), ledger_view .verified_state_view( StateViewId::Miscellaneous, @@ -656,6 +679,7 @@ fn run_transactions_naive(transactions: Vec) -> HashValue { Arc::new(AsyncProofFetcher::new(db.reader.clone())), ) .unwrap(), + maybe_block_gas_limit, ) .unwrap(); let (executed, _, _) = out.apply_to_ledger(&ledger_view, None).unwrap(); @@ -689,13 +713,13 @@ proptest! { let executor = TestExecutor::new(); let block_id = gen_block_id(1); - let mut block = TestBlock::new(num_user_txns, 10, block_id, executor.get_block_gas_limit()); + let mut block = TestBlock::new(num_user_txns, 10, block_id, BLOCK_GAS_LIMIT); let num_txns = block.txns.len() as LeafCount; block.txns[reconfig_txn_index as usize] = encode_reconfiguration_transaction(); let parent_block_id = executor.committed_block_id(); let output = executor.execute_block( - (block_id, block.txns.clone()), parent_block_id + (block_id, block.txns.clone()).into(), parent_block_id, BLOCK_GAS_LIMIT ).unwrap(); // assert: txns after the reconfiguration are with status "Retry" @@ -714,11 +738,11 @@ proptest! { // retry txns after reconfiguration let retry_block_id = gen_block_id(2); let retry_output = executor.execute_block( - (retry_block_id, block.txns.iter().skip(reconfig_txn_index as usize + 1).cloned().collect()), parent_block_id + (retry_block_id, block.txns.iter().skip(reconfig_txn_index as usize + 1).cloned().collect()).into(), parent_block_id, BLOCK_GAS_LIMIT ).unwrap(); prop_assert!(retry_output.compute_status().iter().all(|s| matches!(*s, TransactionStatus::Keep(_)))); - let ledger_version = ledger_version_from_block_size(num_txns as usize, executor.get_block_gas_limit()) as u64; + let ledger_version = ledger_version_from_block_size(num_txns as usize, BLOCK_GAS_LIMIT) as u64; // commit let ledger_info = gen_ledger_info(ledger_version, retry_output.root_hash(), retry_block_id, 12345 /* timestamp */); @@ -750,22 +774,20 @@ proptest! { fn test_executor_restart(a_size in 1..30u64, b_size in 1..30u64, amount in any::()) { let TestExecutor { _path, db, executor } = TestExecutor::new(); - let block_a = TestBlock::new(a_size, amount, gen_block_id(1), executor.get_block_gas_limit()); - let block_b = TestBlock::new(b_size, amount, gen_block_id(2), executor.get_block_gas_limit()); + let block_a = TestBlock::new(a_size, amount, gen_block_id(1), BLOCK_GAS_LIMIT); + let block_b = TestBlock::new(b_size, amount, gen_block_id(2), BLOCK_GAS_LIMIT); let mut parent_block_id; let mut root_hash; - let maybe_gas_limit = executor.get_block_gas_limit(); - // First execute and commit one block, then destroy executor. 
{ parent_block_id = executor.committed_block_id(); let output_a = executor.execute_block( - (block_a.id, block_a.txns.clone()), parent_block_id + (block_a.id, block_a.txns.clone()).into(), parent_block_id, BLOCK_GAS_LIMIT ).unwrap(); root_hash = output_a.root_hash(); - let ledger_info = gen_ledger_info(ledger_version_from_block_size(block_a.txns.len(), maybe_gas_limit) as u64, root_hash, block_a.id, 1); + let ledger_info = gen_ledger_info(ledger_version_from_block_size(block_a.txns.len(), BLOCK_GAS_LIMIT) as u64, root_hash, block_a.id, 1); executor.commit_blocks(vec![block_a.id], ledger_info).unwrap(); parent_block_id = block_a.id; } @@ -773,11 +795,10 @@ proptest! { // Now we construct a new executor and run one more block. { let executor = BlockExecutor::::new(db); - executor.update_block_gas_limit(maybe_gas_limit); - let output_b = executor.execute_block((block_b.id, block_b.txns.clone()), parent_block_id).unwrap(); + let output_b = executor.execute_block((block_b.id, block_b.txns.clone()).into(), parent_block_id, BLOCK_GAS_LIMIT).unwrap(); root_hash = output_b.root_hash(); let ledger_info = gen_ledger_info( - (ledger_version_from_block_size(block_a.txns.len(), maybe_gas_limit) + ledger_version_from_block_size(block_b.txns.len(), maybe_gas_limit)) as u64, + (ledger_version_from_block_size(block_a.txns.len(), BLOCK_GAS_LIMIT) + ledger_version_from_block_size(block_b.txns.len(), BLOCK_GAS_LIMIT)) as u64, root_hash, block_b.id, 2, @@ -788,15 +809,15 @@ proptest! { let expected_root_hash = run_transactions_naive({ let mut txns = vec![]; txns.extend(block_a.txns.iter().cloned()); - if executor.get_block_gas_limit().is_some() { + if BLOCK_GAS_LIMIT.is_some() { txns.push(Transaction::StateCheckpoint(block_a.id)); } txns.extend(block_b.txns.iter().cloned()); - if executor.get_block_gas_limit().is_some() { + if BLOCK_GAS_LIMIT.is_some() { txns.push(Transaction::StateCheckpoint(block_b.id)); } txns - }); + }, BLOCK_GAS_LIMIT); prop_assert_eq!(root_hash, expected_root_hash); } @@ -831,14 +852,14 @@ proptest! 
{ let parent_block_id = executor.committed_block_id(); let first_block_id = gen_block_id(1); let _output1 = executor.execute_block( - (first_block_id, first_block_txns), - parent_block_id + (first_block_id, first_block_txns).into(), + parent_block_id, BLOCK_GAS_LIMIT ).unwrap(); let second_block_id = gen_block_id(2); let output2 = executor.execute_block( - (second_block_id, block(second_block_txns, executor.get_block_gas_limit())), - first_block_id, + (second_block_id, block(second_block_txns, BLOCK_GAS_LIMIT)).into(), + first_block_id, BLOCK_GAS_LIMIT ).unwrap(); let version = chunk_size + overlap_size + num_new_txns + 1; diff --git a/execution/executor/tests/db_bootstrapper_test.rs b/execution/executor/tests/db_bootstrapper_test.rs index 0ccbf3e8a237e..74651fd30b862 100644 --- a/execution/executor/tests/db_bootstrapper_test.rs +++ b/execution/executor/tests/db_bootstrapper_test.rs @@ -30,7 +30,7 @@ use aptos_types::{ event::EventHandle, on_chain_config::{access_path_for_config, ConfigurationResource, OnChainConfig, ValidatorSet}, state_store::state_key::StateKey, - test_helpers::transaction_test_helpers::block, + test_helpers::transaction_test_helpers::{block, BLOCK_GAS_LIMIT}, transaction::{authenticator::AuthenticationKey, ChangeSet, Transaction, WriteSetPayload}, trusted_state::TrustedState, validator_signer::ValidatorSigner, @@ -87,8 +87,9 @@ fn execute_and_commit(txns: Vec, db: &DbReaderWriter, signer: &Vali let executor = BlockExecutor::::new(db.clone()); let output = executor .execute_block( - (block_id, block(txns, executor.get_block_gas_limit())), + (block_id, block(txns, BLOCK_GAS_LIMIT)).into(), executor.committed_block_id(), + BLOCK_GAS_LIMIT, ) .unwrap(); assert_eq!(output.num_leaves(), target_version + 1); diff --git a/execution/executor/tests/storage_integration_test.rs b/execution/executor/tests/storage_integration_test.rs index e986325192e8b..bf358bb94d4d7 100644 --- a/execution/executor/tests/storage_integration_test.rs +++ b/execution/executor/tests/storage_integration_test.rs @@ -19,6 +19,7 @@ use aptos_types::{ account_view::AccountView, block_metadata::BlockMetadata, state_store::state_key::StateKey, + test_helpers::transaction_test_helpers::BLOCK_GAS_LIMIT, transaction::{Transaction, WriteSetPayload}, trusted_state::TrustedState, validator_signer::ValidatorSigner, @@ -139,7 +140,11 @@ fn test_reconfiguration() { let txn_block = vec![txn1, txn2, txn3]; let block_id = gen_block_id(1); let vm_output = executor - .execute_block((block_id, txn_block.clone()), parent_block_id) + .execute_block( + (block_id, txn_block.clone()).into(), + parent_block_id, + BLOCK_GAS_LIMIT, + ) .unwrap(); // Make sure the execution result sees the reconfiguration diff --git a/mempool/src/core_mempool/mempool.rs b/mempool/src/core_mempool/mempool.rs index 278dcc67bb304..d9230ad738839 100644 --- a/mempool/src/core_mempool/mempool.rs +++ b/mempool/src/core_mempool/mempool.rs @@ -7,7 +7,7 @@ use crate::{ core_mempool::{ index::TxnPointer, - transaction::{MempoolTransaction, TimelineState}, + transaction::{InsertionInfo, MempoolTransaction, TimelineState}, transaction_store::TransactionStore, }, counters, @@ -111,19 +111,27 @@ impl Mempool { .reject_transaction(sender, sequence_number, hash); } + pub(crate) fn log_txn_commit_latency( + insertion_info: InsertionInfo, + bucket: &str, + stage: &'static str, + ) { + if let Ok(time_delta) = SystemTime::now().duration_since(insertion_info.insertion_time) { + counters::core_mempool_txn_commit_latency( + stage, + insertion_info.submitted_by_label(), + 
bucket, + time_delta, + ); + } + } + fn log_latency(&self, account: AccountAddress, sequence_number: u64, stage: &'static str) { - if let Some((&insertion_time, is_end_to_end, bucket)) = self + if let Some((&insertion_info, bucket)) = self .transactions - .get_insertion_time_and_bucket(&account, sequence_number) + .get_insertion_info_and_bucket(&account, sequence_number) { - if let Ok(time_delta) = SystemTime::now().duration_since(insertion_time) { - let scope = if is_end_to_end { - counters::E2E_LABEL - } else { - counters::LOCAL_LABEL - }; - counters::core_mempool_txn_commit_latency(stage, scope, bucket, time_delta); - } + Self::log_txn_commit_latency(insertion_info, bucket, stage); } } @@ -139,6 +147,7 @@ impl Mempool { ranking_score: u64, db_sequence_number: u64, timeline_state: TimelineState, + client_submitted: bool, ) -> MempoolStatus { trace!( LogSchema::new(LogEntry::AddTxn) @@ -166,6 +175,7 @@ impl Mempool { timeline_state, db_sequence_number, now, + client_submitted, ); let status = self.transactions.insert(txn_info); diff --git a/mempool/src/core_mempool/mod.rs b/mempool/src/core_mempool/mod.rs index 0c56692cf6402..434d991d03398 100644 --- a/mempool/src/core_mempool/mod.rs +++ b/mempool/src/core_mempool/mod.rs @@ -10,6 +10,6 @@ mod transaction_store; pub use self::{ index::TxnPointer, mempool::Mempool as CoreMempool, - transaction::{MempoolTransaction, TimelineState}, + transaction::{MempoolTransaction, SubmittedBy, TimelineState}, transaction_store::TXN_INDEX_ESTIMATED_BYTES, }; diff --git a/mempool/src/core_mempool/transaction.rs b/mempool/src/core_mempool/transaction.rs index da222baf9a3cf..d6850f5c7b129 100644 --- a/mempool/src/core_mempool/transaction.rs +++ b/mempool/src/core_mempool/transaction.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::core_mempool::TXN_INDEX_ESTIMATED_BYTES; +use crate::{core_mempool::TXN_INDEX_ESTIMATED_BYTES, counters}; use aptos_crypto::HashValue; use aptos_types::{account_address::AccountAddress, transaction::SignedTransaction}; use serde::{Deserialize, Serialize}; @@ -22,7 +22,7 @@ pub struct MempoolTransaction { pub ranking_score: u64, pub timeline_state: TimelineState, pub sequence_info: SequenceInfo, - pub insertion_time: SystemTime, + pub insertion_info: InsertionInfo, pub was_parked: bool, } @@ -34,6 +34,7 @@ impl MempoolTransaction { timeline_state: TimelineState, seqno: u64, insertion_time: SystemTime, + client_submitted: bool, ) -> Self { Self { sequence_info: SequenceInfo { @@ -44,7 +45,7 @@ impl MempoolTransaction { expiration_time, ranking_score, timeline_state, - insertion_time, + insertion_info: InsertionInfo::new(insertion_time, client_submitted, timeline_state), was_parked: false, } } @@ -84,6 +85,62 @@ pub struct SequenceInfo { pub account_sequence_number: u64, } +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub enum SubmittedBy { + /// The transaction was received from a client REST API submission, rather than a mempool + /// broadcast. This can be used as the time a transaction first entered the network, + /// to measure end-to-end latency within the entire network. However, if a transaction is + /// submitted to multiple nodes (by the client) then the end-to-end latency measured will not + /// be accurate (the measured value will be lower than the correct value). + Client, + /// The transaction was received from a downstream peer, i.e., not a client or a peer validator. 
+ /// At a validator, a transaction from downstream can be used as the time a transaction first + /// entered the validator network, to measure end-to-end latency within the validator network. + /// However, if a transaction enters via multiple validators (due to duplication outside of the + /// validator network) then the validator end-to-end latency measured will not be accurate + /// (the measured value will be lower than the correct value). + Downstream, + /// The transaction was received at a validator from another validator, rather than from the + /// downstream VFN. This transaction should not be used to measure end-to-end latency within the + /// validator network (see Downstream). + /// Note, with Quorum Store enabled, no transactions will be classified as PeerValidator. + PeerValidator, +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub struct InsertionInfo { + pub insertion_time: SystemTime, + pub submitted_by: SubmittedBy, +} + +impl InsertionInfo { + pub fn new( + insertion_time: SystemTime, + client_submitted: bool, + timeline_state: TimelineState, + ) -> Self { + let submitted_by = if client_submitted { + SubmittedBy::Client + } else if timeline_state == TimelineState::NonQualified { + SubmittedBy::PeerValidator + } else { + SubmittedBy::Downstream + }; + Self { + insertion_time, + submitted_by, + } + } + + pub fn submitted_by_label(&self) -> &'static str { + match self.submitted_by { + SubmittedBy::Client => counters::SUBMITTED_BY_CLIENT_LABEL, + SubmittedBy::Downstream => counters::SUBMITTED_BY_DOWNSTREAM_LABEL, + SubmittedBy::PeerValidator => counters::SUBMITTED_BY_PEER_VALIDATOR_LABEL, + } + } +} + #[cfg(test)] mod test { use crate::core_mempool::{MempoolTransaction, TimelineState}; @@ -113,6 +170,7 @@ mod test { TimelineState::NotReady, 0, SystemTime::now(), + false, ) } diff --git a/mempool/src/core_mempool/transaction_store.rs b/mempool/src/core_mempool/transaction_store.rs index 3fb620d718eea..3e5990920c44e 100644 --- a/mempool/src/core_mempool/transaction_store.rs +++ b/mempool/src/core_mempool/transaction_store.rs @@ -8,14 +8,12 @@ use crate::{ AccountTransactions, MultiBucketTimelineIndex, ParkingLotIndex, PriorityIndex, PriorityQueueIter, TTLIndex, }, - transaction::{MempoolTransaction, TimelineState}, + mempool::Mempool, + transaction::{InsertionInfo, MempoolTransaction, TimelineState}, TxnPointer, }, counters, - counters::{ - BROADCAST_BATCHED_LABEL, BROADCAST_READY_LABEL, CONSENSUS_READY_LABEL, E2E_LABEL, - LOCAL_LABEL, - }, + counters::{BROADCAST_BATCHED_LABEL, BROADCAST_READY_LABEL, CONSENSUS_READY_LABEL}, logging::{LogEntry, LogEvent, LogSchema, TxnsLog}, shared_mempool::types::MultiBucketTimelineIndexIds, }; @@ -158,18 +156,13 @@ impl TransactionStore { } } - /// Return (SystemTime, is the timestamp for end-to-end) - pub(crate) fn get_insertion_time_and_bucket( + pub(crate) fn get_insertion_info_and_bucket( &self, address: &AccountAddress, sequence_number: u64, - ) -> Option<(&SystemTime, bool, &str)> { + ) -> Option<(&InsertionInfo, &str)> { if let Some(txn) = self.get_mempool_txn(address, sequence_number) { - return Some(( - &txn.insertion_time, - txn.timeline_state != TimelineState::NonQualified, - self.get_bucket(txn.ranking_score), - )); + return Some((&txn.insertion_info, self.get_bucket(txn.ranking_score))); } None } @@ -382,35 +375,41 @@ impl TransactionStore { fn log_ready_transaction( ranking_score: u64, bucket: &str, - time_delta: Duration, + insertion_info: InsertionInfo, broadcast_ready: bool, ) { + if let Ok(time_delta) = 
SystemTime::now().duration_since(insertion_info.insertion_time) { + let submitted_by = insertion_info.submitted_by_label(); + if broadcast_ready { + counters::core_mempool_txn_commit_latency( + CONSENSUS_READY_LABEL, + submitted_by, + bucket, + time_delta, + ); + counters::core_mempool_txn_commit_latency( + BROADCAST_READY_LABEL, + submitted_by, + bucket, + time_delta, + ); + } else { + counters::core_mempool_txn_commit_latency( + CONSENSUS_READY_LABEL, + submitted_by, + bucket, + time_delta, + ); + } + } + if broadcast_ready { - counters::core_mempool_txn_commit_latency( - CONSENSUS_READY_LABEL, - E2E_LABEL, - bucket, - time_delta, - ); - counters::core_mempool_txn_commit_latency( - BROADCAST_READY_LABEL, - E2E_LABEL, - bucket, - time_delta, - ); counters::core_mempool_txn_ranking_score( BROADCAST_READY_LABEL, BROADCAST_READY_LABEL, bucket, ranking_score, ); - } else { - counters::core_mempool_txn_commit_latency( - CONSENSUS_READY_LABEL, - LOCAL_LABEL, - bucket, - time_delta, - ); } counters::core_mempool_txn_ranking_score( CONSENSUS_READY_LABEL, @@ -439,14 +438,12 @@ impl TransactionStore { } if process_ready { - if let Ok(time_delta) = SystemTime::now().duration_since(txn.insertion_time) { - Self::log_ready_transaction( - txn.ranking_score, - self.timeline_index.get_bucket(txn.ranking_score), - time_delta, - process_broadcast_ready, - ); - } + Self::log_ready_transaction( + txn.ranking_score, + self.timeline_index.get_bucket(txn.ranking_score), + txn.insertion_info, + process_broadcast_ready, + ); } // Remove txn from parking lot after it has been promoted to @@ -603,22 +600,18 @@ impl TransactionStore { if let TimelineState::Ready(timeline_id) = txn.timeline_state { last_timeline_id[i] = timeline_id; } - if let Ok(time_delta) = SystemTime::now().duration_since(txn.insertion_time) - { - let bucket = self.timeline_index.get_bucket(txn.ranking_score); - counters::core_mempool_txn_commit_latency( - BROADCAST_BATCHED_LABEL, - E2E_LABEL, - bucket, - time_delta, - ); - counters::core_mempool_txn_ranking_score( - BROADCAST_BATCHED_LABEL, - BROADCAST_BATCHED_LABEL, - bucket, - txn.ranking_score, - ); - } + let bucket = self.timeline_index.get_bucket(txn.ranking_score); + Mempool::log_txn_commit_latency( + txn.insertion_info, + bucket, + BROADCAST_BATCHED_LABEL, + ); + counters::core_mempool_txn_ranking_score( + BROADCAST_BATCHED_LABEL, + BROADCAST_BATCHED_LABEL, + bucket, + txn.ranking_score, + ); } } } @@ -658,7 +651,7 @@ impl TransactionStore { for key in self.system_ttl_index.iter().take(20) { if let Some(txn) = self.get_mempool_txn(&key.address, key.sequence_number) { if !txn.was_parked { - oldest_insertion_time = Some(txn.insertion_time); + oldest_insertion_time = Some(txn.insertion_info.insertion_time); break; } } @@ -740,7 +733,9 @@ impl TransactionStore { let account = txn.get_sender(); let txn_sequence_number = txn.sequence_info.transaction_sequence_number; gc_txns_log.add_with_status(account, txn_sequence_number, status); - if let Ok(time_delta) = SystemTime::now().duration_since(txn.insertion_time) { + if let Ok(time_delta) = + SystemTime::now().duration_since(txn.insertion_info.insertion_time) + { counters::CORE_MEMPOOL_GC_LATENCY .with_label_values(&[metric_label, status]) .observe(time_delta.as_secs_f64()); @@ -773,7 +768,12 @@ impl TransactionStore { } else { "ready" }; - txns_log.add_full_metadata(*account, *seq_num, status, txn.insertion_time); + txns_log.add_full_metadata( + *account, + *seq_num, + status, + txn.insertion_info.insertion_time, + ); } } txns_log diff --git 
a/mempool/src/counters.rs b/mempool/src/counters.rs index 586332efeebe3..3984791f91593 100644 --- a/mempool/src/counters.rs +++ b/mempool/src/counters.rs @@ -85,14 +85,15 @@ pub const SENT_LABEL: &str = "sent"; // invalid ACK type labels pub const UNKNOWN_PEER: &str = "unknown_peer"; -// Inserted transaction scope labels -pub const LOCAL_LABEL: &str = "local"; -pub const E2E_LABEL: &str = "e2e"; - // Event types for ranking_score pub const INSERT_LABEL: &str = "insert"; pub const REMOVE_LABEL: &str = "remove"; +// The submission point where the transaction originated from +pub const SUBMITTED_BY_CLIENT_LABEL: &str = "client"; +pub const SUBMITTED_BY_DOWNSTREAM_LABEL: &str = "downstream"; +pub const SUBMITTED_BY_PEER_VALIDATOR_LABEL: &str = "peer_validator"; + // Histogram buckets that make more sense at larger timescales than DEFAULT_BUCKETS const LARGER_LATENCY_BUCKETS: &[f64; 11] = &[ 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 20.0, 40.0, 80.0, 160.0, 320.0, @@ -179,12 +180,12 @@ pub static CORE_MEMPOOL_IDEMPOTENT_TXNS: Lazy = Lazy::new(|| { pub fn core_mempool_txn_commit_latency( stage: &'static str, - scope: &'static str, + submitted_by: &'static str, bucket: &str, latency: Duration, ) { CORE_MEMPOOL_TXN_COMMIT_LATENCY - .with_label_values(&[stage, scope, bucket]) + .with_label_values(&[stage, submitted_by, bucket]) .observe(latency.as_secs_f64()); } @@ -196,7 +197,7 @@ static CORE_MEMPOOL_TXN_COMMIT_LATENCY: Lazy = Lazy::new(|| { "Latency of txn reaching various stages in core mempool after insertion", LARGER_LATENCY_BUCKETS.to_vec() ); - register_histogram_vec!(histogram_opts, &["stage", "scope", "bucket"]).unwrap() + register_histogram_vec!(histogram_opts, &["stage", "submitted_by", "bucket"]).unwrap() }); pub fn core_mempool_txn_ranking_score( diff --git a/mempool/src/shared_mempool/tasks.rs b/mempool/src/shared_mempool/tasks.rs index 8c1ee6b69c634..4d952c2330c63 100644 --- a/mempool/src/shared_mempool/tasks.rs +++ b/mempool/src/shared_mempool/tasks.rs @@ -117,7 +117,7 @@ pub(crate) async fn process_client_transaction_submission smp: &SharedMempool, transactions: Vec, timeline_state: TimelineState, + client_submitted: bool, ) -> Vec where NetworkClient: NetworkClientInterface, @@ -308,7 +309,13 @@ where }) .collect(); - validate_and_add_transactions(transactions, smp, timeline_state, &mut statuses); + validate_and_add_transactions( + transactions, + smp, + timeline_state, + &mut statuses, + client_submitted, + ); notify_subscribers(SharedMempoolNotification::NewTransactions, &smp.subscribers); statuses } @@ -321,6 +328,7 @@ fn validate_and_add_transactions( smp: &SharedMempool, timeline_state: TimelineState, statuses: &mut Vec<(SignedTransaction, (MempoolStatus, Option))>, + client_submitted: bool, ) where NetworkClient: NetworkClientInterface, TransactionValidator: TransactionValidation, @@ -346,6 +354,7 @@ fn validate_and_add_transactions( ranking_score, sequence_info, timeline_state, + client_submitted, ); statuses.push((transaction, (mempool_status, None))); }, @@ -385,13 +394,20 @@ fn validate_and_add_transactions( smp: &SharedMempool, timeline_state: TimelineState, statuses: &mut Vec<(SignedTransaction, (MempoolStatus, Option))>, + client_submitted: bool, ) where NetworkClient: NetworkClientInterface, TransactionValidator: TransactionValidation, { let mut mempool = smp.mempool.lock(); for (transaction, sequence_info) in transactions.into_iter() { - let mempool_status = mempool.add_txn(transaction.clone(), 0, sequence_info, timeline_state); + let mempool_status = mempool.add_txn( + 
transaction.clone(), + 0, + sequence_info, + timeline_state, + client_submitted, + ); statuses.push((transaction, (mempool_status, None))); } } diff --git a/mempool/src/tests/common.rs b/mempool/src/tests/common.rs index b4b30c8ab8bef..4085cb7ec0af8 100644 --- a/mempool/src/tests/common.rs +++ b/mempool/src/tests/common.rs @@ -122,6 +122,7 @@ pub(crate) fn add_txns_to_mempool( txn.gas_unit_price(), transaction.account_seqno, TimelineState::NotReady, + false, ); transactions.push(txn); } @@ -139,6 +140,7 @@ pub(crate) fn add_signed_txn(pool: &mut CoreMempool, transaction: SignedTransact transaction.gas_unit_price(), 0, TimelineState::NotReady, + false, ) .code { diff --git a/mempool/src/tests/core_mempool_test.rs b/mempool/src/tests/core_mempool_test.rs index 81d55de50a702..c4cae0ac76611 100644 --- a/mempool/src/tests/core_mempool_test.rs +++ b/mempool/src/tests/core_mempool_test.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - core_mempool::{CoreMempool, MempoolTransaction, TimelineState}, + core_mempool::{CoreMempool, MempoolTransaction, SubmittedBy, TimelineState}, tests::common::{ add_signed_txn, add_txn, add_txns_to_mempool, setup_mempool, setup_mempool_with_broadcast_buckets, TestTransaction, @@ -73,28 +73,44 @@ fn test_transaction_metrics() { txn.gas_unit_price(), 0, TimelineState::NotReady, + false, ); - let txn = TestTransaction::new(1, 0, 2).make_signed_transaction(); + let txn = TestTransaction::new(1, 0, 1).make_signed_transaction(); mempool.add_txn( txn.clone(), txn.gas_unit_price(), 0, TimelineState::NonQualified, + false, + ); + let txn = TestTransaction::new(2, 0, 1).make_signed_transaction(); + mempool.add_txn( + txn.clone(), + txn.gas_unit_price(), + 0, + TimelineState::NotReady, + true, ); // Check timestamp returned as end-to-end for broadcast-able transaction - let (&_insertion_time, is_end_to_end, _bucket) = mempool + let (insertion_info, _bucket) = mempool .get_transaction_store() - .get_insertion_time_and_bucket(&TestTransaction::get_address(0), 0) + .get_insertion_info_and_bucket(&TestTransaction::get_address(0), 0) .unwrap(); - assert!(is_end_to_end); + assert_eq!(insertion_info.submitted_by, SubmittedBy::Downstream); // Check timestamp returned as not end-to-end for non-broadcast-able transaction - let (&_insertion_time, is_end_to_end, _bucket) = mempool + let (insertion_info, _bucket) = mempool .get_transaction_store() - .get_insertion_time_and_bucket(&TestTransaction::get_address(1), 0) + .get_insertion_info_and_bucket(&TestTransaction::get_address(1), 0) .unwrap(); - assert!(!is_end_to_end); + assert_eq!(insertion_info.submitted_by, SubmittedBy::PeerValidator); + + let (insertion_info, _bucket) = mempool + .get_transaction_store() + .get_insertion_info_and_bucket(&TestTransaction::get_address(2), 0) + .unwrap(); + assert_eq!(insertion_info.submitted_by, SubmittedBy::Client); } #[test] @@ -548,6 +564,7 @@ fn test_capacity_bytes() { txn.ranking_score, txn.sequence_info.account_sequence_number, txn.timeline_state, + false, ); assert_eq!(status.code, MempoolStatusCode::Accepted); }); @@ -558,6 +575,7 @@ fn test_capacity_bytes() { txn.ranking_score, txn.sequence_info.account_sequence_number, txn.timeline_state, + false, ); assert_eq!(status.code, MempoolStatusCode::MempoolIsFull); } @@ -575,6 +593,7 @@ fn new_test_mempool_transaction(address: usize, sequence_number: u64) -> Mempool TimelineState::NotReady, 0, SystemTime::now(), + false, ) } @@ -643,7 +662,7 @@ fn test_gc_ready_transaction() { // Insert in the middle transaction that's going 
to be expired. let txn = TestTransaction::new(1, 1, 1).make_signed_transaction_with_expiration_time(0); - pool.add_txn(txn, 1, 0, TimelineState::NotReady); + pool.add_txn(txn, 1, 0, TimelineState::NotReady, false); // Insert few transactions after it. // They are supposed to be ready because there's a sequential path from 0 to them. @@ -682,7 +701,7 @@ fn test_clean_stuck_transactions() { } let db_sequence_number = 10; let txn = TestTransaction::new(0, db_sequence_number, 1).make_signed_transaction(); - pool.add_txn(txn, 1, db_sequence_number, TimelineState::NotReady); + pool.add_txn(txn, 1, db_sequence_number, TimelineState::NotReady, false); let block = pool.get_batch(1, 1024, true, false, vec![]); assert_eq!(block.len(), 1); assert_eq!(block[0].sequence_number(), 10); @@ -693,7 +712,13 @@ fn test_get_transaction_by_hash() { let mut pool = setup_mempool().0; let db_sequence_number = 10; let txn = TestTransaction::new(0, db_sequence_number, 1).make_signed_transaction(); - pool.add_txn(txn.clone(), 1, db_sequence_number, TimelineState::NotReady); + pool.add_txn( + txn.clone(), + 1, + db_sequence_number, + TimelineState::NotReady, + false, + ); let hash = txn.clone().committed_hash(); let ret = pool.get_by_hash(hash); assert_eq!(ret, Some(txn)); @@ -707,7 +732,13 @@ fn test_get_transaction_by_hash_after_the_txn_is_updated() { let mut pool = setup_mempool().0; let db_sequence_number = 10; let txn = TestTransaction::new(0, db_sequence_number, 1).make_signed_transaction(); - pool.add_txn(txn.clone(), 1, db_sequence_number, TimelineState::NotReady); + pool.add_txn( + txn.clone(), + 1, + db_sequence_number, + TimelineState::NotReady, + false, + ); let hash = txn.committed_hash(); // new txn with higher gas price @@ -717,6 +748,7 @@ fn test_get_transaction_by_hash_after_the_txn_is_updated() { 1, db_sequence_number, TimelineState::NotReady, + false, ); let new_txn_hash = new_txn.clone().committed_hash(); diff --git a/mempool/src/tests/fuzzing.rs b/mempool/src/tests/fuzzing.rs index 898f2efadddb5..2b756bcb6a33f 100644 --- a/mempool/src/tests/fuzzing.rs +++ b/mempool/src/tests/fuzzing.rs @@ -57,7 +57,7 @@ pub fn test_mempool_process_incoming_transactions_impl( config.base.role, ); - let _ = tasks::process_incoming_transactions(&smp, txns, timeline_state); + let _ = tasks::process_incoming_transactions(&smp, txns, timeline_state, false); } proptest! 
{ diff --git a/mempool/src/tests/mocks.rs b/mempool/src/tests/mocks.rs index 92ec705f8c388..b7259bb783355 100644 --- a/mempool/src/tests/mocks.rs +++ b/mempool/src/tests/mocks.rs @@ -166,6 +166,7 @@ impl MockSharedMempool { txn.gas_unit_price(), 0, TimelineState::NotReady, + false, ) .code != MempoolStatusCode::Accepted diff --git a/mempool/src/tests/node.rs b/mempool/src/tests/node.rs index b5d715e257d9f..d2537c36a06e1 100644 --- a/mempool/src/tests/node.rs +++ b/mempool/src/tests/node.rs @@ -375,6 +375,7 @@ impl Node { transaction.gas_unit_price(), 0, TimelineState::NotReady, + false, ); } } diff --git a/rust-toolchain b/rust-toolchain index 0403bed10c327..832e9afb6c139 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.66.1 +1.70.0 diff --git a/scripts/dev_setup.sh b/scripts/dev_setup.sh index bbc339de437c6..2a8fb4ca00b2b 100755 --- a/scripts/dev_setup.sh +++ b/scripts/dev_setup.sh @@ -1039,4 +1039,4 @@ You should now be able to build the project by running: EOF fi -exit 0 +exit 0 \ No newline at end of file diff --git a/scripts/devnet_message.sh b/scripts/devnet_message.sh index d036f5b2a558d..5da0185b4105d 100755 --- a/scripts/devnet_message.sh +++ b/scripts/devnet_message.sh @@ -23,6 +23,6 @@ For upgrade, make sure you pulled the latest docker image, or build the rust bin - genesis.blob sha256: $GENESIS_SHA - waypoint: $WAYPOINT - Chain ID: $CHAIN_ID -You can follow the instructions here for upgrade: https://aptos.dev/tutorials/run-a-fullnode#update-fullnode-with-new-releases/ +You can follow the instructions here for upgrade: https://aptos.dev/nodes/full-node/update-fullnode-with-new-devnet-releases EOF diff --git a/scripts/update_docker_images.py b/scripts/update_docker_images.py index ad55a3f304ec8..89602778c851a 100755 --- a/scripts/update_docker_images.py +++ b/scripts/update_docker_images.py @@ -9,14 +9,14 @@ OS = "linux" IMAGES = { - "debian-base": "debian:bullseye", - "rust-base": "rust:1.66.1-bullseye", + "debian": "debian:bullseye", + "rust": "rust:1.66.1-bullseye", } def update() -> int: script_dir = os.path.dirname(os.path.realpath(__file__)) - dockerfile_path = os.path.join(script_dir, "..", "docker", "rust-all.Dockerfile") + dockerfile_path = os.path.join(script_dir, "..", "docker", "builder", "docker-bake-rust-all.hcl") update_exists = False @@ -24,7 +24,7 @@ def update() -> int: manifest = None digest = None current_digest = None - regex = f"FROM [\S]+ AS {base_image}" + regex = f"{base_image} = \"docker-image://{image_name}.*\"" print(f"Update {image_name}") manifest_inspect = subprocess.check_output(["docker", "manifest", "inspect", image_name]) @@ -48,8 +48,8 @@ def update() -> int: dockerfile_content = f.read() for line in dockerfile_content.splitlines(): - if re.match(regex, line): - current_digest = line.split()[1].split("@")[1] + if re.search(regex, line): + current_digest = line.split("@")[1].split("\"")[0] break if current_digest == None: @@ -61,7 +61,7 @@ def update() -> int: continue print(f"Found update for {image_name}: {current_digest} -> {digest}") - dockerfile_content = re.sub(regex, f"FROM {image_name}@{digest} AS {base_image}", dockerfile_content) + dockerfile_content = re.sub(regex, f"{base_image} = \"docker-image://{image_name}@{digest}\"", dockerfile_content) with open(dockerfile_path, "w") as f: f.write(dockerfile_content) diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index a183e66394ab6..0f326eb95452b 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -14,6 +14,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { 
workspace = true } +aptos-api-types = { workspace = true } aptos-cached-packages = { workspace = true } aptos-crypto = { workspace = true } aptos-global-constants = { workspace = true } diff --git a/state-sync/aptos-data-client/Cargo.toml b/state-sync/aptos-data-client/Cargo.toml index a2e233a324204..3e9d74287ed8b 100644 --- a/state-sync/aptos-data-client/Cargo.toml +++ b/state-sync/aptos-data-client/Cargo.toml @@ -21,6 +21,7 @@ aptos-logger = { workspace = true } aptos-metrics-core = { workspace = true } aptos-netcore = { workspace = true } aptos-network = { workspace = true } +aptos-storage-interface = { workspace = true } aptos-storage-service-client = { workspace = true } aptos-storage-service-types = { workspace = true } aptos-time-service = { workspace = true } @@ -34,11 +35,14 @@ thiserror = { workspace = true } tokio = { workspace = true } [dev-dependencies] +anyhow = { workspace = true } aptos-channels = { workspace = true } aptos-network = { workspace = true, features = ["fuzzing"] } aptos-storage-service-server = { workspace = true } aptos-time-service = { workspace = true, features = ["async", "testing"] } +async-trait = { workspace = true } bcs = { workspace = true } claims = { workspace = true } maplit = { workspace = true } +mockall = { workspace = true } tokio = { workspace = true } diff --git a/state-sync/aptos-data-client/src/client.rs b/state-sync/aptos-data-client/src/client.rs index be0c852eb5abd..b558d5520cd14 100644 --- a/state-sync/aptos-data-client/src/client.rs +++ b/state-sync/aptos-data-client/src/client.rs @@ -24,6 +24,7 @@ use aptos_id_generator::{IdGenerator, U64IdGenerator}; use aptos_infallible::RwLock; use aptos_logger::{debug, info, sample, sample::SampleRate, trace, warn}; use aptos_network::{application::interface::NetworkClient, protocols::network::RpcError}; +use aptos_storage_interface::DbReader; use aptos_storage_service_client::StorageServiceClient; use aptos_storage_service_types::{ requests::{ @@ -88,10 +89,12 @@ impl AptosDataClient { data_client_config: AptosDataClientConfig, base_config: BaseConfig, time_service: TimeService, + storage: Arc, storage_service_client: StorageServiceClient>, runtime: Option, ) -> (Self, DataSummaryPoller) { - let client = Self { + // Create the data client + let data_client = Self { data_client_config, storage_service_client: storage_service_client.clone(), peer_states: Arc::new(RwLock::new(PeerStates::new( @@ -102,13 +105,18 @@ impl AptosDataClient { global_summary_cache: Arc::new(RwLock::new(GlobalDataSummary::empty())), response_id_generator: Arc::new(U64IdGenerator::new()), }; - let poller = DataSummaryPoller::new( - client.clone(), - Duration::from_millis(client.data_client_config.summary_poll_loop_interval_ms), + + // Create the data summary poller + let data_summary_poller = DataSummaryPoller::new( + data_client_config, + data_client.clone(), + Duration::from_millis(data_client.data_client_config.summary_poll_loop_interval_ms), runtime, + storage, time_service, ); - (client, poller) + + (data_client, data_summary_poller) } /// Returns true iff compression should be requested diff --git a/state-sync/aptos-data-client/src/latency_monitor.rs b/state-sync/aptos-data-client/src/latency_monitor.rs new file mode 100644 index 0000000000000..ca714d84388db --- /dev/null +++ b/state-sync/aptos-data-client/src/latency_monitor.rs @@ -0,0 +1,495 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + interface::AptosDataClientInterface, + logging::{LogEntry, LogEvent, LogSchema}, + 
metrics, +}; +use aptos_config::config::AptosDataClientConfig; +use aptos_logger::{info, sample, sample::SampleRate, warn}; +use aptos_storage_interface::DbReader; +use aptos_time_service::{TimeService, TimeServiceTrait}; +use futures::StreamExt; +use std::{ + collections::BTreeMap, + sync::Arc, + time::{Duration, Instant}, +}; + +// Useful constants +const LATENCY_MONITOR_LOG_FREQ_SECS: u64 = 5; +const MAX_NUM_TRACKED_VERSION_ENTRIES: usize = 10_000; +const MAX_VERSION_LAG_TO_TOLERATE: u64 = 10_000; + +/// A simple monitor that tracks the latencies taken to see +/// and sync new blockchain data (i.e., transactions). +pub struct LatencyMonitor { + advertised_version_timestamps: BTreeMap, // The timestamps when advertised versions were first seen + caught_up_to_latest: bool, // Whether the node has ever caught up to the latest blockchain version + data_client: Arc, // The data client through which to see advertised data + monitor_loop_interval: Duration, // The interval between latency monitor loop executions + storage: Arc, // The reader interface to storage + time_service: TimeService, // The service to monitor elapsed time +} + +impl LatencyMonitor { + pub fn new( + data_client_config: AptosDataClientConfig, + data_client: Arc, + storage: Arc, + time_service: TimeService, + ) -> Self { + let monitor_loop_interval = + Duration::from_millis(data_client_config.latency_monitor_loop_interval_ms); + + Self { + advertised_version_timestamps: BTreeMap::new(), + caught_up_to_latest: false, + data_client, + monitor_loop_interval, + storage, + time_service, + } + } + + /// Starts the latency monitor and periodically updates the latency metrics + pub async fn start_latency_monitor(mut self) { + info!( + (LogSchema::new(LogEntry::LatencyMonitor) + .message("Starting the Aptos data client latency monitor!")) + ); + let loop_ticker = self.time_service.interval(self.monitor_loop_interval); + futures::pin_mut!(loop_ticker); + + // Start the monitor + loop { + // Wait for the next round + loop_ticker.next().await; + + // Get the highest synced version from storage + let highest_synced_version = match self.storage.get_latest_version() { + Ok(version) => version, + Err(error) => { + sample!( + SampleRate::Duration(Duration::from_secs(LATENCY_MONITOR_LOG_FREQ_SECS)), + warn!( + (LogSchema::new(LogEntry::LatencyMonitor) + .event(LogEvent::StorageReadFailed) + .message(&format!("Unable to read the highest synced version: {:?}", error))) + ); + ); + continue; // Continue to the next round + }, + }; + + // Update the latency metrics for all versions that we've now synced + self.update_latency_metrics(highest_synced_version); + + // Get the highest advertised version from the global data summary + let advertised_data = &self.data_client.get_global_data_summary().advertised_data; + let highest_advertised_version = match advertised_data.highest_synced_ledger_info() { + Some(ledger_info) => ledger_info.ledger_info().version(), + None => { + sample!( + SampleRate::Duration(Duration::from_secs(LATENCY_MONITOR_LOG_FREQ_SECS)), + warn!( + (LogSchema::new(LogEntry::LatencyMonitor) + .event(LogEvent::AggregateSummary) + .message("Unable to get the highest advertised version!")) + ); + ); + continue; // Continue to the next round + }, + }; + + // Update the advertised version timestamps + self.update_advertised_version_timestamps( + highest_synced_version, + highest_advertised_version, + ); + } + } + + /// Updates the latency metrics for all versions that have now been synced + fn update_latency_metrics(&mut self, 
highest_synced_version: u64) { + // Split the advertised versions into synced and unsynced versions + let unsynced_advertised_versions = self + .advertised_version_timestamps + .split_off(&(highest_synced_version + 1)); + + // Update the metrics for all synced versions + for (synced_version, (seen_time, seen_timestamp_usecs)) in + self.advertised_version_timestamps.iter() + { + // Update the seen to synced latencies + let duration_from_seen_to_synced = self.time_service.now().duration_since(*seen_time); + metrics::observe_value_with_label( + &metrics::SYNC_LATENCIES, + metrics::SEEN_TO_SYNC_LATENCY_LABEL, + duration_from_seen_to_synced.as_secs_f64(), + ); + + // Update the proposal latencies + if let Ok(block_timestamp_usecs) = self.storage.get_block_timestamp(*synced_version) { + // Update the propose to seen latencies + if let Some(duration_from_propose_to_seen) = + calculate_duration_from_proposal(block_timestamp_usecs, *seen_timestamp_usecs) + { + metrics::observe_value_with_label( + &metrics::SYNC_LATENCIES, + metrics::PROPOSE_TO_SEEN_LATENCY_LABEL, + duration_from_propose_to_seen.as_secs_f64(), + ); + } + + // Update the propose to synced latencies + let timestamp_now_usecs = self.get_timestamp_now_usecs(); + if let Some(duration_from_propose_to_sync) = + calculate_duration_from_proposal(block_timestamp_usecs, timestamp_now_usecs) + { + metrics::observe_value_with_label( + &metrics::SYNC_LATENCIES, + metrics::PROPOSE_TO_SYNC_LATENCY_LABEL, + duration_from_propose_to_sync.as_secs_f64(), + ); + } + } + } + + // Update the advertised versions with those we still need to sync + self.advertised_version_timestamps = unsynced_advertised_versions; + } + + /// Updates the advertised version timestamps by inserting any newly seen versions + /// into the map and garbage collecting any old versions. + fn update_advertised_version_timestamps( + &mut self, + highest_synced_version: u64, + highest_advertised_version: u64, + ) { + // Check if we're still catching up to the latest version + if !self.caught_up_to_latest { + if highest_synced_version + MAX_VERSION_LAG_TO_TOLERATE >= highest_advertised_version { + info!( + (LogSchema::new(LogEntry::LatencyMonitor) + .event(LogEvent::CaughtUpToLatest) + .message( + "We've caught up to the latest version! Starting the latency monitor." + )) + ); + self.caught_up_to_latest = true; // We've caught up + } else { + return; // We're still catching up, so we shouldn't update the advertised version timestamps + } + } + + // If we're already synced with the highest advertised version, there's nothing to do + if highest_synced_version >= highest_advertised_version { + return; + } + + // Get the current time and timestamp (note: we store both because + // there isn't a clean way of converting between them when relying + // on the time service). + let time_now_instant = self.time_service.now(); + let timestamp_now_usecs = self.get_timestamp_now_usecs(); + + // Insert the newly seen version into the advertised version timestamps + self.advertised_version_timestamps.insert( + highest_advertised_version, + (time_now_instant, timestamp_now_usecs), + ); + + // If the map is too large, garbage collect the old versions + while self.advertised_version_timestamps.len() > MAX_NUM_TRACKED_VERSION_ENTRIES { + // Remove the lowest version from the map by popping the first + // item. This is possible because BTreeMaps are sorted by key. 
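// Illustrative aside on the BTreeMap bookkeeping used here and in
// `update_latency_metrics()` above: because the map is keyed by version, `pop_first()`
// always evicts the oldest tracked version, and `split_off(&(highest_synced_version + 1))`
// keeps everything at or below the synced version in place for metric emission while
// returning the strictly newer, still-unsynced entries. A minimal sketch of the same
// standard-library behavior (hypothetical versions, not part of the diff):
//
//     use std::collections::BTreeMap;
//     let mut tracked: BTreeMap<u64, &str> = BTreeMap::new();
//     tracked.insert(10, "v10");
//     tracked.insert(20, "v20");
//     tracked.insert(30, "v30");
//     let unsynced = tracked.split_off(&21); // unsynced == {30: "v30"}
//     assert_eq!(tracked.len(), 2);          // versions 10 and 20 remain (synced)
//     tracked.pop_first();                   // evicts the smallest key, version 10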
+ self.advertised_version_timestamps.pop_first(); + } + } + + /// Returns the current timestamp (in microseconds) since the Unix epoch + fn get_timestamp_now_usecs(&self) -> u64 { + self.time_service.now_unix_time().as_micros() as u64 + } +} + +/// Calculates the duration between the propose timestamp and the given +/// timestamp. If the propose time is not in the past, this returns None. +/// +/// Note: the propose timestamp and the given timestamp should both +/// be durations (in microseconds) since the Unix epoch. +fn calculate_duration_from_proposal( + propose_timestamp_usecs: u64, + given_timestamp_usecs: u64, +) -> Option { + if given_timestamp_usecs > propose_timestamp_usecs { + Some(Duration::from_micros( + given_timestamp_usecs - propose_timestamp_usecs, + )) + } else { + // Log the error and return None + sample!( + SampleRate::Duration(Duration::from_secs(LATENCY_MONITOR_LOG_FREQ_SECS)), + warn!( + (LogSchema::new(LogEntry::LatencyMonitor) + .event(LogEvent::UnexpectedError) + .message("The propose timestamp is ahead of the given timestamp!")) + ); + ); + None + } +} + +#[cfg(test)] +mod tests { + use crate::{ + latency_monitor, + latency_monitor::{ + calculate_duration_from_proposal, LatencyMonitor, MAX_NUM_TRACKED_VERSION_ENTRIES, + MAX_VERSION_LAG_TO_TOLERATE, + }, + tests::mock::{create_mock_data_client, create_mock_db_reader}, + }; + use aptos_config::config::AptosDataClientConfig; + use aptos_time_service::{TimeService, TimeServiceTrait}; + use std::time::{Duration, Instant}; + + #[test] + fn test_calculate_duration_from_proposal() { + // Test a valid duration (i.e., where proposal time is earlier than the given time) + let propose_timestamp_usecs = 100; + let given_timestamp_usecs = 200; + let calculated_duration = + calculate_duration_from_proposal(propose_timestamp_usecs, given_timestamp_usecs); + assert_eq!( + calculated_duration, + Some(Duration::from_micros( + given_timestamp_usecs - propose_timestamp_usecs + )) + ); + + // Test an invalid duration (i.e., where proposal time is equal to the given time) + let timestamp_usecs = 100_000; + let calculated_duration = + calculate_duration_from_proposal(timestamp_usecs, timestamp_usecs); + assert_eq!(calculated_duration, None); + + // Test an invalid duration (i.e., where proposal time is after the given time) + let propose_timestamp_usecs = 100_000_001; + let given_timestamp_usecs = 100_000_000; + let calculated_duration = + calculate_duration_from_proposal(propose_timestamp_usecs, given_timestamp_usecs); + assert_eq!(calculated_duration, None); + } + + #[tokio::test] + async fn test_advertised_version_timestamps() { + // Create a latency monitor + let (time_service, mut latency_monitor) = create_latency_monitor(); + + // Verify the initial state + assert!(!latency_monitor.caught_up_to_latest); + verify_advertised_version_timestamps_length(&mut latency_monitor, 0); + + // Update the advertised version timestamps + let highest_advertised_version = MAX_VERSION_LAG_TO_TOLERATE + 100; + let highest_synced_version = 0; + latency_monitor.update_advertised_version_timestamps( + highest_synced_version, + highest_advertised_version, + ); + + // Verify that we still haven't caught up (the sync lag is too large) + let time_service = time_service.into_mock(); + assert!(!latency_monitor.caught_up_to_latest); + verify_advertised_version_timestamps_length(&mut latency_monitor, 0); + + // Update the advertised version timestamps + let mut highest_advertised_version = MAX_VERSION_LAG_TO_TOLERATE + 100; + let highest_synced_version = 100; 
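// Worked check of the catch-up threshold exercised by the call below: the monitor only
// flips `caught_up_to_latest` once
// `highest_synced_version + MAX_VERSION_LAG_TO_TOLERATE >= highest_advertised_version`.
// With the values above that is 100 + 10_000 = 10_100 >= 10_100, so this update marks
// the node as caught up and records its first advertised-version timestamp, whereas the
// earlier call (0 + 10_000 = 10_000 < 10_100) returned before tracking anything.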
+ latency_monitor.update_advertised_version_timestamps( + highest_synced_version, + highest_advertised_version, + ); + + // Verify that we've finally caught up and started tracking latencies + assert!(latency_monitor.caught_up_to_latest); + verify_advertised_version_timestamps_length(&mut latency_monitor, 1); + + // Verify the timestamps of the highest advertised version + let (time_now_instant, timestamp_now_usecs) = + get_advertised_version_timestamps(&mut latency_monitor, &highest_advertised_version); + assert_eq!(time_now_instant, time_service.now()); + assert_eq!( + timestamp_now_usecs, + time_service.now_unix_time().as_micros() as u64 + ); + + // Elapse the time + time_service.advance_ms(1000); + + // Update the advertised version timestamps again + highest_advertised_version += 100; + latency_monitor.update_advertised_version_timestamps( + highest_synced_version, + highest_advertised_version, + ); + + // Verify the number of tracked versions + verify_advertised_version_timestamps_length(&mut latency_monitor, 2); + + // Verify the timestamps of the highest advertised version + let (time_now_instant, timestamp_now_usecs) = + get_advertised_version_timestamps(&mut latency_monitor, &highest_advertised_version); + assert_eq!(time_now_instant, time_service.now()); + assert_eq!( + timestamp_now_usecs, + time_service.now_unix_time().as_micros() as u64 + ); + } + + #[tokio::test] + async fn test_advertised_version_timestamps_garbage_collection() { + // Create a latency monitor (and mark it as caught up) + let (time_service, mut latency_monitor) = create_latency_monitor(); + latency_monitor.caught_up_to_latest = true; + + // Update the advertised versions many more times than the max + let num_advertised_versions = MAX_NUM_TRACKED_VERSION_ENTRIES as u64 * 5; + for advertised_version in 0..num_advertised_versions { + latency_monitor.update_advertised_version_timestamps(0, advertised_version); + } + + // Verify that we're tracking the max number of advertised version timestamps + // (i.e., that garbage collection has kicked in). 
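// For context on the expected count below: the loop above attempts roughly
// 5 * MAX_NUM_TRACKED_VERSION_ENTRIES insertions, and every insertion past the cap
// drives the `pop_first()` loop in `update_advertised_version_timestamps`, evicting
// the smallest (oldest) version each time. Only the newest
// MAX_NUM_TRACKED_VERSION_ENTRIES (10_000) versions should therefore still be tracked.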
+ verify_advertised_version_timestamps_length( + &mut latency_monitor, + MAX_NUM_TRACKED_VERSION_ENTRIES as u64, + ); + + // Update the latency metrics and verify that the tracked version timestamps are empty + latency_monitor.update_latency_metrics(num_advertised_versions); + verify_advertised_version_timestamps_length(&mut latency_monitor, 0); + + // Update the advertised versions many more times than the max (again) + let time_service = time_service.into_mock(); + let start_time_usecs = time_service.now_unix_time().as_micros() as u64; + for advertised_version in 0..num_advertised_versions { + // Elapse some time (1 ms) + time_service.advance_ms(1); + + // Update the advertised version timestamps + latency_monitor.update_advertised_version_timestamps(0, advertised_version); + } + + // Verify the advertised version timestamps are correctly populated + let lowest_tracked_version = + num_advertised_versions - (MAX_NUM_TRACKED_VERSION_ENTRIES as u64); + for advertised_version in lowest_tracked_version..num_advertised_versions { + let (_, timestamp_now_usecs) = + get_advertised_version_timestamps(&mut latency_monitor, &advertised_version); + assert_eq!( + timestamp_now_usecs, + start_time_usecs + ((advertised_version + 1) * 1000) + ); + } + } + + #[tokio::test] + async fn test_advertised_version_timestamps_split() { + // Create a latency monitor (and mark it as caught up) + let (time_service, mut latency_monitor) = create_latency_monitor(); + latency_monitor.caught_up_to_latest = true; + + // Update the advertised versions several times + let time_service = time_service.into_mock(); + let num_advertised_versions = 100; + for advertised_version in 0..num_advertised_versions { + // Elapse some time (1 ms) + time_service.advance_ms(1); + + // Update the advertised version timestamps + latency_monitor.update_advertised_version_timestamps(0, advertised_version + 1); + } + + // Verify that we're tracking the correct number of advertised version timestamps + verify_advertised_version_timestamps_length(&mut latency_monitor, num_advertised_versions); + + // Update the latency metrics (we've only synced the first half of the advertised versions) + let highest_synced_version = 50; + latency_monitor.update_latency_metrics(highest_synced_version); + + // Verify that we're tracking the correct number of advertised version timestamps + let expected_num_tracked_versions = 50; + verify_advertised_version_timestamps_length( + &mut latency_monitor, + expected_num_tracked_versions, + ); + + // Update the latency metrics (we've now almost synced all advertised versions) + let highest_synced_version = 98; + latency_monitor.update_latency_metrics(highest_synced_version); + + // Verify that we're tracking the correct number of advertised version timestamps + let expected_num_tracked_versions = 2; + verify_advertised_version_timestamps_length( + &mut latency_monitor, + expected_num_tracked_versions, + ); + + // Update the latency metrics (we've now synced all advertised versions) + let highest_synced_version = 100; + latency_monitor.update_latency_metrics(highest_synced_version); + + // Verify that we're tracking the correct number of advertised version timestamps + verify_advertised_version_timestamps_length(&mut latency_monitor, 0); + + // Update the advertised version timestamps (we're now synced to the advertised version) + latency_monitor.update_advertised_version_timestamps(200, 200); + + // Verify that we're tracking the correct number of advertised version timestamps + 
verify_advertised_version_timestamps_length(&mut latency_monitor, 0); + } + + /// Creates a latency monitor for testing + fn create_latency_monitor() -> (TimeService, LatencyMonitor) { + let data_client_config = AptosDataClientConfig::default(); + let data_client = create_mock_data_client(); + let storage = create_mock_db_reader(); + let time_service = TimeService::mock(); + let latency_monitor = latency_monitor::LatencyMonitor::new( + data_client_config, + data_client.clone(), + storage.clone(), + time_service.clone(), + ); + + (time_service, latency_monitor) + } + + /// Returns the advertised version timestamps for the given version + fn get_advertised_version_timestamps( + latency_monitor: &mut LatencyMonitor, + highest_advertised_version: &u64, + ) -> (Instant, u64) { + let (time_now_instant, timestamp_now_usecs) = latency_monitor + .advertised_version_timestamps + .get(highest_advertised_version) + .unwrap(); + + (*time_now_instant, *timestamp_now_usecs) + } + + /// Verifies that the length of the advertised version timestamps is correct + fn verify_advertised_version_timestamps_length( + latency_monitor: &mut LatencyMonitor, + expected_length: u64, + ) { + assert_eq!( + latency_monitor.advertised_version_timestamps.len(), + expected_length as usize + ); + } +} diff --git a/state-sync/aptos-data-client/src/lib.rs b/state-sync/aptos-data-client/src/lib.rs index 76e22b7d6417b..6c1c1d8eb8b63 100644 --- a/state-sync/aptos-data-client/src/lib.rs +++ b/state-sync/aptos-data-client/src/lib.rs @@ -7,6 +7,7 @@ pub mod client; pub mod error; pub mod global_summary; pub mod interface; +mod latency_monitor; mod logging; mod metrics; mod peer_states; diff --git a/state-sync/aptos-data-client/src/logging.rs b/state-sync/aptos-data-client/src/logging.rs index d953026f8ef13..095fa81b2e51b 100644 --- a/state-sync/aptos-data-client/src/logging.rs +++ b/state-sync/aptos-data-client/src/logging.rs @@ -41,6 +41,7 @@ impl<'a> LogSchema<'a> { #[serde(rename_all = "snake_case")] pub enum LogEntry { DataSummaryPoller, + LatencyMonitor, PeerStates, StorageServiceRequest, StorageServiceResponse, @@ -52,6 +53,7 @@ pub enum LogEntry { #[serde(rename_all = "snake_case")] pub enum LogEvent { AggregateSummary, + CaughtUpToLatest, NoPeersToPoll, PeerIgnored, PeerNoLongerIgnored, @@ -61,4 +63,6 @@ pub enum LogEvent { ResponseError, ResponseSuccess, SendRequest, + StorageReadFailed, + UnexpectedError, } diff --git a/state-sync/aptos-data-client/src/metrics.rs b/state-sync/aptos-data-client/src/metrics.rs index 93d7f81ce8306..3e2830977610b 100644 --- a/state-sync/aptos-data-client/src/metrics.rs +++ b/state-sync/aptos-data-client/src/metrics.rs @@ -8,16 +8,13 @@ use aptos_metrics_core::{ HistogramTimer, HistogramVec, IntCounterVec, IntGaugeVec, }; -/// The special label TOTAL_COUNT stores the sum of all values in the counter. -pub const TOTAL_COUNT_LABEL: &str = "TOTAL_COUNT"; +// Useful metric constants and labels pub const PRIORITIZED_PEER: &str = "prioritized_peer"; +pub const PROPOSE_TO_SEEN_LATENCY_LABEL: &str = "propose_to_seen_latency"; +pub const PROPOSE_TO_SYNC_LATENCY_LABEL: &str = "propose_to_sync_latency"; pub const REGULAR_PEER: &str = "regular_peer"; - -// Latency buckets for network latencies (i.e., the defaults only go up -// to 10 seconds, but we usually require more). 
-const NETWORK_LATENCY_BUCKETS: [f64; 14] = [ - 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 40.0, 60.0, -]; +pub const SEEN_TO_SYNC_LATENCY_LABEL: &str = "seen_to_sync_latency"; +pub const TOTAL_COUNT_LABEL: &str = "TOTAL_COUNT"; // TOOD(joshlind): add peer priorities back to the requests @@ -51,12 +48,18 @@ pub static ERROR_RESPONSES: Lazy = Lazy::new(|| { .unwrap() }); +// Latency buckets for network latencies (seconds) +const REQUEST_LATENCY_BUCKETS_SECS: [f64; 18] = [ + 0.05, 0.1, 0.2, 0.3, 0.5, 0.75, 1.0, 1.5, 2.0, 3.0, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 40.0, + 60.0, +]; + /// Counter for tracking request latencies pub static REQUEST_LATENCIES: Lazy = Lazy::new(|| { let histogram_opts = histogram_opts!( "aptos_data_client_request_latencies", "Counters related to request latencies", - NETWORK_LATENCY_BUCKETS.to_vec() + REQUEST_LATENCY_BUCKETS_SECS.to_vec() ); register_histogram_vec!(histogram_opts, &["request_type", "network"]).unwrap() }); @@ -111,6 +114,23 @@ pub static OPTIMAL_CHUNK_SIZES: Lazy = Lazy::new(|| { .unwrap() }); +// Latency buckets for the sync latencies (seconds). Note: there are a +// lot of buckets here because we really care about sync latencies. +const SYNC_LATENCY_BUCKETS_SECS: [f64; 36] = [ + 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, + 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 3.0, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 60.0, 120.0, 180.0, +]; + +/// Counter for tracking various sync latencies +pub static SYNC_LATENCIES: Lazy = Lazy::new(|| { + let histogram_opts = histogram_opts!( + "aptos_data_client_sync_latencies", + "Counters related to sync latencies", + SYNC_LATENCY_BUCKETS_SECS.to_vec() + ); + register_histogram_vec!(histogram_opts, &["label"]).unwrap() +}); + /// An enum representing the various types of data that can be /// fetched via the data client. 
pub enum DataType { @@ -153,6 +173,11 @@ pub fn increment_request_counter( .inc(); } +/// Observes the value for the provided histogram and label +pub fn observe_value_with_label(histogram: &Lazy, label: &str, value: f64) { + histogram.with_label_values(&[label]).observe(value) +} + /// Sets the gauge with the specific label and value pub fn set_gauge(counter: &Lazy, label: &str, value: u64) { counter.with_label_values(&[label]).set(value as i64); diff --git a/state-sync/aptos-data-client/src/poller.rs b/state-sync/aptos-data-client/src/poller.rs index b8ee6fde2eb1c..1618cc886fc8e 100644 --- a/state-sync/aptos-data-client/src/poller.rs +++ b/state-sync/aptos-data-client/src/poller.rs @@ -6,19 +6,21 @@ use crate::{ error::Error, global_summary::GlobalDataSummary, interface::{AptosDataClientInterface, Response}, + latency_monitor::LatencyMonitor, logging::{LogEntry, LogEvent, LogSchema}, metrics, metrics::{set_gauge, start_request_timer, DataType}, }; -use aptos_config::network_id::PeerNetworkId; +use aptos_config::{config::AptosDataClientConfig, network_id::PeerNetworkId}; use aptos_logger::{debug, info, sample, sample::SampleRate, warn}; +use aptos_storage_interface::DbReader; use aptos_storage_service_types::{ requests::{DataRequest, StorageServiceRequest}, responses::StorageServerSummary, }; use aptos_time_service::{TimeService, TimeServiceTrait}; use futures::StreamExt; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use tokio::{runtime::Handle, task::JoinHandle}; // Useful constants @@ -30,29 +32,45 @@ const REGULAR_PEER_SAMPLE_FREQ: u64 = 3; /// A poller for storage summaries that is responsible for periodically refreshing /// the view of advertised data in the network. pub struct DataSummaryPoller { - data_client: AptosDataClient, // The data client through which to poll peers - poll_loop_interval: Duration, // The interval between polling loop executions - runtime: Option, // An optional runtime on which to spawn the poller threads - time_service: TimeService, // The service to monitor elapsed time + data_client_config: AptosDataClientConfig, // The configuration for the data client + data_client: AptosDataClient, // The data client through which to poll peers + poll_loop_interval: Duration, // The interval between polling loop executions + runtime: Option, // An optional runtime on which to spawn the poller threads + storage: Arc, // The reader interface to storage + time_service: TimeService, // The service to monitor elapsed time } impl DataSummaryPoller { pub fn new( + data_client_config: AptosDataClientConfig, data_client: AptosDataClient, poll_loop_interval: Duration, runtime: Option, + storage: Arc, time_service: TimeService, ) -> Self { Self { + data_client_config, data_client, poll_loop_interval, runtime, + storage, time_service, } } /// Runs the poller that continuously updates the global data summary pub async fn start_poller(self) { + // Create and start the latency monitor + start_latency_monitor( + self.data_client_config, + self.data_client.clone(), + self.storage.clone(), + self.time_service.clone(), + self.runtime.clone(), + ); + + // Start the poller info!( (LogSchema::new(LogEntry::DataSummaryPoller) .message("Starting the Aptos data poller!")) @@ -228,6 +246,30 @@ pub(crate) fn poll_peer( } } +/// Spawns the dedicated latency monitor +fn start_latency_monitor( + data_client_config: AptosDataClientConfig, + data_client: AptosDataClient, + storage: Arc, + time_service: TimeService, + runtime: Option, +) -> JoinHandle<()> { + // Create the latency 
monitor + let latency_monitor = LatencyMonitor::new( + data_client_config, + Arc::new(data_client), + storage, + time_service, + ); + + // Spawn the latency monitor + if let Some(runtime) = runtime { + runtime.spawn(async move { latency_monitor.start_latency_monitor().await }) + } else { + tokio::spawn(async move { latency_monitor.start_latency_monitor().await }) + } +} + /// Updates the advertised data metrics using the given global /// data summary. fn update_advertised_data_metrics(global_data_summary: GlobalDataSummary) { diff --git a/state-sync/aptos-data-client/src/tests.rs b/state-sync/aptos-data-client/src/tests.rs deleted file mode 100644 index c5c572320bc14..0000000000000 --- a/state-sync/aptos-data-client/src/tests.rs +++ /dev/null @@ -1,1570 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - client::AptosDataClient, - error::Error, - interface::AptosDataClientInterface, - peer_states::calculate_optimal_chunk_sizes, - poller::{poll_peer, DataSummaryPoller}, -}; -use aptos_channels::{aptos_channel, message_queues::QueueStyle}; -use aptos_config::{ - config::{AptosDataClientConfig, BaseConfig, RoleType}, - network_id::{NetworkId, PeerNetworkId}, -}; -use aptos_crypto::HashValue; -use aptos_netcore::transport::ConnectionOrigin; -use aptos_network::{ - application::{interface::NetworkClient, metadata::ConnectionState, storage::PeersAndMetadata}, - peer_manager::{ConnectionRequestSender, PeerManagerRequest, PeerManagerRequestSender}, - protocols::{ - network::{NetworkSender, NewNetworkSender}, - wire::handshake::v1::ProtocolId, - }, - transport::ConnectionMetadata, -}; -use aptos_storage_service_client::StorageServiceClient; -use aptos_storage_service_server::network::{NetworkRequest, ResponseSender}; -use aptos_storage_service_types::{ - requests::{ - DataRequest, NewTransactionOutputsWithProofRequest, NewTransactionsWithProofRequest, - StorageServiceRequest, TransactionOutputsWithProofRequest, TransactionsWithProofRequest, - }, - responses::{ - CompleteDataRange, DataResponse, DataSummary, ProtocolMetadata, StorageServerSummary, - StorageServiceResponse, OPTIMISTIC_FETCH_VERSION_DELTA, - }, - StorageServiceError, StorageServiceMessage, -}; -use aptos_time_service::{MockTimeService, TimeService}; -use aptos_types::{ - aggregate_signature::AggregateSignature, - block_info::BlockInfo, - ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, - transaction::{TransactionListWithProof, Version}, - PeerId, -}; -use claims::{assert_err, assert_matches, assert_none}; -use futures::StreamExt; -use maplit::hashmap; -use std::{sync::Arc, time::Duration}; - -fn mock_ledger_info(version: Version) -> LedgerInfoWithSignatures { - LedgerInfoWithSignatures::new( - LedgerInfo::new( - BlockInfo::new(0, 0, HashValue::zero(), HashValue::zero(), version, 0, None), - HashValue::zero(), - ), - AggregateSignature::empty(), - ) -} - -fn mock_storage_summary(version: Version) -> StorageServerSummary { - StorageServerSummary { - protocol_metadata: ProtocolMetadata { - max_epoch_chunk_size: 1000, - max_state_chunk_size: 1000, - max_transaction_chunk_size: 1000, - max_transaction_output_chunk_size: 1000, - }, - data_summary: DataSummary { - synced_ledger_info: Some(mock_ledger_info(version)), - epoch_ending_ledger_infos: None, - transactions: Some(CompleteDataRange::new(0, version).unwrap()), - transaction_outputs: Some(CompleteDataRange::new(0, version).unwrap()), - states: None, - }, - } -} - -struct MockNetwork { - network_id: NetworkId, - peer_mgr_reqs_rx: 
aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, - peers_and_metadata: Arc, -} - -impl MockNetwork { - fn new( - base_config: Option, - data_client_config: Option, - networks: Option>, - ) -> (Self, MockTimeService, AptosDataClient, DataSummaryPoller) { - // Setup the request managers - let queue_cfg = aptos_channel::Config::new(10).queue_style(QueueStyle::FIFO); - let (peer_mgr_reqs_tx, peer_mgr_reqs_rx) = queue_cfg.build(); - let (connection_reqs_tx, _connection_reqs_rx) = queue_cfg.build(); - - // Setup the network client - let network_sender = NetworkSender::new( - PeerManagerRequestSender::new(peer_mgr_reqs_tx), - ConnectionRequestSender::new(connection_reqs_tx), - ); - let networks = networks - .unwrap_or_else(|| vec![NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]); - let peers_and_metadata = PeersAndMetadata::new(&networks); - let client_network_id = NetworkId::Validator; - let network_client = NetworkClient::new( - vec![], - vec![ProtocolId::StorageServiceRpc], - hashmap! { - client_network_id => network_sender}, - peers_and_metadata.clone(), - ); - - // Create a storage service client - let storage_service_client = StorageServiceClient::new(network_client); - - // Create an aptos data client - let mock_time = TimeService::mock(); - let base_config = base_config.unwrap_or_default(); - let data_client_config = data_client_config.unwrap_or_default(); - let (client, poller) = AptosDataClient::new( - data_client_config, - base_config, - mock_time.clone(), - storage_service_client, - None, - ); - - // Create the mock network - let mock_network = Self { - network_id: client_network_id, - peer_mgr_reqs_rx, - peers_and_metadata, - }; - - (mock_network, mock_time.into_mock(), client, poller) - } - - /// Add a new peer to the network peer DB - fn add_peer(&mut self, priority: bool) -> PeerNetworkId { - // Get the network id - let network_id = if priority { - NetworkId::Validator - } else { - NetworkId::Public - }; - self.add_peer_with_network_id(network_id, false) - } - - /// Add a new peer to the network peer DB with the specified network - fn add_peer_with_network_id( - &mut self, - network_id: NetworkId, - outbound_connection: bool, - ) -> PeerNetworkId { - // Create a new peer - let peer_id = PeerId::random(); - let peer_network_id = PeerNetworkId::new(network_id, peer_id); - - // Create and save a new connection metadata - let mut connection_metadata = ConnectionMetadata::mock(peer_id); - connection_metadata.origin = if outbound_connection { - ConnectionOrigin::Outbound - } else { - ConnectionOrigin::Inbound - }; - connection_metadata - .application_protocols - .insert(ProtocolId::StorageServiceRpc); - self.peers_and_metadata - .insert_connection_metadata(peer_network_id, connection_metadata) - .unwrap(); - - // Return the new peer - peer_network_id - } - - /// Disconnects the peer in the network peer DB - fn disconnect_peer(&mut self, peer: PeerNetworkId) { - self.update_peer_state(peer, ConnectionState::Disconnected); - } - - /// Reconnects the peer in the network peer DB - fn reconnect_peer(&mut self, peer: PeerNetworkId) { - self.update_peer_state(peer, ConnectionState::Connected); - } - - /// Updates the state of the given peer - - fn update_peer_state(&mut self, peer: PeerNetworkId, state: ConnectionState) { - self.peers_and_metadata - .update_connection_state(peer, state) - .unwrap(); - } - - /// Get the next request sent from the client. 
- async fn next_request(&mut self) -> Option { - match self.peer_mgr_reqs_rx.next().await { - Some(PeerManagerRequest::SendRpc(peer_id, network_request)) => { - let peer_network_id = PeerNetworkId::new(self.network_id, peer_id); - let protocol_id = network_request.protocol_id; - let data = network_request.data; - let res_tx = network_request.res_tx; - - let message: StorageServiceMessage = bcs::from_bytes(data.as_ref()).unwrap(); - let storage_service_request = match message { - StorageServiceMessage::Request(request) => request, - _ => panic!("unexpected: {:?}", message), - }; - let response_sender = ResponseSender::new(res_tx); - - Some(NetworkRequest { - peer_network_id, - protocol_id, - storage_service_request, - response_sender, - }) - }, - Some(PeerManagerRequest::SendDirectSend(_, _)) => panic!("Unexpected direct send msg"), - None => None, - } - } -} - -#[tokio::test] -async fn request_works_only_when_data_available() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, mock_time, client, poller) = MockNetwork::new(None, None, None); - - tokio::spawn(poller.start_poller()); - - // This request should fail because no peers are currently connected - let request_timeout = client.get_response_timeout_ms(); - let error = client - .get_transactions_with_proof(100, 50, 100, false, request_timeout) - .await - .unwrap_err(); - assert_matches!(error, Error::DataIsUnavailable(_)); - - // Add a connected peer - let expected_peer = mock_network.add_peer(true); - - // Requesting some txns now will still fail since no peers are advertising - // availability for the desired range. - let error = client - .get_transactions_with_proof(100, 50, 100, false, request_timeout) - .await - .unwrap_err(); - assert_matches!(error, Error::DataIsUnavailable(_)); - - // Advance time so the poller sends a data summary request - tokio::task::yield_now().await; - mock_time.advance_async(Duration::from_millis(1_000)).await; - - // Receive their request and fulfill it - let network_request = mock_network.next_request().await.unwrap(); - assert_eq!(network_request.peer_network_id, expected_peer); - assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); - assert!(network_request.storage_service_request.use_compression); - assert_matches!( - network_request.storage_service_request.data_request, - DataRequest::GetStorageServerSummary - ); - - let summary = mock_storage_summary(200); - let data_response = DataResponse::StorageServerSummary(summary); - network_request - .response_sender - .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); - - // Let the poller finish processing the response - tokio::task::yield_now().await; - - // Handle the client's transactions request - tokio::spawn(async move { - let network_request = mock_network.next_request().await.unwrap(); - - assert_eq!(network_request.peer_network_id, expected_peer); - assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); - assert!(network_request.storage_service_request.use_compression); - assert_matches!( - network_request.storage_service_request.data_request, - DataRequest::GetTransactionsWithProof(TransactionsWithProofRequest { - start_version: 50, - end_version: 100, - proof_version: 100, - include_events: false, - }) - ); - - let data_response = - DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); - network_request - .response_sender - .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); - }); - - // The client's request should succeed since 
a peer finally has advertised - // data for this range. - let response = client - .get_transactions_with_proof(100, 50, 100, false, request_timeout) - .await - .unwrap(); - assert_eq!(response.payload, TransactionListWithProof::new_empty()); -} - -#[tokio::test] -async fn fetch_peers_frequency() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, poller) = MockNetwork::new(None, None, None); - - // Add regular peer 1 and 2 - let _regular_peer_1 = mock_network.add_peer(false); - let _regular_peer_2 = mock_network.add_peer(false); - - // Set `always_poll` to true and fetch the regular peers multiple times. Ensure - // that for each fetch we receive a peer. - let num_fetches = 20; - for _ in 0..num_fetches { - let peer = poller.fetch_regular_peer(true).unwrap(); - client.in_flight_request_complete(&peer); - } - - // Set `always_poll` to false and fetch the regular peers multiple times - let mut regular_peer_count = 0; - for _ in 0..num_fetches { - if let Some(peer) = poller.fetch_regular_peer(false) { - regular_peer_count += 1; - client.in_flight_request_complete(&peer); - } - } - - // Verify we received regular peers at a reduced frequency - assert!(regular_peer_count < num_fetches); - - // Add priority peer 1 and 2 - let _priority_peer_1 = mock_network.add_peer(true); - let _priority_peer_2 = mock_network.add_peer(true); - - // Fetch the prioritized peers multiple times. Ensure that for - // each fetch we receive a peer. - for _ in 0..num_fetches { - let peer = poller.try_fetch_peer(true).unwrap(); - client.in_flight_request_complete(&peer); - } -} - -#[tokio::test] -async fn fetch_peers_ordering() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - // Ensure the properties hold for both priority and non-priority peers - for is_priority_peer in [true, false] { - // Add peer 1 - let peer_1 = mock_network.add_peer(is_priority_peer); - - // Request the next peer to poll and verify that we get peer 1 - for _ in 0..3 { - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_1); - client.in_flight_request_complete(&peer_to_poll); - } - - // Add peer 2 - let peer_2 = mock_network.add_peer(is_priority_peer); - - // Request the next peer and verify we get either peer - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert!(peer_to_poll == peer_1 || peer_to_poll == peer_2); - client.in_flight_request_complete(&peer_to_poll); - - // Request the next peer again, but don't mark the poll as complete - let peer_to_poll_1 = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - - // Request another peer again and verify that it's different to the previous peer - let peer_to_poll_2 = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_ne!(peer_to_poll_1, peer_to_poll_2); - - // Neither poll has completed (they're both in-flight), so make another request - // and verify we get no peers. 
- assert_none!(fetch_peer_to_poll(client.clone(), is_priority_peer).unwrap()); - - // Add peer 3 - let peer_3 = mock_network.add_peer(is_priority_peer); - - // Request another peer again and verify it's peer_3 - let peer_to_poll_3 = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll_3, peer_3); - - // Mark the second poll as completed - client.in_flight_request_complete(&peer_to_poll_2); - - // Make another request and verify we get peer 2 now (as it was ready) - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_to_poll_2); - - // Mark the first poll as completed - client.in_flight_request_complete(&peer_to_poll_1); - - // Make another request and verify we get peer 1 now - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_to_poll_1); - - // Mark the third poll as completed - client.in_flight_request_complete(&peer_to_poll_3); - - // Make another request and verify we get peer 3 now - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_to_poll_3); - client.in_flight_request_complete(&peer_to_poll_3); - } -} - -#[tokio::test] -async fn fetch_peers_disconnect() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - // Ensure the properties hold for both priority and non-priority peers - for is_priority_peer in [true, false] { - // Request the next peer to poll and verify we have no peers - assert_matches!( - fetch_peer_to_poll(client.clone(), is_priority_peer), - Err(Error::DataIsUnavailable(_)) - ); - - // Add peer 1 - let peer_1 = mock_network.add_peer(is_priority_peer); - - // Request the next peer to poll and verify it's peer 1 - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_1); - client.in_flight_request_complete(&peer_to_poll); - - // Add peer 2 and disconnect peer 1 - let peer_2 = mock_network.add_peer(is_priority_peer); - mock_network.disconnect_peer(peer_1); - - // Request the next peer to poll and verify it's peer 2 - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_2); - client.in_flight_request_complete(&peer_to_poll); - - // Disconnect peer 2 - mock_network.disconnect_peer(peer_2); - - // Request the next peer to poll and verify an error is returned because - // there are no connected peers. - assert_matches!( - fetch_peer_to_poll(client.clone(), is_priority_peer), - Err(Error::DataIsUnavailable(_)) - ); - - // Add peer 3 - let peer_3 = mock_network.add_peer(is_priority_peer); - - // Request the next peer to poll and verify it's peer 3 - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_3); - client.in_flight_request_complete(&peer_to_poll); - - // Disconnect peer 3 - mock_network.disconnect_peer(peer_3); - - // Request the next peer to poll and verify an error is returned because - // there are no connected peers. 
- assert_matches!( - fetch_peer_to_poll(client.clone(), is_priority_peer), - Err(Error::DataIsUnavailable(_)) - ); - } -} - -#[tokio::test] -async fn fetch_peers_reconnect() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - // Ensure the properties hold for both priority and non-priority peers - for is_priority_peer in [true, false] { - // Request the next peer to poll and verify we have no peers - assert_matches!( - fetch_peer_to_poll(client.clone(), is_priority_peer), - Err(Error::DataIsUnavailable(_)) - ); - - // Add peer 1 - let peer_1 = mock_network.add_peer(is_priority_peer); - - // Request the next peer to poll and verify it's peer 1 - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_1); - client.in_flight_request_complete(&peer_to_poll); - - // Add peer 2 and disconnect peer 1 - let peer_2 = mock_network.add_peer(is_priority_peer); - mock_network.disconnect_peer(peer_1); - - // Request the next peer to poll and verify it's peer 2 - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_2); - client.in_flight_request_complete(&peer_to_poll); - - // Disconnect peer 2 and reconnect peer 1 - mock_network.disconnect_peer(peer_2); - mock_network.reconnect_peer(peer_1); - - // Request the next peer to poll and verify it's peer 1 - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_1); - - // Reconnect peer 2 - mock_network.reconnect_peer(peer_2); - - // Request the next peer to poll several times and verify it's peer 2 - // (the in-flight request for peer 1 has yet to complete). - for _ in 0..3 { - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_2); - client.in_flight_request_complete(&peer_to_poll); - } - - // Disconnect peer 2 and mark peer 1's in-flight request as complete - mock_network.disconnect_peer(peer_2); - client.in_flight_request_complete(&peer_1); - - // Request the next peer to poll several times and verify it's peer 1 - for _ in 0..3 { - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_1); - client.in_flight_request_complete(&peer_to_poll); - } - - // Disconnect peer 1 - mock_network.disconnect_peer(peer_1); - - // Request the next peer to poll and verify an error is returned because - // there are no connected peers. 
- assert_matches!( - fetch_peer_to_poll(client.clone(), is_priority_peer), - Err(Error::DataIsUnavailable(_)) - ); - } -} - -#[tokio::test] -async fn fetch_peers_max_in_flight() { - ::aptos_logger::Logger::init_for_testing(); - - // Create a data client with max in-flight requests of 2 - let data_client_config = AptosDataClientConfig { - max_num_in_flight_priority_polls: 2, - max_num_in_flight_regular_polls: 2, - ..Default::default() - }; - let (mut mock_network, _, client, _) = MockNetwork::new(None, Some(data_client_config), None); - - // Ensure the properties hold for both priority and non-priority peers - for is_priority_peer in [true, false] { - // Add peer 1 - let peer_1 = mock_network.add_peer(is_priority_peer); - - // Request the next peer to poll and verify it's peer 1 - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_1); - - // Add peer 2 - let peer_2 = mock_network.add_peer(is_priority_peer); - - // Request the next peer to poll and verify it's peer 2 (peer 1's in-flight - // request has not yet completed). - let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_eq!(peer_to_poll, peer_2); - - // Add peer 3 - let peer_3 = mock_network.add_peer(is_priority_peer); - - // Request the next peer to poll and verify it's empty (we already have - // the maximum number of in-flight requests). - assert_none!(fetch_peer_to_poll(client.clone(), is_priority_peer).unwrap()); - - // Mark peer 2's in-flight request as complete - client.in_flight_request_complete(&peer_2); - - // Request the next peer to poll and verify it's either peer 2 or peer 3 - let peer_to_poll_1 = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert!(peer_to_poll_1 == peer_2 || peer_to_poll_1 == peer_3); - - // Request the next peer to poll and verify it's empty (we already have - // the maximum number of in-flight requests). - assert_none!(fetch_peer_to_poll(client.clone(), is_priority_peer).unwrap()); - - // Mark peer 1's in-flight request as complete - client.in_flight_request_complete(&peer_1); - - // Request the next peer to poll and verify it's not the peer that already - // has an in-flight request. 
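[Editor's note] `fetch_peers_max_in_flight` above exercises the cap on concurrent polls per peer class (the `max_num_in_flight_priority_polls` / `max_num_in_flight_regular_polls` config fields). A small illustrative gate enforcing such a cap might look like this; it is a standalone sketch, not the crate's implementation.

```rust
use std::collections::HashSet;

struct InFlightGate {
    max_in_flight: usize,
    in_flight: HashSet<String>,
}

impl InFlightGate {
    fn new(max_in_flight: usize) -> Self {
        Self { max_in_flight, in_flight: HashSet::new() }
    }

    /// Marks a poll as started, unless the cap has been reached.
    fn try_start_poll(&mut self, peer: &str) -> bool {
        if self.in_flight.len() >= self.max_in_flight {
            return false;
        }
        self.in_flight.insert(peer.to_string())
    }

    fn poll_complete(&mut self, peer: &str) {
        self.in_flight.remove(peer);
    }
}

fn main() {
    let mut gate = InFlightGate::new(2);
    assert!(gate.try_start_poll("peer_1"));
    assert!(gate.try_start_poll("peer_2"));

    // The cap (2) is reached, so a third poll is refused.
    assert!(!gate.try_start_poll("peer_3"));

    // Completing a poll frees a slot for another peer.
    gate.poll_complete("peer_2");
    assert!(gate.try_start_poll("peer_3"));
}
```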
- let peer_to_poll_2 = fetch_peer_to_poll(client.clone(), is_priority_peer) - .unwrap() - .unwrap(); - assert_ne!(peer_to_poll_1, peer_to_poll_2); - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn in_flight_error_handling() { - ::aptos_logger::Logger::init_for_testing(); - - // Create a data client with max in-flight requests of 1 - let data_client_config = AptosDataClientConfig { - max_num_in_flight_priority_polls: 1, - max_num_in_flight_regular_polls: 1, - ..Default::default() - }; - let (mut mock_network, _, client, _) = MockNetwork::new(None, Some(data_client_config), None); - - // Verify we have no in-flight polls - let num_in_flight_polls = get_num_in_flight_polls(client.clone(), true); - assert_eq!(num_in_flight_polls, 0); - - // Add a peer - let peer = mock_network.add_peer(true); - - // Poll the peer - client.in_flight_request_started(&peer); - let handle = poll_peer(client.clone(), peer, None); - - // Respond to the peer poll with an error - if let Some(network_request) = mock_network.next_request().await { - network_request - .response_sender - .send(Err(StorageServiceError::InternalError( - "An unexpected error occurred!".into(), - ))); - } - - // Wait for the poller to complete - handle.await.unwrap(); - - // Verify we have no in-flight polls - let num_in_flight_polls = get_num_in_flight_polls(client.clone(), true); - assert_eq!(num_in_flight_polls, 0); -} - -#[tokio::test] -async fn prioritized_peer_request_selection() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - // Ensure the properties hold for storage summary and version requests - let storage_summary_request = DataRequest::GetStorageServerSummary; - let get_version_request = DataRequest::GetServerProtocolVersion; - for data_request in [storage_summary_request, get_version_request] { - let storage_request = StorageServiceRequest::new(data_request, true); - - // Ensure no peers can service the request (we have no connections) - assert_matches!( - client.choose_peer_for_request(&storage_request), - Err(Error::DataIsUnavailable(_)) - ); - - // Add a regular peer and verify the peer is selected as the recipient - let regular_peer_1 = mock_network.add_peer(false); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Add a priority peer and verify the peer is selected as the recipient - let priority_peer_1 = mock_network.add_peer(true); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(priority_peer_1) - ); - - // Disconnect the priority peer and verify the regular peer is now chosen - mock_network.disconnect_peer(priority_peer_1); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Connect a new priority peer and verify it is now selected - let priority_peer_2 = mock_network.add_peer(true); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(priority_peer_2) - ); - - // Disconnect the priority peer and verify the regular peer is again chosen - mock_network.disconnect_peer(priority_peer_2); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Disconnect the regular peer so that we no longer have any connections - mock_network.disconnect_peer(regular_peer_1); - } -} - -#[tokio::test] -async fn prioritized_peer_subscription_selection() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - 
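[Editor's note] `prioritized_peer_request_selection` above checks that a request is routed to a priority peer when one can serve it, falls back to a regular peer otherwise, and errors out when no connected peer can. A self-contained sketch of that selection rule (the names and error type are illustrative):

```rust
fn choose_peer<'a>(
    priority_peers: &'a [&'a str],
    regular_peers: &'a [&'a str],
) -> Result<&'a str, String> {
    // Prefer any serviceable priority peer, then fall back to regular peers.
    priority_peers
        .first()
        .or_else(|| regular_peers.first())
        .copied()
        .ok_or_else(|| "data is unavailable: no connected peers".to_string())
}

fn main() {
    // No peers connected: the request cannot be served.
    assert!(choose_peer(&[], &[]).is_err());

    // Only a regular peer is available, so it is selected.
    assert_eq!(choose_peer(&[], &["regular_1"]), Ok("regular_1"));

    // A priority peer takes precedence over the regular peer.
    assert_eq!(choose_peer(&["priority_1"], &["regular_1"]), Ok("priority_1"));
}
```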
// Create test data - let known_version = 10000000; - let known_epoch = 10; - - // Ensure the properties hold for both subscription requests - let new_transactions_request = - DataRequest::GetNewTransactionsWithProof(NewTransactionsWithProofRequest { - known_version, - known_epoch, - include_events: false, - }); - let new_outputs_request = - DataRequest::GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { - known_version, - known_epoch, - }); - for data_request in [new_transactions_request, new_outputs_request] { - let storage_request = StorageServiceRequest::new(data_request, true); - - // Ensure no peers can service the request (we have no connections) - assert_matches!( - client.choose_peer_for_request(&storage_request), - Err(Error::DataIsUnavailable(_)) - ); - - // Add a regular peer and verify the peer cannot support the request - let regular_peer_1 = mock_network.add_peer(false); - assert_matches!( - client.choose_peer_for_request(&storage_request), - Err(Error::DataIsUnavailable(_)) - ); - - // Advertise the data for the regular peer and verify it is now selected - client.update_summary(regular_peer_1, mock_storage_summary(known_version)); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Add a priority peer and verify the regular peer is selected - let priority_peer_1 = mock_network.add_peer(true); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Advertise the data for the priority peer and verify it is now selected - client.update_summary(priority_peer_1, mock_storage_summary(known_version)); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(priority_peer_1) - ); - - // Update the priority peer to be too far behind and verify it is not selected - client.update_summary( - priority_peer_1, - mock_storage_summary(known_version - OPTIMISTIC_FETCH_VERSION_DELTA), - ); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Update the regular peer to be too far behind and verify neither is selected - client.update_summary( - regular_peer_1, - mock_storage_summary(known_version - (OPTIMISTIC_FETCH_VERSION_DELTA * 2)), - ); - assert_matches!( - client.choose_peer_for_request(&storage_request), - Err(Error::DataIsUnavailable(_)) - ); - - // Disconnect the regular peer and verify neither is selected - mock_network.disconnect_peer(regular_peer_1); - assert_matches!( - client.choose_peer_for_request(&storage_request), - Err(Error::DataIsUnavailable(_)) - ); - - // Advertise the data for the priority peer and verify it is now selected again - client.update_summary(priority_peer_1, mock_storage_summary(known_version + 1000)); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(priority_peer_1) - ); - - // Disconnect the priority peer so that we no longer have any connections - mock_network.disconnect_peer(priority_peer_1); - } -} - -#[tokio::test] -async fn all_peer_request_selection() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - // Ensure no peers can service the given request (we have no connections) - let server_version_request = - StorageServiceRequest::new(DataRequest::GetServerProtocolVersion, true); - assert_matches!( - client.choose_peer_for_request(&server_version_request), - Err(Error::DataIsUnavailable(_)) - ); - - // Add a regular peer and verify the peer is selected as the recipient - let 
regular_peer_1 = mock_network.add_peer(false); - assert_eq!( - client.choose_peer_for_request(&server_version_request), - Ok(regular_peer_1) - ); - - // Add two prioritized peers - let priority_peer_1 = mock_network.add_peer(true); - let priority_peer_2 = mock_network.add_peer(true); - - // Request data that is not being advertised and verify we get an error - let output_data_request = - DataRequest::GetTransactionOutputsWithProof(TransactionOutputsWithProofRequest { - proof_version: 100, - start_version: 0, - end_version: 100, - }); - let storage_request = StorageServiceRequest::new(output_data_request, false); - assert_matches!( - client.choose_peer_for_request(&storage_request), - Err(Error::DataIsUnavailable(_)) - ); - - // Advertise the data for the regular peer and verify it is now selected - client.update_summary(regular_peer_1, mock_storage_summary(100)); - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Advertise the data for the priority peer and verify the priority peer is selected - client.update_summary(priority_peer_2, mock_storage_summary(100)); - let peer_for_request = client.choose_peer_for_request(&storage_request).unwrap(); - assert_eq!(peer_for_request, priority_peer_2); - - // Reconnect priority peer 1 and remove the advertised data for priority peer 2 - mock_network.reconnect_peer(priority_peer_1); - client.update_summary(priority_peer_2, mock_storage_summary(0)); - - // Request the data again and verify the regular peer is chosen - assert_eq!( - client.choose_peer_for_request(&storage_request), - Ok(regular_peer_1) - ); - - // Advertise the data for priority peer 1 and verify the priority peer is selected - client.update_summary(priority_peer_1, mock_storage_summary(100)); - let peer_for_request = client.choose_peer_for_request(&storage_request).unwrap(); - assert_eq!(peer_for_request, priority_peer_1); - - // Advertise the data for priority peer 2 and verify either priority peer is selected - client.update_summary(priority_peer_2, mock_storage_summary(100)); - let peer_for_request = client.choose_peer_for_request(&storage_request).unwrap(); - assert!(peer_for_request == priority_peer_1 || peer_for_request == priority_peer_2); -} - -#[tokio::test] -async fn validator_peer_prioritization() { - ::aptos_logger::Logger::init_for_testing(); - - // Create a validator node - let base_config = BaseConfig { - role: RoleType::Validator, - ..Default::default() - }; - let (mut mock_network, _, client, _) = MockNetwork::new(Some(base_config), None, None); - - // Add a validator peer and ensure it's prioritized - let validator_peer = mock_network.add_peer_with_network_id(NetworkId::Validator, false); - let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); - assert_eq!(priority_peers, vec![validator_peer]); - assert_eq!(regular_peers, vec![]); - - // Add a vfn peer and ensure it's not prioritized - let vfn_peer = mock_network.add_peer_with_network_id(NetworkId::Vfn, true); - let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); - assert_eq!(priority_peers, vec![validator_peer]); - assert_eq!(regular_peers, vec![vfn_peer]); -} - -#[tokio::test] -async fn vfn_peer_prioritization() { - ::aptos_logger::Logger::init_for_testing(); - - // Create a validator fullnode - let base_config = BaseConfig { - role: RoleType::FullNode, - ..Default::default() - }; - let (mut mock_network, _, client, _) = MockNetwork::new(Some(base_config), None, None); - - // Add a validator peer and 
ensure it's prioritized - let validator_peer = mock_network.add_peer_with_network_id(NetworkId::Vfn, false); - let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); - assert_eq!(priority_peers, vec![validator_peer]); - assert_eq!(regular_peers, vec![]); - - // Add a pfn peer and ensure it's not prioritized - let pfn_peer = mock_network.add_peer_with_network_id(NetworkId::Public, true); - let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); - assert_eq!(priority_peers, vec![validator_peer]); - assert_eq!(regular_peers, vec![pfn_peer]); -} - -#[tokio::test] -async fn pfn_peer_prioritization() { - ::aptos_logger::Logger::init_for_testing(); - - // Create a public fullnode - let base_config = BaseConfig { - role: RoleType::FullNode, - ..Default::default() - }; - let (mut mock_network, _, client, _) = - MockNetwork::new(Some(base_config), None, Some(vec![NetworkId::Public])); - - // Add an inbound pfn peer and ensure it's not prioritized - let inbound_peer = mock_network.add_peer_with_network_id(NetworkId::Public, false); - let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); - assert_eq!(priority_peers, vec![]); - assert_eq!(regular_peers, vec![inbound_peer]); - - // Add an outbound pfn peer and ensure it's prioritized - let outbound_peer = mock_network.add_peer_with_network_id(NetworkId::Public, true); - let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); - assert_eq!(priority_peers, vec![outbound_peer]); - assert_eq!(regular_peers, vec![inbound_peer]); -} - -// 1. 2 peers -// 2. one advertises bad range, one advertises honest range -// 3. sending a bunch of requests to the bad range (which will always go to the -// bad peer) should lower bad peer's score -// 4. eventually bad peer score should hit threshold and we err with no available -#[tokio::test] -async fn bad_peer_is_eventually_banned_internal() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - let good_peer = mock_network.add_peer(true); - let bad_peer = mock_network.add_peer(true); - - // Bypass poller and just add the storage summaries directly. - - // Good peer advertises txns 0 -> 100. - client.update_summary(good_peer, mock_storage_summary(100)); - // Bad peer advertises txns 0 -> 200 (but can't actually service). - client.update_summary(bad_peer, mock_storage_summary(200)); - client.update_global_summary_cache().unwrap(); - - // The global summary should contain the bad peer's advertisement. - let global_summary = client.get_global_data_summary(); - assert!(global_summary - .advertised_data - .transactions - .contains(&CompleteDataRange::new(0, 200).unwrap())); - - // Spawn a handler for both peers. - tokio::spawn(async move { - while let Some(network_request) = mock_network.next_request().await { - let peer_network_id = network_request.peer_network_id; - let response_sender = network_request.response_sender; - if peer_network_id == good_peer { - // Good peer responds with good response. - let data_response = - DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); - response_sender.send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); - } else if peer_network_id == bad_peer { - // Bad peer responds with error. 
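[Editor's note] The three `*_peer_prioritization` tests above encode which peers count as priority for each node role: validators prioritize the validator network, VFNs prioritize the VFN network, and PFNs prioritize outbound public connections. The sketch below is a simplified restatement of what those tests assert (the enums and rules are illustrative, not the crate's actual types):

```rust
#[derive(Clone, Copy, PartialEq)]
enum Role {
    Validator,
    ValidatorFullNode,
    PublicFullNode,
}

#[derive(Clone, Copy, PartialEq)]
enum Network {
    Validator,
    Vfn,
    Public,
}

#[derive(Clone, Copy, PartialEq)]
enum Origin {
    Inbound,
    Outbound,
}

/// Returns true if a peer should be treated as a priority peer for `role`.
fn is_priority_peer(role: Role, network: Network, origin: Origin) -> bool {
    match role {
        // Validators prioritize other validators.
        Role::Validator => network == Network::Validator,
        // VFNs prioritize their validator over the public network.
        Role::ValidatorFullNode => network == Network::Vfn,
        // PFNs prioritize outbound public connections (peers they dialed).
        Role::PublicFullNode => network == Network::Public && origin == Origin::Outbound,
    }
}

fn main() {
    // A validator's validator-network peer is prioritized; its VFN peer is not.
    assert!(is_priority_peer(Role::Validator, Network::Validator, Origin::Inbound));
    assert!(!is_priority_peer(Role::Validator, Network::Vfn, Origin::Outbound));

    // A PFN prioritizes outbound public peers over inbound ones.
    assert!(is_priority_peer(Role::PublicFullNode, Network::Public, Origin::Outbound));
    assert!(!is_priority_peer(Role::PublicFullNode, Network::Public, Origin::Inbound));
}
```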
- response_sender.send(Err(StorageServiceError::InternalError("".to_string()))); - } - } - }); - - let mut seen_data_unavailable_err = false; - - // Sending a bunch of requests to the bad peer's upper range will fail. - let request_timeout = client.get_response_timeout_ms(); - for _ in 0..20 { - let result = client - .get_transactions_with_proof(200, 200, 200, false, request_timeout) - .await; - - // While the score is still decreasing, we should see a bunch of - // InternalError's. Once we see a `DataIsUnavailable` error, we should - // only see that error. - if !seen_data_unavailable_err { - assert_err!(&result); - if let Err(Error::DataIsUnavailable(_)) = result { - seen_data_unavailable_err = true; - } - } else { - assert_matches!(result, Err(Error::DataIsUnavailable(_))); - } - } - - // Peer should eventually get ignored and we should consider this request - // range unserviceable. - assert!(seen_data_unavailable_err); - - // The global summary should no longer contain the bad peer's advertisement. - client.update_global_summary_cache().unwrap(); - let global_summary = client.get_global_data_summary(); - assert!(!global_summary - .advertised_data - .transactions - .contains(&CompleteDataRange::new(0, 200).unwrap())); - - // We should still be able to send the good peer a request. - let response = client - .get_transactions_with_proof(100, 50, 100, false, request_timeout) - .await - .unwrap(); - assert_eq!(response.payload, TransactionListWithProof::new_empty()); -} - -#[tokio::test] -async fn bad_peer_is_eventually_banned_callback() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - let bad_peer = mock_network.add_peer(true); - - // Bypass poller and just add the storage summaries directly. - // Bad peer advertises txns 0 -> 200 (but can't actually service). - client.update_summary(bad_peer, mock_storage_summary(200)); - client.update_global_summary_cache().unwrap(); - - // Spawn a handler for both peers. - tokio::spawn(async move { - while let Some(network_request) = mock_network.next_request().await { - let data_response = - DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); - network_request - .response_sender - .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); - } - }); - - let mut seen_data_unavailable_err = false; - - // Sending a bunch of requests to the bad peer (that we later decide are bad). - let request_timeout = client.get_response_timeout_ms(); - for _ in 0..20 { - let result = client - .get_transactions_with_proof(200, 200, 200, false, request_timeout) - .await; - - // While the score is still decreasing, we should see a bunch of - // InternalError's. Once we see a `DataIsUnavailable` error, we should - // only see that error. - if !seen_data_unavailable_err { - match result { - Ok(response) => { - response.context.response_callback.notify_bad_response( - crate::interface::ResponseError::ProofVerificationError, - ); - }, - Err(Error::DataIsUnavailable(_)) => { - seen_data_unavailable_err = true; - }, - Err(_) => panic!("unexpected result: {:?}", result), - } - } else { - assert_matches!(result, Err(Error::DataIsUnavailable(_))); - } - } - - // Peer should eventually get ignored and we should consider this request - // range unserviceable. - assert!(seen_data_unavailable_err); - - // The global summary should no longer contain the bad peer's advertisement. 
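[Editor's note] The two `bad_peer_is_eventually_banned_*` tests above drive a peer's score down, one error (or `notify_bad_response` call) at a time, until it crosses an ignore threshold and its advertisements drop out of the global summary. A standalone sketch of such a score tracker (the starting score, penalty, and threshold are illustrative, not the crate's actual constants):

```rust
struct PeerScore {
    score: f64,
}

impl PeerScore {
    const IGNORE_THRESHOLD: f64 = 25.0;
    const STARTING_SCORE: f64 = 100.0;

    fn new() -> Self {
        Self { score: Self::STARTING_SCORE }
    }

    /// Applies a multiplicative penalty for an invalid or error response.
    fn notify_bad_response(&mut self) {
        self.score *= 0.8;
    }

    /// Ignored peers are excluded when building the global data summary.
    fn is_ignored(&self) -> bool {
        self.score < Self::IGNORE_THRESHOLD
    }
}

fn main() {
    let mut score = PeerScore::new();
    let mut responses_until_ignored = 0;

    while !score.is_ignored() {
        score.notify_bad_response();
        responses_until_ignored += 1;
    }

    // A handful of bad responses is enough to cross the ignore threshold,
    // which is why the tests loop well past that point (20 requests).
    assert!(responses_until_ignored < 20);
    assert!(score.is_ignored());
}
```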
- client.update_global_summary_cache().unwrap(); - let global_summary = client.get_global_data_summary(); - assert!(!global_summary - .advertised_data - .transactions - .contains(&CompleteDataRange::new(0, 200).unwrap())); -} - -#[tokio::test] -async fn compression_mismatch_disabled() { - ::aptos_logger::Logger::init_for_testing(); - - // Disable compression - let data_client_config = AptosDataClientConfig { - use_compression: false, - ..Default::default() - }; - let (mut mock_network, mock_time, client, poller) = - MockNetwork::new(None, Some(data_client_config), None); - - tokio::spawn(poller.start_poller()); - - // Add a connected peer - let _ = mock_network.add_peer(true); - - // Advance time so the poller sends a data summary request - tokio::task::yield_now().await; - mock_time.advance_async(Duration::from_millis(1_000)).await; - - // Receive their request and respond - let network_request = mock_network.next_request().await.unwrap(); - let data_response = DataResponse::StorageServerSummary(mock_storage_summary(200)); - network_request.response_sender.send(Ok( - StorageServiceResponse::new(data_response, false).unwrap() - )); - - // Let the poller finish processing the response - tokio::task::yield_now().await; - - // Handle the client's transactions request using compression - tokio::spawn(async move { - let network_request = mock_network.next_request().await.unwrap(); - assert!(!network_request.storage_service_request.use_compression); - - // Compress the response - let data_response = - DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); - let storage_response = StorageServiceResponse::new(data_response, true).unwrap(); - network_request.response_sender.send(Ok(storage_response)); - }); - - // The client should receive a compressed response and return an error - let request_timeout = client.get_response_timeout_ms(); - let response = client - .get_transactions_with_proof(100, 50, 100, false, request_timeout) - .await - .unwrap_err(); - assert_matches!(response, Error::InvalidResponse(_)); -} - -#[tokio::test] -async fn compression_mismatch_enabled() { - ::aptos_logger::Logger::init_for_testing(); - - // Enable compression - let data_client_config = AptosDataClientConfig { - use_compression: true, - ..Default::default() - }; - let (mut mock_network, mock_time, client, poller) = - MockNetwork::new(None, Some(data_client_config), None); - - tokio::spawn(poller.start_poller()); - - // Add a connected peer - let _ = mock_network.add_peer(true); - - // Advance time so the poller sends a data summary request - tokio::task::yield_now().await; - mock_time.advance_async(Duration::from_millis(1_000)).await; - - // Receive their request and respond - let network_request = mock_network.next_request().await.unwrap(); - let data_response = DataResponse::StorageServerSummary(mock_storage_summary(200)); - network_request - .response_sender - .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); - - // Let the poller finish processing the response - tokio::task::yield_now().await; - - // Handle the client's transactions request without compression - tokio::spawn(async move { - let network_request = mock_network.next_request().await.unwrap(); - assert!(network_request.storage_service_request.use_compression); - - // Compress the response - let data_response = - DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); - let storage_response = StorageServiceResponse::new(data_response, false).unwrap(); - 
network_request.response_sender.send(Ok(storage_response)); - }); - - // The client should receive a compressed response and return an error - let request_timeout = client.get_response_timeout_ms(); - let response = client - .get_transactions_with_proof(100, 50, 100, false, request_timeout) - .await - .unwrap_err(); - assert_matches!(response, Error::InvalidResponse(_)); -} - -#[tokio::test] -async fn disable_compression() { - ::aptos_logger::Logger::init_for_testing(); - - // Disable compression - let data_client_config = AptosDataClientConfig { - use_compression: false, - ..Default::default() - }; - let (mut mock_network, mock_time, client, poller) = - MockNetwork::new(None, Some(data_client_config), None); - - tokio::spawn(poller.start_poller()); - - // Add a connected peer - let expected_peer = mock_network.add_peer(true); - - // Advance time so the poller sends a data summary request - tokio::task::yield_now().await; - mock_time.advance_async(Duration::from_millis(1_000)).await; - - // Receive their request - let network_request = mock_network.next_request().await.unwrap(); - assert_eq!(network_request.peer_network_id, expected_peer); - assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); - assert!(!network_request.storage_service_request.use_compression); - assert_matches!( - network_request.storage_service_request.data_request, - DataRequest::GetStorageServerSummary - ); - - // Fulfill their request - let data_response = DataResponse::StorageServerSummary(mock_storage_summary(200)); - network_request.response_sender.send(Ok( - StorageServiceResponse::new(data_response, false).unwrap() - )); - - // Let the poller finish processing the response - tokio::task::yield_now().await; - - // Handle the client's transactions request - tokio::spawn(async move { - let network_request = mock_network.next_request().await.unwrap(); - - assert_eq!(network_request.peer_network_id, expected_peer); - assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); - assert!(!network_request.storage_service_request.use_compression); - assert_matches!( - network_request.storage_service_request.data_request, - DataRequest::GetTransactionsWithProof(TransactionsWithProofRequest { - start_version: 50, - end_version: 100, - proof_version: 100, - include_events: false, - }) - ); - - let data_response = - DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); - let storage_response = StorageServiceResponse::new(data_response, false).unwrap(); - network_request.response_sender.send(Ok(storage_response)); - }); - - // The client's request should succeed since a peer finally has advertised - // data for this range. 
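[Editor's note] The compression tests above (`compression_mismatch_disabled` / `compression_mismatch_enabled`) expect the client to reject a response whose compression does not match what the request asked for, surfacing it as an invalid-response error. A minimal sketch of that validation step (the error type and names are illustrative):

```rust
#[derive(Debug, PartialEq)]
enum ClientError {
    InvalidResponse(String),
}

/// Rejects responses whose compression setting differs from the request's.
fn validate_compression(
    request_used_compression: bool,
    response_is_compressed: bool,
) -> Result<(), ClientError> {
    if request_used_compression != response_is_compressed {
        return Err(ClientError::InvalidResponse(
            "compression mismatch between request and response".to_string(),
        ));
    }
    Ok(())
}

fn main() {
    // Matching settings are accepted.
    assert_eq!(validate_compression(true, true), Ok(()));
    assert_eq!(validate_compression(false, false), Ok(()));

    // A compressed response to an uncompressed request (and vice versa) is rejected.
    assert!(validate_compression(false, true).is_err());
    assert!(validate_compression(true, false).is_err());
}
```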
- let request_timeout = client.get_response_timeout_ms(); - let response = client - .get_transactions_with_proof(100, 50, 100, false, request_timeout) - .await - .unwrap(); - assert_eq!(response.payload, TransactionListWithProof::new_empty()); -} - -#[tokio::test(flavor = "multi_thread")] -async fn disconnected_peers_garbage_collection() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); - - // Connect several peers - let priority_peer_1 = mock_network.add_peer(true); - let priority_peer_2 = mock_network.add_peer(true); - let priority_peer_3 = mock_network.add_peer(true); - - // Poll all of the peers to initialize the peer states - let all_peers = vec![priority_peer_1, priority_peer_2, priority_peer_3]; - poll_peers(&mut mock_network, &client, all_peers.clone()).await; - - // Verify we have peer states for all peers - verify_peer_states(&client, all_peers.clone()); - - // Disconnect priority peer 1 and update the global data summary - mock_network.disconnect_peer(priority_peer_1); - client.update_global_summary_cache().unwrap(); - - // Verify we have peer states for only the remaining peers - verify_peer_states(&client, vec![priority_peer_2, priority_peer_3]); - - // Disconnect priority peer 2 and update the global data summary - mock_network.disconnect_peer(priority_peer_2); - client.update_global_summary_cache().unwrap(); - - // Verify we have peer states for only priority peer 3 - verify_peer_states(&client, vec![priority_peer_3]); - - // Reconnect priority peer 1, poll it and update the global data summary - mock_network.reconnect_peer(priority_peer_1); - poll_peers(&mut mock_network, &client, vec![priority_peer_1]).await; - client.update_global_summary_cache().unwrap(); - - // Verify we have peer states for priority peer 1 and 3 - verify_peer_states(&client, vec![priority_peer_1, priority_peer_3]); - - // Reconnect priority peer 2, poll it and update the global data summary - mock_network.reconnect_peer(priority_peer_2); - poll_peers(&mut mock_network, &client, vec![priority_peer_2]).await; - client.update_global_summary_cache().unwrap(); - - // Verify we have peer states for all peers - verify_peer_states(&client, all_peers); -} - -#[tokio::test] -async fn bad_peer_is_eventually_added_back() { - ::aptos_logger::Logger::init_for_testing(); - let (mut mock_network, mock_time, client, poller) = MockNetwork::new(None, None, None); - - // Add a connected peer. - mock_network.add_peer(true); - - tokio::spawn(poller.start_poller()); - tokio::spawn(async move { - while let Some(network_request) = mock_network.next_request().await { - match network_request.storage_service_request.data_request { - DataRequest::GetTransactionsWithProof(_) => { - let data_response = - DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); - network_request - .response_sender - .send(Ok(StorageServiceResponse::new( - data_response, - network_request.storage_service_request.use_compression, - ) - .unwrap())); - }, - DataRequest::GetStorageServerSummary => { - let data_response = - DataResponse::StorageServerSummary(mock_storage_summary(200)); - network_request - .response_sender - .send(Ok(StorageServiceResponse::new( - data_response, - network_request.storage_service_request.use_compression, - ) - .unwrap())); - }, - _ => panic!( - "Unexpected storage request: {:?}", - network_request.storage_service_request - ), - } - } - }); - - // Advance time so the poller sends data summary requests. 
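[Editor's note] `disconnected_peers_garbage_collection` above verifies that per-peer state is dropped for peers that disconnect when the global summary cache is refreshed, and recreated once they reconnect and are polled again. A small sketch of the retain-only-connected step, using std collections (the state value here is a stand-in for the per-peer summary):

```rust
use std::collections::{HashMap, HashSet};

/// Drops cached state for peers that are no longer connected.
fn garbage_collect_peer_states(
    peer_states: &mut HashMap<String, u64>,
    connected_peers: &HashSet<String>,
) {
    peer_states.retain(|peer, _| connected_peers.contains(peer));
}

fn main() {
    let mut peer_states: HashMap<String, u64> =
        [("peer_1", 100), ("peer_2", 200), ("peer_3", 300)]
            .into_iter()
            .map(|(peer, version)| (peer.to_string(), version))
            .collect();

    // Peer 1 disconnects; only peers 2 and 3 remain connected.
    let connected: HashSet<String> = ["peer_2", "peer_3"]
        .into_iter()
        .map(|peer| peer.to_string())
        .collect();

    garbage_collect_peer_states(&mut peer_states, &connected);
    assert_eq!(peer_states.len(), 2);
    assert!(!peer_states.contains_key("peer_1"));
}
```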
- let summary_poll_interval = Duration::from_millis(1_000); - for _ in 0..2 { - tokio::task::yield_now().await; - mock_time.advance_async(summary_poll_interval).await; - } - - // Initially this request range is serviceable by this peer. - let global_summary = client.get_global_data_summary(); - assert!(global_summary - .advertised_data - .transactions - .contains(&CompleteDataRange::new(0, 200).unwrap())); - - // Keep decreasing this peer's score by considering its responses bad. - // Eventually its score drops below IGNORE_PEER_THRESHOLD. - let request_timeout = client.get_response_timeout_ms(); - for _ in 0..20 { - let result = client - .get_transactions_with_proof(200, 0, 200, false, request_timeout) - .await; - - if let Ok(response) = result { - response - .context - .response_callback - .notify_bad_response(crate::interface::ResponseError::ProofVerificationError); - } - } - - // Peer is eventually ignored and this request range unserviceable. - client.update_global_summary_cache().unwrap(); - let global_summary = client.get_global_data_summary(); - assert!(!global_summary - .advertised_data - .transactions - .contains(&CompleteDataRange::new(0, 200).unwrap())); - - // This peer still responds to the StorageServerSummary requests. - // Its score keeps increasing and this peer is eventually added back. - for _ in 0..20 { - mock_time.advance_async(summary_poll_interval).await; - } - - let global_summary = client.get_global_data_summary(); - assert!(global_summary - .advertised_data - .transactions - .contains(&CompleteDataRange::new(0, 200).unwrap())); -} - -#[tokio::test] -async fn optimal_chunk_size_calculations() { - // Create a test storage service config - let max_epoch_chunk_size = 600; - let max_state_chunk_size = 500; - let max_transaction_chunk_size = 700; - let max_transaction_output_chunk_size = 800; - let data_client_config = AptosDataClientConfig { - max_epoch_chunk_size, - max_state_chunk_size, - max_transaction_chunk_size, - max_transaction_output_chunk_size, - ..Default::default() - }; - - // Test median calculations - let optimal_chunk_sizes = calculate_optimal_chunk_sizes( - &data_client_config, - vec![7, 5, 6, 8, 10], - vec![100, 200, 300, 100], - vec![900, 700, 500], - vec![40], - ); - assert_eq!(200, optimal_chunk_sizes.state_chunk_size); - assert_eq!(7, optimal_chunk_sizes.epoch_chunk_size); - assert_eq!(700, optimal_chunk_sizes.transaction_chunk_size); - assert_eq!(40, optimal_chunk_sizes.transaction_output_chunk_size); - - // Test no advertised data - let optimal_chunk_sizes = - calculate_optimal_chunk_sizes(&data_client_config, vec![], vec![], vec![], vec![]); - assert_eq!(max_state_chunk_size, optimal_chunk_sizes.state_chunk_size); - assert_eq!(max_epoch_chunk_size, optimal_chunk_sizes.epoch_chunk_size); - assert_eq!( - max_transaction_chunk_size, - optimal_chunk_sizes.transaction_chunk_size - ); - assert_eq!( - max_transaction_output_chunk_size, - optimal_chunk_sizes.transaction_output_chunk_size - ); - - // Verify the config caps the amount of chunks - let optimal_chunk_sizes = calculate_optimal_chunk_sizes( - &data_client_config, - vec![70, 50, 60, 80, 100], - vec![1000, 1000, 2000, 3000], - vec![9000, 7000, 5000], - vec![400], - ); - assert_eq!(max_state_chunk_size, optimal_chunk_sizes.state_chunk_size); - assert_eq!(70, optimal_chunk_sizes.epoch_chunk_size); - assert_eq!( - max_transaction_chunk_size, - optimal_chunk_sizes.transaction_chunk_size - ); - assert_eq!(400, optimal_chunk_sizes.transaction_output_chunk_size); -} - -/// A helper method that fetches 
peers to poll depending on the peer priority -fn fetch_peer_to_poll( - client: AptosDataClient, - is_priority_peer: bool, -) -> Result, Error> { - // Fetch the next peer to poll - let result = if is_priority_peer { - client.fetch_prioritized_peer_to_poll() - } else { - client.fetch_regular_peer_to_poll() - }; - - // If we get a peer, mark the peer as having an in-flight request - if let Ok(Some(peer_to_poll)) = result { - client.in_flight_request_started(&peer_to_poll); - } - - result -} - -/// Fetches the number of in flight requests for peers depending on priority -fn get_num_in_flight_polls(client: AptosDataClient, is_priority_peer: bool) -> u64 { - if is_priority_peer { - client.get_peer_states().num_in_flight_priority_polls() - } else { - client.get_peer_states().num_in_flight_regular_polls() - } -} - -/// A simple helper function that polls all the specified peers -/// and returns storage server summaries for each. -async fn poll_peers( - mock_network: &mut MockNetwork, - client: &AptosDataClient, - all_peers: Vec, -) { - for peer in all_peers { - // Poll the peer - let handle = poll_peer(client.clone(), peer, None); - - // Respond to the poll request - let network_request = mock_network.next_request().await.unwrap(); - let data_response = DataResponse::StorageServerSummary(StorageServerSummary::default()); - network_request - .response_sender - .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); - - // Wait for the poll to complete - handle.await.unwrap(); - } -} - -/// Verifies the exclusive existence of peer states for all the specified peers -fn verify_peer_states(client: &AptosDataClient, all_peers: Vec) { - let peer_to_states = client.get_peer_states().get_peer_to_states(); - for peer in &all_peers { - assert!(peer_to_states.contains_key(peer)); - } - assert_eq!(peer_to_states.len(), all_peers.len()); -} diff --git a/state-sync/aptos-data-client/src/tests/advertise.rs b/state-sync/aptos-data-client/src/tests/advertise.rs new file mode 100644 index 0000000000000..772a1bc49086f --- /dev/null +++ b/state-sync/aptos-data-client/src/tests/advertise.rs @@ -0,0 +1,159 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + error::Error, + interface::AptosDataClientInterface, + peer_states::calculate_optimal_chunk_sizes, + tests::{mock::MockNetwork, utils}, +}; +use aptos_config::config::AptosDataClientConfig; +use aptos_network::protocols::wire::handshake::v1::ProtocolId; +use aptos_storage_service_types::{ + requests::{DataRequest, TransactionsWithProofRequest}, + responses::{DataResponse, StorageServiceResponse}, +}; +use aptos_types::transaction::TransactionListWithProof; +use claims::assert_matches; +use std::time::Duration; + +#[tokio::test] +async fn request_works_only_when_data_available() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, mock_time, client, poller) = MockNetwork::new(None, None, None); + + tokio::spawn(poller.start_poller()); + + // This request should fail because no peers are currently connected + let request_timeout = client.get_response_timeout_ms(); + let error = client + .get_transactions_with_proof(100, 50, 100, false, request_timeout) + .await + .unwrap_err(); + assert_matches!(error, Error::DataIsUnavailable(_)); + + // Add a connected peer + let expected_peer = mock_network.add_peer(true); + + // Requesting some txns now will still fail since no peers are advertising + // availability for the desired range. 
+ let error = client + .get_transactions_with_proof(100, 50, 100, false, request_timeout) + .await + .unwrap_err(); + assert_matches!(error, Error::DataIsUnavailable(_)); + + // Advance time so the poller sends a data summary request + tokio::task::yield_now().await; + mock_time.advance_async(Duration::from_millis(1_000)).await; + + // Receive their request and fulfill it + let network_request = mock_network.next_request().await.unwrap(); + assert_eq!(network_request.peer_network_id, expected_peer); + assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); + assert!(network_request.storage_service_request.use_compression); + assert_matches!( + network_request.storage_service_request.data_request, + DataRequest::GetStorageServerSummary + ); + + let summary = utils::create_storage_summary(200); + let data_response = DataResponse::StorageServerSummary(summary); + network_request + .response_sender + .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); + + // Let the poller finish processing the response + tokio::task::yield_now().await; + + // Handle the client's transactions request + tokio::spawn(async move { + let network_request = mock_network.next_request().await.unwrap(); + + assert_eq!(network_request.peer_network_id, expected_peer); + assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); + assert!(network_request.storage_service_request.use_compression); + assert_matches!( + network_request.storage_service_request.data_request, + DataRequest::GetTransactionsWithProof(TransactionsWithProofRequest { + start_version: 50, + end_version: 100, + proof_version: 100, + include_events: false, + }) + ); + + let data_response = + DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); + network_request + .response_sender + .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); + }); + + // The client's request should succeed since a peer finally has advertised + // data for this range. 
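[Editor's note] `request_works_only_when_data_available` above only succeeds once a peer advertises the requested version range in its storage summary; until then the client reports the data as unavailable. The underlying check amounts to range containment over advertised data. A minimal standalone analogue (the range type here is an illustrative stand-in for `CompleteDataRange`):

```rust
/// An inclusive version range advertised by a peer.
#[derive(Clone, Copy)]
struct VersionRange {
    lowest: u64,
    highest: u64,
}

impl VersionRange {
    fn contains(&self, start: u64, end: u64) -> bool {
        self.lowest <= start && end <= self.highest
    }
}

/// Returns true if any advertised range can serve versions [start, end].
fn is_serviceable(advertised: &[VersionRange], start: u64, end: u64) -> bool {
    advertised.iter().any(|range| range.contains(start, end))
}

fn main() {
    // No advertisements yet: a request for versions 50..=100 is unserviceable.
    assert!(!is_serviceable(&[], 50, 100));

    // A peer advertises versions 0..=200, so the request can now be served.
    let advertised = [VersionRange { lowest: 0, highest: 200 }];
    assert!(is_serviceable(&advertised, 50, 100));
}
```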
+ let response = client + .get_transactions_with_proof(100, 50, 100, false, request_timeout) + .await + .unwrap(); + assert_eq!(response.payload, TransactionListWithProof::new_empty()); +} + +#[tokio::test] +async fn optimal_chunk_size_calculations() { + // Create a test storage service config + let max_epoch_chunk_size = 600; + let max_state_chunk_size = 500; + let max_transaction_chunk_size = 700; + let max_transaction_output_chunk_size = 800; + let data_client_config = AptosDataClientConfig { + max_epoch_chunk_size, + max_state_chunk_size, + max_transaction_chunk_size, + max_transaction_output_chunk_size, + ..Default::default() + }; + + // Test median calculations + let optimal_chunk_sizes = calculate_optimal_chunk_sizes( + &data_client_config, + vec![7, 5, 6, 8, 10], + vec![100, 200, 300, 100], + vec![900, 700, 500], + vec![40], + ); + assert_eq!(200, optimal_chunk_sizes.state_chunk_size); + assert_eq!(7, optimal_chunk_sizes.epoch_chunk_size); + assert_eq!(700, optimal_chunk_sizes.transaction_chunk_size); + assert_eq!(40, optimal_chunk_sizes.transaction_output_chunk_size); + + // Test no advertised data + let optimal_chunk_sizes = + calculate_optimal_chunk_sizes(&data_client_config, vec![], vec![], vec![], vec![]); + assert_eq!(max_state_chunk_size, optimal_chunk_sizes.state_chunk_size); + assert_eq!(max_epoch_chunk_size, optimal_chunk_sizes.epoch_chunk_size); + assert_eq!( + max_transaction_chunk_size, + optimal_chunk_sizes.transaction_chunk_size + ); + assert_eq!( + max_transaction_output_chunk_size, + optimal_chunk_sizes.transaction_output_chunk_size + ); + + // Verify the config caps the amount of chunks + let optimal_chunk_sizes = calculate_optimal_chunk_sizes( + &data_client_config, + vec![70, 50, 60, 80, 100], + vec![1000, 1000, 2000, 3000], + vec![9000, 7000, 5000], + vec![400], + ); + assert_eq!(max_state_chunk_size, optimal_chunk_sizes.state_chunk_size); + assert_eq!(70, optimal_chunk_sizes.epoch_chunk_size); + assert_eq!( + max_transaction_chunk_size, + optimal_chunk_sizes.transaction_chunk_size + ); + assert_eq!(400, optimal_chunk_sizes.transaction_output_chunk_size); +} diff --git a/state-sync/aptos-data-client/src/tests/compression.rs b/state-sync/aptos-data-client/src/tests/compression.rs new file mode 100644 index 0000000000000..b073610143de9 --- /dev/null +++ b/state-sync/aptos-data-client/src/tests/compression.rs @@ -0,0 +1,194 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + error::Error, + interface::AptosDataClientInterface, + tests::{mock::MockNetwork, utils}, +}; +use aptos_config::config::AptosDataClientConfig; +use aptos_network::protocols::wire::handshake::v1::ProtocolId; +use aptos_storage_service_types::{ + requests::{DataRequest, TransactionsWithProofRequest}, + responses::{DataResponse, StorageServiceResponse}, +}; +use aptos_types::transaction::TransactionListWithProof; +use claims::assert_matches; +use std::time::Duration; + +#[tokio::test] +async fn compression_mismatch_disabled() { + ::aptos_logger::Logger::init_for_testing(); + + // Disable compression + let data_client_config = AptosDataClientConfig { + use_compression: false, + ..Default::default() + }; + let (mut mock_network, mock_time, client, poller) = + MockNetwork::new(None, Some(data_client_config), None); + + tokio::spawn(poller.start_poller()); + + // Add a connected peer + let _ = mock_network.add_peer(true); + + // Advance time so the poller sends a data summary request + tokio::task::yield_now().await; + 
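[Editor's note] The `optimal_chunk_size_calculations` test above expects `calculate_optimal_chunk_sizes` to take the median of the chunk sizes advertised by peers, cap it at the configured maximum, and fall back to the maximum when nothing is advertised. A standalone sketch of that median-then-cap rule, checked against the same numbers the test uses:

```rust
/// Computes an optimal chunk size: the median of advertised sizes, capped at
/// `max_chunk_size`; the maximum is used when nothing is advertised.
fn optimal_chunk_size(max_chunk_size: u64, mut advertised: Vec<u64>) -> u64 {
    if advertised.is_empty() {
        return max_chunk_size;
    }
    advertised.sort_unstable();
    let median = advertised[advertised.len() / 2];
    median.min(max_chunk_size)
}

fn main() {
    // Median of the advertised epoch chunk sizes (5, 6, 7, 8, 10) is 7.
    assert_eq!(optimal_chunk_size(600, vec![7, 5, 6, 8, 10]), 7);

    // No advertised data: fall back to the configured maximum.
    assert_eq!(optimal_chunk_size(500, vec![]), 500);

    // The configured maximum caps large advertised values.
    assert_eq!(optimal_chunk_size(700, vec![9000, 7000, 5000]), 700);
}
```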
mock_time.advance_async(Duration::from_millis(1_000)).await; + + // Receive their request and respond + let network_request = mock_network.next_request().await.unwrap(); + let data_response = DataResponse::StorageServerSummary(utils::create_storage_summary(200)); + network_request.response_sender.send(Ok( + StorageServiceResponse::new(data_response, false).unwrap() + )); + + // Let the poller finish processing the response + tokio::task::yield_now().await; + + // Handle the client's transactions request using compression + tokio::spawn(async move { + let network_request = mock_network.next_request().await.unwrap(); + assert!(!network_request.storage_service_request.use_compression); + + // Compress the response + let data_response = + DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); + let storage_response = StorageServiceResponse::new(data_response, true).unwrap(); + network_request.response_sender.send(Ok(storage_response)); + }); + + // The client should receive a compressed response and return an error + let request_timeout = client.get_response_timeout_ms(); + let response = client + .get_transactions_with_proof(100, 50, 100, false, request_timeout) + .await + .unwrap_err(); + assert_matches!(response, Error::InvalidResponse(_)); +} + +#[tokio::test] +async fn compression_mismatch_enabled() { + ::aptos_logger::Logger::init_for_testing(); + + // Enable compression + let data_client_config = AptosDataClientConfig { + use_compression: true, + ..Default::default() + }; + let (mut mock_network, mock_time, client, poller) = + MockNetwork::new(None, Some(data_client_config), None); + + tokio::spawn(poller.start_poller()); + + // Add a connected peer + let _ = mock_network.add_peer(true); + + // Advance time so the poller sends a data summary request + tokio::task::yield_now().await; + mock_time.advance_async(Duration::from_millis(1_000)).await; + + // Receive their request and respond + let network_request = mock_network.next_request().await.unwrap(); + let data_response = DataResponse::StorageServerSummary(utils::create_storage_summary(200)); + network_request + .response_sender + .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); + + // Let the poller finish processing the response + tokio::task::yield_now().await; + + // Handle the client's transactions request without compression + tokio::spawn(async move { + let network_request = mock_network.next_request().await.unwrap(); + assert!(network_request.storage_service_request.use_compression); + + // Compress the response + let data_response = + DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); + let storage_response = StorageServiceResponse::new(data_response, false).unwrap(); + network_request.response_sender.send(Ok(storage_response)); + }); + + // The client should receive a compressed response and return an error + let request_timeout = client.get_response_timeout_ms(); + let response = client + .get_transactions_with_proof(100, 50, 100, false, request_timeout) + .await + .unwrap_err(); + assert_matches!(response, Error::InvalidResponse(_)); +} + +#[tokio::test] +async fn disable_compression() { + ::aptos_logger::Logger::init_for_testing(); + + // Disable compression + let data_client_config = AptosDataClientConfig { + use_compression: false, + ..Default::default() + }; + let (mut mock_network, mock_time, client, poller) = + MockNetwork::new(None, Some(data_client_config), None); + + tokio::spawn(poller.start_poller()); + + // Add a connected peer + let 
expected_peer = mock_network.add_peer(true); + + // Advance time so the poller sends a data summary request + tokio::task::yield_now().await; + mock_time.advance_async(Duration::from_millis(1_000)).await; + + // Receive their request + let network_request = mock_network.next_request().await.unwrap(); + assert_eq!(network_request.peer_network_id, expected_peer); + assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); + assert!(!network_request.storage_service_request.use_compression); + assert_matches!( + network_request.storage_service_request.data_request, + DataRequest::GetStorageServerSummary + ); + + // Fulfill their request + let data_response = DataResponse::StorageServerSummary(utils::create_storage_summary(200)); + network_request.response_sender.send(Ok( + StorageServiceResponse::new(data_response, false).unwrap() + )); + + // Let the poller finish processing the response + tokio::task::yield_now().await; + + // Handle the client's transactions request + tokio::spawn(async move { + let network_request = mock_network.next_request().await.unwrap(); + + assert_eq!(network_request.peer_network_id, expected_peer); + assert_eq!(network_request.protocol_id, ProtocolId::StorageServiceRpc); + assert!(!network_request.storage_service_request.use_compression); + assert_matches!( + network_request.storage_service_request.data_request, + DataRequest::GetTransactionsWithProof(TransactionsWithProofRequest { + start_version: 50, + end_version: 100, + proof_version: 100, + include_events: false, + }) + ); + + let data_response = + DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); + let storage_response = StorageServiceResponse::new(data_response, false).unwrap(); + network_request.response_sender.send(Ok(storage_response)); + }); + + // The client's request should succeed since a peer finally has advertised + // data for this range. 
+ let request_timeout = client.get_response_timeout_ms(); + let response = client + .get_transactions_with_proof(100, 50, 100, false, request_timeout) + .await + .unwrap(); + assert_eq!(response.payload, TransactionListWithProof::new_empty()); +} diff --git a/state-sync/aptos-data-client/src/tests/mock.rs b/state-sync/aptos-data-client/src/tests/mock.rs new file mode 100644 index 0000000000000..9d8b20c35470f --- /dev/null +++ b/state-sync/aptos-data-client/src/tests/mock.rs @@ -0,0 +1,288 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + client::AptosDataClient, + error::Result, + global_summary::GlobalDataSummary, + interface::{AptosDataClientInterface, Response}, + poller::DataSummaryPoller, +}; +use aptos_channels::{aptos_channel, message_queues::QueueStyle}; +use aptos_config::{ + config::{AptosDataClientConfig, BaseConfig}, + network_id::{NetworkId, PeerNetworkId}, +}; +use aptos_netcore::transport::ConnectionOrigin; +use aptos_network::{ + application::{interface::NetworkClient, metadata::ConnectionState, storage::PeersAndMetadata}, + peer_manager::{ConnectionRequestSender, PeerManagerRequest, PeerManagerRequestSender}, + protocols::{ + network::{NetworkSender, NewNetworkSender}, + wire::handshake::v1::ProtocolId, + }, + transport::ConnectionMetadata, +}; +use aptos_storage_interface::DbReader; +use aptos_storage_service_client::StorageServiceClient; +use aptos_storage_service_server::network::{NetworkRequest, ResponseSender}; +use aptos_storage_service_types::{ + responses::TransactionOrOutputListWithProof, Epoch, StorageServiceMessage, +}; +use aptos_time_service::{MockTimeService, TimeService}; +use aptos_types::{ + ledger_info::LedgerInfoWithSignatures, + state_store::state_value::StateValueChunkWithProof, + transaction::{TransactionListWithProof, TransactionOutputListWithProof, Version}, + PeerId, +}; +use async_trait::async_trait; +use futures::StreamExt; +use maplit::hashmap; +use mockall::mock; +use std::sync::Arc; + +/// A simple mock network for testing the data client +pub struct MockNetwork { + network_id: NetworkId, + peer_mgr_reqs_rx: aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, + peers_and_metadata: Arc, +} + +impl MockNetwork { + pub fn new( + base_config: Option, + data_client_config: Option, + networks: Option>, + ) -> (Self, MockTimeService, AptosDataClient, DataSummaryPoller) { + // Setup the request managers + let queue_cfg = aptos_channel::Config::new(10).queue_style(QueueStyle::FIFO); + let (peer_mgr_reqs_tx, peer_mgr_reqs_rx) = queue_cfg.build(); + let (connection_reqs_tx, _connection_reqs_rx) = queue_cfg.build(); + + // Setup the network client + let network_sender = NetworkSender::new( + PeerManagerRequestSender::new(peer_mgr_reqs_tx), + ConnectionRequestSender::new(connection_reqs_tx), + ); + let networks = networks + .unwrap_or_else(|| vec![NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]); + let peers_and_metadata = PeersAndMetadata::new(&networks); + let client_network_id = NetworkId::Validator; + let network_client = NetworkClient::new( + vec![], + vec![ProtocolId::StorageServiceRpc], + hashmap! 
{ + client_network_id => network_sender}, + peers_and_metadata.clone(), + ); + + // Create a storage service client + let storage_service_client = StorageServiceClient::new(network_client); + + // Create an aptos data client + let mock_time = TimeService::mock(); + let base_config = base_config.unwrap_or_default(); + let data_client_config = data_client_config.unwrap_or_default(); + let (client, poller) = AptosDataClient::new( + data_client_config, + base_config, + mock_time.clone(), + create_mock_db_reader(), + storage_service_client, + None, + ); + + // Create the mock network + let mock_network = Self { + network_id: client_network_id, + peer_mgr_reqs_rx, + peers_and_metadata, + }; + + (mock_network, mock_time.into_mock(), client, poller) + } + + /// Add a new peer to the network peer DB + pub fn add_peer(&mut self, priority: bool) -> PeerNetworkId { + // Get the network id + let network_id = if priority { + NetworkId::Validator + } else { + NetworkId::Public + }; + self.add_peer_with_network_id(network_id, false) + } + + /// Add a new peer to the network peer DB with the specified network + pub fn add_peer_with_network_id( + &mut self, + network_id: NetworkId, + outbound_connection: bool, + ) -> PeerNetworkId { + // Create a new peer + let peer_id = PeerId::random(); + let peer_network_id = PeerNetworkId::new(network_id, peer_id); + + // Create and save a new connection metadata + let mut connection_metadata = ConnectionMetadata::mock(peer_id); + connection_metadata.origin = if outbound_connection { + ConnectionOrigin::Outbound + } else { + ConnectionOrigin::Inbound + }; + connection_metadata + .application_protocols + .insert(ProtocolId::StorageServiceRpc); + self.peers_and_metadata + .insert_connection_metadata(peer_network_id, connection_metadata) + .unwrap(); + + // Return the new peer + peer_network_id + } + + /// Disconnects the peer in the network peer DB + pub fn disconnect_peer(&mut self, peer: PeerNetworkId) { + self.update_peer_state(peer, ConnectionState::Disconnected); + } + + /// Reconnects the peer in the network peer DB + pub fn reconnect_peer(&mut self, peer: PeerNetworkId) { + self.update_peer_state(peer, ConnectionState::Connected); + } + + /// Updates the state of the given peer + fn update_peer_state(&mut self, peer: PeerNetworkId, state: ConnectionState) { + self.peers_and_metadata + .update_connection_state(peer, state) + .unwrap(); + } + + /// Get the next request sent from the client + pub async fn next_request(&mut self) -> Option { + match self.peer_mgr_reqs_rx.next().await { + Some(PeerManagerRequest::SendRpc(peer_id, network_request)) => { + let peer_network_id = PeerNetworkId::new(self.network_id, peer_id); + let protocol_id = network_request.protocol_id; + let data = network_request.data; + let res_tx = network_request.res_tx; + + let message: StorageServiceMessage = bcs::from_bytes(data.as_ref()).unwrap(); + let storage_service_request = match message { + StorageServiceMessage::Request(request) => request, + _ => panic!("unexpected: {:?}", message), + }; + let response_sender = ResponseSender::new(res_tx); + + Some(NetworkRequest { + peer_network_id, + protocol_id, + storage_service_request, + response_sender, + }) + }, + Some(PeerManagerRequest::SendDirectSend(_, _)) => panic!("Unexpected direct send msg"), + None => None, + } + } +} + +/// Creates a mock data client for testing +pub fn create_mock_data_client() -> Arc { + Arc::new(MockAptosDataClient::new()) +} + +// This automatically creates a MockAptosDataClient +mock! 
{ + pub AptosDataClient {} + + #[async_trait] + impl AptosDataClientInterface for AptosDataClient { + fn get_global_data_summary(&self) -> GlobalDataSummary; + + async fn get_epoch_ending_ledger_infos( + &self, + start_epoch: Epoch, + expected_end_epoch: Epoch, + request_timeout_ms: u64, + ) -> Result>>; + + async fn get_new_transaction_outputs_with_proof( + &self, + known_version: Version, + known_epoch: Epoch, + request_timeout_ms: u64, + ) -> Result>; + + async fn get_new_transactions_with_proof( + &self, + known_version: Version, + known_epoch: Epoch, + include_events: bool, + request_timeout_ms: u64, + ) -> Result>; + + async fn get_new_transactions_or_outputs_with_proof( + &self, + known_version: Version, + known_epoch: Epoch, + include_events: bool, + request_timeout_ms: u64, + ) -> Result>; + + async fn get_number_of_states( + &self, + version: Version, + request_timeout_ms: u64, + ) -> Result>; + + async fn get_state_values_with_proof( + &self, + version: u64, + start_index: u64, + end_index: u64, + request_timeout_ms: u64, + ) -> Result>; + + async fn get_transaction_outputs_with_proof( + &self, + proof_version: Version, + start_version: Version, + end_version: Version, + request_timeout_ms: u64, + ) -> Result>; + + async fn get_transactions_with_proof( + &self, + proof_version: Version, + start_version: Version, + end_version: Version, + include_events: bool, + request_timeout_ms: u64, + ) -> Result>; + + async fn get_transactions_or_outputs_with_proof( + &self, + proof_version: Version, + start_version: Version, + end_version: Version, + include_events: bool, + request_timeout_ms: u64, + ) -> Result>; + } +} + +/// Creates a mock database reader for testing +pub fn create_mock_db_reader() -> Arc { + Arc::new(MockDatabaseReader {}) +} + +/// A simple mock database reader that only implements +/// the functions required by the tests. 
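[Editor's note] The `mock!` invocation above uses mockall to generate `MockAptosDataClient` from the `AptosDataClientInterface` trait. As a reminder of the pattern, here is a tiny standalone example with a hypothetical trait (not the actual interface); `MockVersionClient`, `expect_*`, and `returning` follow mockall's generated-mock API.

```rust
use mockall::mock;

// A hypothetical trait standing in for the data client interface.
trait VersionProvider {
    fn highest_synced_version(&self) -> u64;
}

mock! {
    pub VersionClient {}

    impl VersionProvider for VersionClient {
        fn highest_synced_version(&self) -> u64;
    }
}

fn main() {
    // The macro generates `MockVersionClient` with per-method expectations.
    let mut provider = MockVersionClient::new();
    provider.expect_highest_synced_version().returning(|| 200);
    assert_eq!(provider.highest_synced_version(), 200);
}
```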
+pub struct MockDatabaseReader {} +impl DbReader for MockDatabaseReader { + fn get_block_timestamp(&self, version: Version) -> anyhow::Result { + Ok(version * 100_000) + } +} diff --git a/state-sync/aptos-data-client/src/tests/mod.rs b/state-sync/aptos-data-client/src/tests/mod.rs new file mode 100644 index 0000000000000..db4878a6f8456 --- /dev/null +++ b/state-sync/aptos-data-client/src/tests/mod.rs @@ -0,0 +1,10 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +mod advertise; +mod compression; +pub mod mock; +mod peers; +mod poller; +mod priority; +mod utils; diff --git a/state-sync/aptos-data-client/src/tests/peers.rs b/state-sync/aptos-data-client/src/tests/peers.rs new file mode 100644 index 0000000000000..b9424bd84dc11 --- /dev/null +++ b/state-sync/aptos-data-client/src/tests/peers.rs @@ -0,0 +1,339 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + client::AptosDataClient, + error::Error, + interface::AptosDataClientInterface, + poller::poll_peer, + tests::{mock::MockNetwork, utils}, +}; +use aptos_config::network_id::PeerNetworkId; +use aptos_storage_service_types::{ + requests::DataRequest, + responses::{CompleteDataRange, DataResponse, StorageServerSummary, StorageServiceResponse}, + StorageServiceError, +}; +use aptos_types::transaction::TransactionListWithProof; +use claims::{assert_err, assert_matches}; +use std::time::Duration; + +#[tokio::test] +async fn bad_peer_is_eventually_banned_internal() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + let good_peer = mock_network.add_peer(true); + let bad_peer = mock_network.add_peer(true); + + // Bypass poller and just add the storage summaries directly. + + // Good peer advertises txns 0 -> 100. + client.update_summary(good_peer, utils::create_storage_summary(100)); + // Bad peer advertises txns 0 -> 200 (but can't actually service). + client.update_summary(bad_peer, utils::create_storage_summary(200)); + client.update_global_summary_cache().unwrap(); + + // The global summary should contain the bad peer's advertisement. + let global_summary = client.get_global_data_summary(); + assert!(global_summary + .advertised_data + .transactions + .contains(&CompleteDataRange::new(0, 200).unwrap())); + + // Spawn a handler for both peers. + tokio::spawn(async move { + while let Some(network_request) = mock_network.next_request().await { + let peer_network_id = network_request.peer_network_id; + let response_sender = network_request.response_sender; + if peer_network_id == good_peer { + // Good peer responds with good response. + let data_response = + DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); + response_sender.send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); + } else if peer_network_id == bad_peer { + // Bad peer responds with error. + response_sender.send(Err(StorageServiceError::InternalError("".to_string()))); + } + } + }); + + let mut seen_data_unavailable_err = false; + + // Sending a bunch of requests to the bad peer's upper range will fail. + let request_timeout = client.get_response_timeout_ms(); + for _ in 0..20 { + let result = client + .get_transactions_with_proof(200, 200, 200, false, request_timeout) + .await; + + // While the score is still decreasing, we should see a bunch of + // InternalError's. Once we see a `DataIsUnavailable` error, we should + // only see that error. 
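+        // (Each error response lowers the bad peer's score; once the score drops
+        // below the ignore threshold, the peer's advertised data is excluded from
+        // the global summary and the request range becomes unserviceable.)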
+ if !seen_data_unavailable_err { + assert_err!(&result); + if let Err(Error::DataIsUnavailable(_)) = result { + seen_data_unavailable_err = true; + } + } else { + assert_matches!(result, Err(Error::DataIsUnavailable(_))); + } + } + + // Peer should eventually get ignored and we should consider this request + // range unserviceable. + assert!(seen_data_unavailable_err); + + // The global summary should no longer contain the bad peer's advertisement. + client.update_global_summary_cache().unwrap(); + let global_summary = client.get_global_data_summary(); + assert!(!global_summary + .advertised_data + .transactions + .contains(&CompleteDataRange::new(0, 200).unwrap())); + + // We should still be able to send the good peer a request. + let response = client + .get_transactions_with_proof(100, 50, 100, false, request_timeout) + .await + .unwrap(); + assert_eq!(response.payload, TransactionListWithProof::new_empty()); +} + +#[tokio::test] +async fn bad_peer_is_eventually_banned_callback() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + let bad_peer = mock_network.add_peer(true); + + // Bypass poller and just add the storage summaries directly. + // Bad peer advertises txns 0 -> 200 (but can't actually service). + client.update_summary(bad_peer, utils::create_storage_summary(200)); + client.update_global_summary_cache().unwrap(); + + // Spawn a handler for both peers. + tokio::spawn(async move { + while let Some(network_request) = mock_network.next_request().await { + let data_response = + DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); + network_request + .response_sender + .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); + } + }); + + let mut seen_data_unavailable_err = false; + + // Sending a bunch of requests to the bad peer (that we later decide are bad). + let request_timeout = client.get_response_timeout_ms(); + for _ in 0..20 { + let result = client + .get_transactions_with_proof(200, 200, 200, false, request_timeout) + .await; + + // While the score is still decreasing, we should see a bunch of + // InternalError's. Once we see a `DataIsUnavailable` error, we should + // only see that error. + if !seen_data_unavailable_err { + match result { + Ok(response) => { + response.context.response_callback.notify_bad_response( + crate::interface::ResponseError::ProofVerificationError, + ); + }, + Err(Error::DataIsUnavailable(_)) => { + seen_data_unavailable_err = true; + }, + Err(_) => panic!("unexpected result: {:?}", result), + } + } else { + assert_matches!(result, Err(Error::DataIsUnavailable(_))); + } + } + + // Peer should eventually get ignored and we should consider this request + // range unserviceable. + assert!(seen_data_unavailable_err); + + // The global summary should no longer contain the bad peer's advertisement. + client.update_global_summary_cache().unwrap(); + let global_summary = client.get_global_data_summary(); + assert!(!global_summary + .advertised_data + .transactions + .contains(&CompleteDataRange::new(0, 200).unwrap())); +} + +#[tokio::test] +async fn bad_peer_is_eventually_added_back() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, mock_time, client, poller) = MockNetwork::new(None, None, None); + + // Add a connected peer. 
+ mock_network.add_peer(true); + + tokio::spawn(poller.start_poller()); + tokio::spawn(async move { + while let Some(network_request) = mock_network.next_request().await { + match network_request.storage_service_request.data_request { + DataRequest::GetTransactionsWithProof(_) => { + let data_response = + DataResponse::TransactionsWithProof(TransactionListWithProof::new_empty()); + network_request + .response_sender + .send(Ok(StorageServiceResponse::new( + data_response, + network_request.storage_service_request.use_compression, + ) + .unwrap())); + }, + DataRequest::GetStorageServerSummary => { + let data_response = + DataResponse::StorageServerSummary(utils::create_storage_summary(200)); + network_request + .response_sender + .send(Ok(StorageServiceResponse::new( + data_response, + network_request.storage_service_request.use_compression, + ) + .unwrap())); + }, + _ => panic!( + "Unexpected storage request: {:?}", + network_request.storage_service_request + ), + } + } + }); + + // Advance time so the poller sends data summary requests. + let summary_poll_interval = Duration::from_millis(1_000); + for _ in 0..2 { + tokio::task::yield_now().await; + mock_time.advance_async(summary_poll_interval).await; + } + + // Initially this request range is serviceable by this peer. + let global_summary = client.get_global_data_summary(); + assert!(global_summary + .advertised_data + .transactions + .contains(&CompleteDataRange::new(0, 200).unwrap())); + + // Keep decreasing this peer's score by considering its responses bad. + // Eventually its score drops below IGNORE_PEER_THRESHOLD. + let request_timeout = client.get_response_timeout_ms(); + for _ in 0..20 { + let result = client + .get_transactions_with_proof(200, 0, 200, false, request_timeout) + .await; + + if let Ok(response) = result { + response + .context + .response_callback + .notify_bad_response(crate::interface::ResponseError::ProofVerificationError); + } + } + + // Peer is eventually ignored and this request range unserviceable. + client.update_global_summary_cache().unwrap(); + let global_summary = client.get_global_data_summary(); + assert!(!global_summary + .advertised_data + .transactions + .contains(&CompleteDataRange::new(0, 200).unwrap())); + + // This peer still responds to the StorageServerSummary requests. + // Its score keeps increasing and this peer is eventually added back. 
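+    // (Each successful summary poll raises the score again, so advancing time
+    // below lets the poller push the score back above the ignore threshold and
+    // the peer's advertisements re-enter the global summary.)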
+ for _ in 0..20 { + mock_time.advance_async(summary_poll_interval).await; + } + + let global_summary = client.get_global_data_summary(); + assert!(global_summary + .advertised_data + .transactions + .contains(&CompleteDataRange::new(0, 200).unwrap())); +} + +#[tokio::test(flavor = "multi_thread")] +async fn disconnected_peers_garbage_collection() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + // Connect several peers + let priority_peer_1 = mock_network.add_peer(true); + let priority_peer_2 = mock_network.add_peer(true); + let priority_peer_3 = mock_network.add_peer(true); + + // Poll all of the peers to initialize the peer states + let all_peers = vec![priority_peer_1, priority_peer_2, priority_peer_3]; + poll_peers(&mut mock_network, &client, all_peers.clone()).await; + + // Verify we have peer states for all peers + verify_peer_states(&client, all_peers.clone()); + + // Disconnect priority peer 1 and update the global data summary + mock_network.disconnect_peer(priority_peer_1); + client.update_global_summary_cache().unwrap(); + + // Verify we have peer states for only the remaining peers + verify_peer_states(&client, vec![priority_peer_2, priority_peer_3]); + + // Disconnect priority peer 2 and update the global data summary + mock_network.disconnect_peer(priority_peer_2); + client.update_global_summary_cache().unwrap(); + + // Verify we have peer states for only priority peer 3 + verify_peer_states(&client, vec![priority_peer_3]); + + // Reconnect priority peer 1, poll it and update the global data summary + mock_network.reconnect_peer(priority_peer_1); + poll_peers(&mut mock_network, &client, vec![priority_peer_1]).await; + client.update_global_summary_cache().unwrap(); + + // Verify we have peer states for priority peer 1 and 3 + verify_peer_states(&client, vec![priority_peer_1, priority_peer_3]); + + // Reconnect priority peer 2, poll it and update the global data summary + mock_network.reconnect_peer(priority_peer_2); + poll_peers(&mut mock_network, &client, vec![priority_peer_2]).await; + client.update_global_summary_cache().unwrap(); + + // Verify we have peer states for all peers + verify_peer_states(&client, all_peers); +} + +/// A simple helper function that polls all the specified peers +/// and returns storage server summaries for each. 
+async fn poll_peers( + mock_network: &mut MockNetwork, + client: &AptosDataClient, + all_peers: Vec, +) { + for peer in all_peers { + // Poll the peer + let handle = poll_peer(client.clone(), peer, None); + + // Respond to the poll request + let network_request = mock_network.next_request().await.unwrap(); + let data_response = DataResponse::StorageServerSummary(StorageServerSummary::default()); + network_request + .response_sender + .send(Ok(StorageServiceResponse::new(data_response, true).unwrap())); + + // Wait for the poll to complete + handle.await.unwrap(); + } +} + +/// Verifies the exclusive existence of peer states for all the specified peers +fn verify_peer_states(client: &AptosDataClient, all_peers: Vec) { + let peer_to_states = client.get_peer_states().get_peer_to_states(); + for peer in &all_peers { + assert!(peer_to_states.contains_key(peer)); + } + assert_eq!(peer_to_states.len(), all_peers.len()); +} diff --git a/state-sync/aptos-data-client/src/tests/poller.rs b/state-sync/aptos-data-client/src/tests/poller.rs new file mode 100644 index 0000000000000..e59985b0acf95 --- /dev/null +++ b/state-sync/aptos-data-client/src/tests/poller.rs @@ -0,0 +1,413 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{client::AptosDataClient, error::Error, poller::poll_peer, tests::mock::MockNetwork}; +use aptos_config::{config::AptosDataClientConfig, network_id::PeerNetworkId}; +use aptos_storage_service_types::StorageServiceError; +use claims::{assert_matches, assert_none}; + +#[tokio::test] +async fn fetch_peers_frequency() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, poller) = MockNetwork::new(None, None, None); + + // Add regular peer 1 and 2 + let _regular_peer_1 = mock_network.add_peer(false); + let _regular_peer_2 = mock_network.add_peer(false); + + // Set `always_poll` to true and fetch the regular peers multiple times. Ensure + // that for each fetch we receive a peer. + let num_fetches = 20; + for _ in 0..num_fetches { + let peer = poller.fetch_regular_peer(true).unwrap(); + client.in_flight_request_complete(&peer); + } + + // Set `always_poll` to false and fetch the regular peers multiple times + let mut regular_peer_count = 0; + for _ in 0..num_fetches { + if let Some(peer) = poller.fetch_regular_peer(false) { + regular_peer_count += 1; + client.in_flight_request_complete(&peer); + } + } + + // Verify we received regular peers at a reduced frequency + assert!(regular_peer_count < num_fetches); + + // Add priority peer 1 and 2 + let _priority_peer_1 = mock_network.add_peer(true); + let _priority_peer_2 = mock_network.add_peer(true); + + // Fetch the prioritized peers multiple times. Ensure that for + // each fetch we receive a peer. 
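+    // (In contrast to the reduced frequency observed for regular peers above,
+    // every fetch below is expected to yield a prioritized peer.)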
+ for _ in 0..num_fetches { + let peer = poller.try_fetch_peer(true).unwrap(); + client.in_flight_request_complete(&peer); + } +} + +#[tokio::test] +async fn fetch_peers_ordering() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + // Ensure the properties hold for both priority and non-priority peers + for is_priority_peer in [true, false] { + // Add peer 1 + let peer_1 = mock_network.add_peer(is_priority_peer); + + // Request the next peer to poll and verify that we get peer 1 + for _ in 0..3 { + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_1); + client.in_flight_request_complete(&peer_to_poll); + } + + // Add peer 2 + let peer_2 = mock_network.add_peer(is_priority_peer); + + // Request the next peer and verify we get either peer + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert!(peer_to_poll == peer_1 || peer_to_poll == peer_2); + client.in_flight_request_complete(&peer_to_poll); + + // Request the next peer again, but don't mark the poll as complete + let peer_to_poll_1 = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + + // Request another peer again and verify that it's different to the previous peer + let peer_to_poll_2 = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_ne!(peer_to_poll_1, peer_to_poll_2); + + // Neither poll has completed (they're both in-flight), so make another request + // and verify we get no peers. + assert_none!(fetch_peer_to_poll(client.clone(), is_priority_peer).unwrap()); + + // Add peer 3 + let peer_3 = mock_network.add_peer(is_priority_peer); + + // Request another peer again and verify it's peer_3 + let peer_to_poll_3 = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll_3, peer_3); + + // Mark the second poll as completed + client.in_flight_request_complete(&peer_to_poll_2); + + // Make another request and verify we get peer 2 now (as it was ready) + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_to_poll_2); + + // Mark the first poll as completed + client.in_flight_request_complete(&peer_to_poll_1); + + // Make another request and verify we get peer 1 now + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_to_poll_1); + + // Mark the third poll as completed + client.in_flight_request_complete(&peer_to_poll_3); + + // Make another request and verify we get peer 3 now + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_to_poll_3); + client.in_flight_request_complete(&peer_to_poll_3); + } +} + +#[tokio::test] +async fn fetch_peers_disconnect() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + // Ensure the properties hold for both priority and non-priority peers + for is_priority_peer in [true, false] { + // Request the next peer to poll and verify we have no peers + assert_matches!( + fetch_peer_to_poll(client.clone(), is_priority_peer), + Err(Error::DataIsUnavailable(_)) + ); + + // Add peer 1 + let peer_1 = mock_network.add_peer(is_priority_peer); + + // Request the next peer to poll and verify it's peer 
1 + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_1); + client.in_flight_request_complete(&peer_to_poll); + + // Add peer 2 and disconnect peer 1 + let peer_2 = mock_network.add_peer(is_priority_peer); + mock_network.disconnect_peer(peer_1); + + // Request the next peer to poll and verify it's peer 2 + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_2); + client.in_flight_request_complete(&peer_to_poll); + + // Disconnect peer 2 + mock_network.disconnect_peer(peer_2); + + // Request the next peer to poll and verify an error is returned because + // there are no connected peers. + assert_matches!( + fetch_peer_to_poll(client.clone(), is_priority_peer), + Err(Error::DataIsUnavailable(_)) + ); + + // Add peer 3 + let peer_3 = mock_network.add_peer(is_priority_peer); + + // Request the next peer to poll and verify it's peer 3 + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_3); + client.in_flight_request_complete(&peer_to_poll); + + // Disconnect peer 3 + mock_network.disconnect_peer(peer_3); + + // Request the next peer to poll and verify an error is returned because + // there are no connected peers. + assert_matches!( + fetch_peer_to_poll(client.clone(), is_priority_peer), + Err(Error::DataIsUnavailable(_)) + ); + } +} + +#[tokio::test] +async fn fetch_peers_reconnect() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + // Ensure the properties hold for both priority and non-priority peers + for is_priority_peer in [true, false] { + // Request the next peer to poll and verify we have no peers + assert_matches!( + fetch_peer_to_poll(client.clone(), is_priority_peer), + Err(Error::DataIsUnavailable(_)) + ); + + // Add peer 1 + let peer_1 = mock_network.add_peer(is_priority_peer); + + // Request the next peer to poll and verify it's peer 1 + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_1); + client.in_flight_request_complete(&peer_to_poll); + + // Add peer 2 and disconnect peer 1 + let peer_2 = mock_network.add_peer(is_priority_peer); + mock_network.disconnect_peer(peer_1); + + // Request the next peer to poll and verify it's peer 2 + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_2); + client.in_flight_request_complete(&peer_to_poll); + + // Disconnect peer 2 and reconnect peer 1 + mock_network.disconnect_peer(peer_2); + mock_network.reconnect_peer(peer_1); + + // Request the next peer to poll and verify it's peer 1 + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_1); + + // Reconnect peer 2 + mock_network.reconnect_peer(peer_2); + + // Request the next peer to poll several times and verify it's peer 2 + // (the in-flight request for peer 1 has yet to complete). 
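+        // (A peer with an outstanding in-flight poll is skipped until
+        // in_flight_request_complete is called for it.)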
+ for _ in 0..3 { + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_2); + client.in_flight_request_complete(&peer_to_poll); + } + + // Disconnect peer 2 and mark peer 1's in-flight request as complete + mock_network.disconnect_peer(peer_2); + client.in_flight_request_complete(&peer_1); + + // Request the next peer to poll several times and verify it's peer 1 + for _ in 0..3 { + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_1); + client.in_flight_request_complete(&peer_to_poll); + } + + // Disconnect peer 1 + mock_network.disconnect_peer(peer_1); + + // Request the next peer to poll and verify an error is returned because + // there are no connected peers. + assert_matches!( + fetch_peer_to_poll(client.clone(), is_priority_peer), + Err(Error::DataIsUnavailable(_)) + ); + } +} + +#[tokio::test] +async fn fetch_peers_max_in_flight() { + ::aptos_logger::Logger::init_for_testing(); + + // Create a data client with max in-flight requests of 2 + let data_client_config = AptosDataClientConfig { + max_num_in_flight_priority_polls: 2, + max_num_in_flight_regular_polls: 2, + ..Default::default() + }; + let (mut mock_network, _, client, _) = MockNetwork::new(None, Some(data_client_config), None); + + // Ensure the properties hold for both priority and non-priority peers + for is_priority_peer in [true, false] { + // Add peer 1 + let peer_1 = mock_network.add_peer(is_priority_peer); + + // Request the next peer to poll and verify it's peer 1 + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_1); + + // Add peer 2 + let peer_2 = mock_network.add_peer(is_priority_peer); + + // Request the next peer to poll and verify it's peer 2 (peer 1's in-flight + // request has not yet completed). + let peer_to_poll = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_eq!(peer_to_poll, peer_2); + + // Add peer 3 + let peer_3 = mock_network.add_peer(is_priority_peer); + + // Request the next peer to poll and verify it's empty (we already have + // the maximum number of in-flight requests). + assert_none!(fetch_peer_to_poll(client.clone(), is_priority_peer).unwrap()); + + // Mark peer 2's in-flight request as complete + client.in_flight_request_complete(&peer_2); + + // Request the next peer to poll and verify it's either peer 2 or peer 3 + let peer_to_poll_1 = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert!(peer_to_poll_1 == peer_2 || peer_to_poll_1 == peer_3); + + // Request the next peer to poll and verify it's empty (we already have + // the maximum number of in-flight requests). + assert_none!(fetch_peer_to_poll(client.clone(), is_priority_peer).unwrap()); + + // Mark peer 1's in-flight request as complete + client.in_flight_request_complete(&peer_1); + + // Request the next peer to poll and verify it's not the peer that already + // has an in-flight request. 
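+        // (With the in-flight cap of 2 configured above, completing peer 1's poll
+        // frees a slot, so the next fetch must pick a peer that is not already in flight.)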
+ let peer_to_poll_2 = fetch_peer_to_poll(client.clone(), is_priority_peer) + .unwrap() + .unwrap(); + assert_ne!(peer_to_poll_1, peer_to_poll_2); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn in_flight_error_handling() { + ::aptos_logger::Logger::init_for_testing(); + + // Create a data client with max in-flight requests of 1 + let data_client_config = AptosDataClientConfig { + max_num_in_flight_priority_polls: 1, + max_num_in_flight_regular_polls: 1, + ..Default::default() + }; + let (mut mock_network, _, client, _) = MockNetwork::new(None, Some(data_client_config), None); + + // Verify we have no in-flight polls + let num_in_flight_polls = get_num_in_flight_polls(client.clone(), true); + assert_eq!(num_in_flight_polls, 0); + + // Add a peer + let peer = mock_network.add_peer(true); + + // Poll the peer + client.in_flight_request_started(&peer); + let handle = poll_peer(client.clone(), peer, None); + + // Respond to the peer poll with an error + if let Some(network_request) = mock_network.next_request().await { + network_request + .response_sender + .send(Err(StorageServiceError::InternalError( + "An unexpected error occurred!".into(), + ))); + } + + // Wait for the poller to complete + handle.await.unwrap(); + + // Verify we have no in-flight polls + let num_in_flight_polls = get_num_in_flight_polls(client.clone(), true); + assert_eq!(num_in_flight_polls, 0); +} + +/// A helper method that fetches peers to poll depending on the peer priority +fn fetch_peer_to_poll( + client: AptosDataClient, + is_priority_peer: bool, +) -> Result, Error> { + // Fetch the next peer to poll + let result = if is_priority_peer { + client.fetch_prioritized_peer_to_poll() + } else { + client.fetch_regular_peer_to_poll() + }; + + // If we get a peer, mark the peer as having an in-flight request + if let Ok(Some(peer_to_poll)) = result { + client.in_flight_request_started(&peer_to_poll); + } + + result +} + +/// Fetches the number of in flight requests for peers depending on priority +fn get_num_in_flight_polls(client: AptosDataClient, is_priority_peer: bool) -> u64 { + if is_priority_peer { + client.get_peer_states().num_in_flight_priority_polls() + } else { + client.get_peer_states().num_in_flight_regular_polls() + } +} diff --git a/state-sync/aptos-data-client/src/tests/priority.rs b/state-sync/aptos-data-client/src/tests/priority.rs new file mode 100644 index 0000000000000..9a3f54a90a377 --- /dev/null +++ b/state-sync/aptos-data-client/src/tests/priority.rs @@ -0,0 +1,322 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + error::Error, + tests::{mock::MockNetwork, utils}, +}; +use aptos_config::{ + config::{BaseConfig, RoleType}, + network_id::NetworkId, +}; +use aptos_storage_service_types::{ + requests::{ + DataRequest, NewTransactionOutputsWithProofRequest, NewTransactionsWithProofRequest, + StorageServiceRequest, TransactionOutputsWithProofRequest, + }, + responses::OPTIMISTIC_FETCH_VERSION_DELTA, +}; +use claims::assert_matches; + +#[tokio::test] +async fn all_peer_request_selection() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + // Ensure no peers can service the given request (we have no connections) + let server_version_request = + StorageServiceRequest::new(DataRequest::GetServerProtocolVersion, true); + assert_matches!( + client.choose_peer_for_request(&server_version_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Add a regular peer and verify the 
peer is selected as the recipient + let regular_peer_1 = mock_network.add_peer(false); + assert_eq!( + client.choose_peer_for_request(&server_version_request), + Ok(regular_peer_1) + ); + + // Add two prioritized peers + let priority_peer_1 = mock_network.add_peer(true); + let priority_peer_2 = mock_network.add_peer(true); + + // Request data that is not being advertised and verify we get an error + let output_data_request = + DataRequest::GetTransactionOutputsWithProof(TransactionOutputsWithProofRequest { + proof_version: 100, + start_version: 0, + end_version: 100, + }); + let storage_request = StorageServiceRequest::new(output_data_request, false); + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Advertise the data for the regular peer and verify it is now selected + client.update_summary(regular_peer_1, utils::create_storage_summary(100)); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Advertise the data for the priority peer and verify the priority peer is selected + client.update_summary(priority_peer_2, utils::create_storage_summary(100)); + let peer_for_request = client.choose_peer_for_request(&storage_request).unwrap(); + assert_eq!(peer_for_request, priority_peer_2); + + // Reconnect priority peer 1 and remove the advertised data for priority peer 2 + mock_network.reconnect_peer(priority_peer_1); + client.update_summary(priority_peer_2, utils::create_storage_summary(0)); + + // Request the data again and verify the regular peer is chosen + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Advertise the data for priority peer 1 and verify the priority peer is selected + client.update_summary(priority_peer_1, utils::create_storage_summary(100)); + let peer_for_request = client.choose_peer_for_request(&storage_request).unwrap(); + assert_eq!(peer_for_request, priority_peer_1); + + // Advertise the data for priority peer 2 and verify either priority peer is selected + client.update_summary(priority_peer_2, utils::create_storage_summary(100)); + let peer_for_request = client.choose_peer_for_request(&storage_request).unwrap(); + assert!(peer_for_request == priority_peer_1 || peer_for_request == priority_peer_2); +} + +#[tokio::test] +async fn prioritized_peer_request_selection() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + // Ensure the properties hold for storage summary and version requests + let storage_summary_request = DataRequest::GetStorageServerSummary; + let get_version_request = DataRequest::GetServerProtocolVersion; + for data_request in [storage_summary_request, get_version_request] { + let storage_request = StorageServiceRequest::new(data_request, true); + + // Ensure no peers can service the request (we have no connections) + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Add a regular peer and verify the peer is selected as the recipient + let regular_peer_1 = mock_network.add_peer(false); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Add a priority peer and verify the peer is selected as the recipient + let priority_peer_1 = mock_network.add_peer(true); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(priority_peer_1) + ); + + // Disconnect the priority peer and verify the regular 
peer is now chosen + mock_network.disconnect_peer(priority_peer_1); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Connect a new priority peer and verify it is now selected + let priority_peer_2 = mock_network.add_peer(true); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(priority_peer_2) + ); + + // Disconnect the priority peer and verify the regular peer is again chosen + mock_network.disconnect_peer(priority_peer_2); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Disconnect the regular peer so that we no longer have any connections + mock_network.disconnect_peer(regular_peer_1); + } +} + +#[tokio::test] +async fn prioritized_peer_subscription_selection() { + ::aptos_logger::Logger::init_for_testing(); + let (mut mock_network, _, client, _) = MockNetwork::new(None, None, None); + + // Create test data + let known_version = 10000000; + let known_epoch = 10; + + // Ensure the properties hold for both subscription requests + let new_transactions_request = + DataRequest::GetNewTransactionsWithProof(NewTransactionsWithProofRequest { + known_version, + known_epoch, + include_events: false, + }); + let new_outputs_request = + DataRequest::GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { + known_version, + known_epoch, + }); + for data_request in [new_transactions_request, new_outputs_request] { + let storage_request = StorageServiceRequest::new(data_request, true); + + // Ensure no peers can service the request (we have no connections) + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Add a regular peer and verify the peer cannot support the request + let regular_peer_1 = mock_network.add_peer(false); + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Advertise the data for the regular peer and verify it is now selected + client.update_summary(regular_peer_1, utils::create_storage_summary(known_version)); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Add a priority peer and verify the regular peer is selected + let priority_peer_1 = mock_network.add_peer(true); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Advertise the data for the priority peer and verify it is now selected + client.update_summary( + priority_peer_1, + utils::create_storage_summary(known_version), + ); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(priority_peer_1) + ); + + // Update the priority peer to be too far behind and verify it is not selected + client.update_summary( + priority_peer_1, + utils::create_storage_summary(known_version - OPTIMISTIC_FETCH_VERSION_DELTA), + ); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Update the regular peer to be too far behind and verify neither is selected + client.update_summary( + regular_peer_1, + utils::create_storage_summary(known_version - (OPTIMISTIC_FETCH_VERSION_DELTA * 2)), + ); + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Disconnect the regular peer and verify neither is selected + mock_network.disconnect_peer(regular_peer_1); + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + 
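+        // (For these new-data requests a peer is only eligible while its advertised
+        // version lags the requested version by less than OPTIMISTIC_FETCH_VERSION_DELTA,
+        // which is why both lagging peers were rejected above.)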
// Advertise the data for the priority peer and verify it is now selected again + client.update_summary( + priority_peer_1, + utils::create_storage_summary(known_version + 1000), + ); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(priority_peer_1) + ); + + // Disconnect the priority peer so that we no longer have any connections + mock_network.disconnect_peer(priority_peer_1); + } +} + +#[tokio::test] +async fn validator_peer_prioritization() { + ::aptos_logger::Logger::init_for_testing(); + + // Create a validator node + let base_config = BaseConfig { + role: RoleType::Validator, + ..Default::default() + }; + let (mut mock_network, _, client, _) = MockNetwork::new(Some(base_config), None, None); + + // Add a validator peer and ensure it's prioritized + let validator_peer = mock_network.add_peer_with_network_id(NetworkId::Validator, false); + let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); + assert_eq!(priority_peers, vec![validator_peer]); + assert_eq!(regular_peers, vec![]); + + // Add a vfn peer and ensure it's not prioritized + let vfn_peer = mock_network.add_peer_with_network_id(NetworkId::Vfn, true); + let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); + assert_eq!(priority_peers, vec![validator_peer]); + assert_eq!(regular_peers, vec![vfn_peer]); +} + +#[tokio::test] +async fn vfn_peer_prioritization() { + ::aptos_logger::Logger::init_for_testing(); + + // Create a validator fullnode + let base_config = BaseConfig { + role: RoleType::FullNode, + ..Default::default() + }; + let (mut mock_network, _, client, _) = MockNetwork::new(Some(base_config), None, None); + + // Add a validator peer and ensure it's prioritized + let validator_peer = mock_network.add_peer_with_network_id(NetworkId::Vfn, false); + let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); + assert_eq!(priority_peers, vec![validator_peer]); + assert_eq!(regular_peers, vec![]); + + // Add a pfn peer and ensure it's not prioritized + let pfn_peer = mock_network.add_peer_with_network_id(NetworkId::Public, true); + let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); + assert_eq!(priority_peers, vec![validator_peer]); + assert_eq!(regular_peers, vec![pfn_peer]); +} + +#[tokio::test] +async fn pfn_peer_prioritization() { + ::aptos_logger::Logger::init_for_testing(); + + // Create a public fullnode + let base_config = BaseConfig { + role: RoleType::FullNode, + ..Default::default() + }; + let (mut mock_network, _, client, _) = + MockNetwork::new(Some(base_config), None, Some(vec![NetworkId::Public])); + + // Add an inbound pfn peer and ensure it's not prioritized + let inbound_peer = mock_network.add_peer_with_network_id(NetworkId::Public, false); + let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); + assert_eq!(priority_peers, vec![]); + assert_eq!(regular_peers, vec![inbound_peer]); + + // Add an outbound pfn peer and ensure it's prioritized + let outbound_peer = mock_network.add_peer_with_network_id(NetworkId::Public, true); + let (priority_peers, regular_peers) = client.get_priority_and_regular_peers().unwrap(); + assert_eq!(priority_peers, vec![outbound_peer]); + assert_eq!(regular_peers, vec![inbound_peer]); +} diff --git a/state-sync/aptos-data-client/src/tests/utils.rs b/state-sync/aptos-data-client/src/tests/utils.rs new file mode 100644 index 0000000000000..cef6e82104050 --- /dev/null +++ 
b/state-sync/aptos-data-client/src/tests/utils.rs @@ -0,0 +1,43 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_crypto::HashValue; +use aptos_storage_service_types::responses::{ + CompleteDataRange, DataSummary, ProtocolMetadata, StorageServerSummary, +}; +use aptos_types::{ + aggregate_signature::AggregateSignature, + block_info::BlockInfo, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + transaction::Version, +}; + +/// Creates a test ledger info at the given version +fn create_ledger_info(version: Version) -> LedgerInfoWithSignatures { + LedgerInfoWithSignatures::new( + LedgerInfo::new( + BlockInfo::new(0, 0, HashValue::zero(), HashValue::zero(), version, 0, None), + HashValue::zero(), + ), + AggregateSignature::empty(), + ) +} + +/// Creates a test storage server summary at the given version +pub fn create_storage_summary(version: Version) -> StorageServerSummary { + StorageServerSummary { + protocol_metadata: ProtocolMetadata { + max_epoch_chunk_size: 1000, + max_state_chunk_size: 1000, + max_transaction_chunk_size: 1000, + max_transaction_output_chunk_size: 1000, + }, + data_summary: DataSummary { + synced_ledger_info: Some(create_ledger_info(version)), + epoch_ending_ledger_infos: None, + transactions: Some(CompleteDataRange::new(0, version).unwrap()), + transaction_outputs: Some(CompleteDataRange::new(0, version).unwrap()), + states: None, + }, + } +} diff --git a/state-sync/state-sync-v2/data-streaming-service/src/streaming_client.rs b/state-sync/state-sync-v2/data-streaming-service/src/streaming_client.rs index 6015399082ce4..07f9a79381388 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/streaming_client.rs +++ b/state-sync/state-sync-v2/data-streaming-service/src/streaming_client.rs @@ -468,7 +468,7 @@ impl DataStreamingClient for StreamingServiceClient { notification_and_feedback, }); // We can ignore the receiver as no data will be sent. 
- let _ = self.send_stream_request(client_request).await?; + let _receiver = self.send_stream_request(client_request).await?; Ok(()) } } diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/driver.rs b/state-sync/state-sync-v2/state-sync-driver/src/tests/driver.rs index 13d7384fce58f..d5edacf978c7d 100644 --- a/state-sync/state-sync-v2/state-sync-driver/src/tests/driver.rs +++ b/state-sync/state-sync-v2/state-sync-driver/src/tests/driver.rs @@ -305,6 +305,7 @@ async fn create_driver_for_tests( node_config.state_sync.aptos_data_client, node_config.base.clone(), time_service.clone(), + db_rw.reader.clone(), network_client, None, ); diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/driver_factory.rs b/state-sync/state-sync-v2/state-sync-driver/src/tests/driver_factory.rs index 39b5195c5ebb3..c7270e89ec9b9 100644 --- a/state-sync/state-sync-v2/state-sync-driver/src/tests/driver_factory.rs +++ b/state-sync/state-sync-v2/state-sync-driver/src/tests/driver_factory.rs @@ -76,6 +76,7 @@ fn test_new_initialized_configs() { node_config.state_sync.aptos_data_client, node_config.base.clone(), TimeService::mock(), + db_rw.reader.clone(), network_client, None, ); diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/storage_synchronizer.rs b/state-sync/state-sync-v2/state-sync-driver/src/tests/storage_synchronizer.rs index 4bf2121e85115..678b8aae4daf2 100644 --- a/state-sync/state-sync-v2/state-sync-driver/src/tests/storage_synchronizer.rs +++ b/state-sync/state-sync-v2/state-sync-driver/src/tests/storage_synchronizer.rs @@ -451,7 +451,7 @@ async fn test_save_states_invalid_chunk() { ); // Initialize the state synchronizer - let _ = storage_synchronizer + let _join_handle = storage_synchronizer .initialize_state_synchronizer( vec![create_epoch_ending_ledger_info()], create_epoch_ending_ledger_info(), diff --git a/state-sync/storage-service/server/src/handler.rs b/state-sync/storage-service/server/src/handler.rs index a54c10d082a73..1805dc7c8a3bb 100644 --- a/state-sync/storage-service/server/src/handler.rs +++ b/state-sync/storage-service/server/src/handler.rs @@ -6,17 +6,16 @@ use crate::{ logging::{LogEntry, LogSchema}, metrics, metrics::{ - increment_counter, start_timer, LRU_CACHE_HIT, LRU_CACHE_PROBE, SUBSCRIPTION_EVENT_ADD, + increment_counter, start_timer, LRU_CACHE_HIT, LRU_CACHE_PROBE, OPTIMISTIC_FETCH_ADD, }, moderator::RequestModerator, network::ResponseSender, + optimistic_fetch::OptimisticFetchRequest, storage::StorageReaderInterface, - subscription::DataSubscriptionRequest, }; use aptos_config::network_id::PeerNetworkId; use aptos_infallible::{Mutex, RwLock}; use aptos_logger::{debug, error, sample, sample::SampleRate, trace, warn}; -use aptos_network::ProtocolId; use aptos_storage_service_types::{ requests::{ DataRequest, EpochEndingLedgerInfoRequest, StateValuesWithProofRequest, @@ -44,7 +43,7 @@ const SUMMARY_LOG_FREQUENCY_SECS: u64 = 5; // The frequency to log the storage s #[derive(Clone)] pub struct Handler { cached_storage_server_summary: Arc>, - data_subscriptions: Arc>>, + optimistic_fetches: Arc>>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, @@ -54,7 +53,7 @@ pub struct Handler { impl Handler { pub fn new( cached_storage_server_summary: Arc>, - data_subscriptions: Arc>>, + optimistic_fetches: Arc>>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, @@ -63,7 +62,7 @@ impl Handler { Self { storage, cached_storage_server_summary, - data_subscriptions, + optimistic_fetches, lru_response_cache, request_moderator, 
time_service, @@ -75,30 +74,24 @@ impl Handler { pub fn process_request_and_respond( &self, peer_network_id: PeerNetworkId, - protocol_id: ProtocolId, request: StorageServiceRequest, response_sender: ResponseSender, ) { // Update the request count increment_counter( &metrics::STORAGE_REQUESTS_RECEIVED, - protocol_id, + peer_network_id.network_id(), request.get_label(), ); - // Handle any data subscriptions - if request.data_request.is_data_subscription_request() { - self.handle_subscription_request( - peer_network_id, - protocol_id, - request, - response_sender, - ); + // Handle any optimistic fetch requests + if request.data_request.is_optimistic_fetch() { + self.handle_optimistic_fetch_request(peer_network_id, request, response_sender); return; } // Process the request and return the response to the client - let response = self.process_request(&peer_network_id, protocol_id, request.clone(), false); + let response = self.process_request(&peer_network_id, request.clone(), false); self.send_response(request, response, response_sender); } @@ -106,24 +99,23 @@ impl Handler { pub(crate) fn process_request( &self, peer_network_id: &PeerNetworkId, - protocol: ProtocolId, request: StorageServiceRequest, - subscription_related: bool, + optimistic_fetch_related: bool, ) -> aptos_storage_service_types::Result { // Time the request processing (the timer will stop when it's dropped) let _timer = start_timer( &metrics::STORAGE_REQUEST_PROCESSING_LATENCY, - protocol, + peer_network_id.network_id(), request.get_label(), ); // Process the request and handle any errors - match self.validate_and_handle_request(peer_network_id, protocol, &request) { + match self.validate_and_handle_request(peer_network_id, &request) { Err(error) => { // Update the error counter increment_counter( &metrics::STORAGE_ERRORS_ENCOUNTERED, - protocol, + peer_network_id.network_id(), error.get_label().into(), ); @@ -134,7 +126,7 @@ impl Handler { .error(&error) .peer_network_id(peer_network_id) .request(&request) - .subscription_related(subscription_related) + .optimistic_fetch_related(optimistic_fetch_related) ); ); @@ -151,7 +143,7 @@ impl Handler { // Update the successful response counter increment_counter( &metrics::STORAGE_RESPONSES_SENT, - protocol, + peer_network_id.network_id(), response.get_label(), ); @@ -165,7 +157,6 @@ impl Handler { fn validate_and_handle_request( &self, peer_network_id: &PeerNetworkId, - protocol: ProtocolId, request: &StorageServiceRequest, ) -> Result { // Validate the request with the moderator @@ -184,7 +175,7 @@ impl Handler { StorageServiceResponse::new(data_response, request.use_compression) .map_err(|error| error.into()) }, - _ => self.process_cachable_request(protocol, request), + _ => self.process_cachable_request(peer_network_id, request), } } @@ -199,34 +190,32 @@ impl Handler { response_sender.send(response); } - /// Handles the given data subscription request - pub fn handle_subscription_request( + /// Handles the given optimistic fetch request + pub fn handle_optimistic_fetch_request( &self, peer_network_id: PeerNetworkId, - protocol_id: ProtocolId, request: StorageServiceRequest, response_sender: ResponseSender, ) { - // Create the subscription request - let subscription_request = DataSubscriptionRequest::new( - protocol_id, + // Create the optimistic fetch request + let optimistic_fetch = OptimisticFetchRequest::new( request.clone(), response_sender, self.time_service.clone(), ); - // Store the subscription and check if any existing subscriptions were found + // Store the optimistic 
fetch and check if any existing fetches were found if self - .data_subscriptions + .optimistic_fetches .lock() - .insert(peer_network_id, subscription_request) + .insert(peer_network_id, optimistic_fetch) .is_some() { sample!( SampleRate::Duration(Duration::from_secs(INVALID_REQUEST_LOG_FREQUENCY_SECS)), - warn!(LogSchema::new(LogEntry::SubscriptionRequest) + warn!(LogSchema::new(LogEntry::OptimisticFetchRequest) .error(&Error::InvalidRequest( - "An active subscription was already found for the peer!".into() + "An active optimistic fetch was already found for the peer!".into() )) .peer_network_id(&peer_network_id) .request(&request) @@ -234,11 +223,11 @@ impl Handler { ); } - // Update the subscription metrics + // Update the optimistic fetch metrics increment_counter( - &metrics::SUBSCRIPTION_EVENT, - protocol_id, - SUBSCRIPTION_EVENT_ADD.into(), + &metrics::OPTIMISTIC_FETCH_EVENTS, + peer_network_id.network_id(), + OPTIMISTIC_FETCH_ADD.into(), ); } @@ -246,14 +235,22 @@ impl Handler { /// might already be cached. fn process_cachable_request( &self, - protocol: ProtocolId, + peer_network_id: &PeerNetworkId, request: &StorageServiceRequest, ) -> aptos_storage_service_types::Result { - increment_counter(&metrics::LRU_CACHE_EVENT, protocol, LRU_CACHE_PROBE.into()); + increment_counter( + &metrics::LRU_CACHE_EVENT, + peer_network_id.network_id(), + LRU_CACHE_PROBE.into(), + ); // Check if the response is already in the cache if let Some(response) = self.lru_response_cache.lock().get(request) { - increment_counter(&metrics::LRU_CACHE_EVENT, protocol, LRU_CACHE_HIT.into()); + increment_counter( + &metrics::LRU_CACHE_EVENT, + peer_network_id.network_id(), + LRU_CACHE_HIT.into(), + ); return Ok(response.clone()); } diff --git a/state-sync/storage-service/server/src/lib.rs b/state-sync/storage-service/server/src/lib.rs index 6507e6fa6f42b..a1b971d64346a 100644 --- a/state-sync/storage-service/server/src/lib.rs +++ b/state-sync/storage-service/server/src/lib.rs @@ -23,9 +23,9 @@ use futures::stream::StreamExt; use handler::Handler; use lru::LruCache; use moderator::RequestModerator; +use optimistic_fetch::OptimisticFetchRequest; use std::{collections::HashMap, sync::Arc, time::Duration}; use storage::StorageReaderInterface; -use subscription::DataSubscriptionRequest; use thiserror::Error; use tokio::runtime::Handle; @@ -35,8 +35,8 @@ mod logging; pub mod metrics; mod moderator; pub mod network; +mod optimistic_fetch; pub mod storage; -mod subscription; #[cfg(test)] mod tests; @@ -54,14 +54,14 @@ pub struct StorageServiceServer { // request. This is refreshed periodically. cached_storage_server_summary: Arc>, - // A set of active subscriptions for peers waiting for new data - data_subscriptions: Arc>>, - // An LRU cache for commonly requested data items. // Note: This is not just a database cache because it contains // responses that have already been serialized and compressed. 
lru_response_cache: Arc>>, + // A set of active optimistic fetches for peers waiting for new data + optimistic_fetches: Arc>>, + // A moderator for incoming peer requests request_moderator: Arc, } @@ -78,7 +78,7 @@ impl StorageServiceServer { let bounded_executor = BoundedExecutor::new(config.max_concurrent_requests as usize, executor); let cached_storage_server_summary = Arc::new(RwLock::new(StorageServerSummary::default())); - let data_subscriptions = Arc::new(Mutex::new(HashMap::new())); + let optimistic_fetches = Arc::new(Mutex::new(HashMap::new())); let lru_response_cache = Arc::new(Mutex::new(LruCache::new( config.max_lru_cache_size as usize, ))); @@ -96,8 +96,8 @@ impl StorageServiceServer { network_requests, time_service, cached_storage_server_summary, - data_subscriptions, lru_response_cache, + optimistic_fetches, request_moderator, } } @@ -136,11 +136,11 @@ impl StorageServiceServer { .await; } - /// Spawns a non-terminating task that handles subscriptions - async fn spawn_subscription_handler(&mut self) { + /// Spawns a non-terminating task that handles optimistic fetches + async fn spawn_optimistic_fetch_handler(&mut self) { let cached_storage_server_summary = self.cached_storage_server_summary.clone(); let config = self.config; - let data_subscriptions = self.data_subscriptions.clone(); + let optimistic_fetches = self.optimistic_fetches.clone(); let lru_response_cache = self.lru_response_cache.clone(); let request_moderator = self.request_moderator.clone(); let storage = self.storage.clone(); @@ -154,23 +154,23 @@ impl StorageServiceServer { let ticker = time_service.interval(duration); futures::pin_mut!(ticker); - // Periodically check the data subscriptions + // Periodically check the optimistic fetches loop { ticker.next().await; - // Check and handle the active subscriptions - if let Err(error) = subscription::handle_active_data_subscriptions( + // Check and handle the active optimistic fetches + if let Err(error) = optimistic_fetch::handle_active_optimistic_fetches( cached_storage_server_summary.clone(), config, - data_subscriptions.clone(), + optimistic_fetches.clone(), lru_response_cache.clone(), request_moderator.clone(), storage.clone(), time_service.clone(), ) { - error!(LogSchema::new(LogEntry::SubscriptionRefresh) + error!(LogSchema::new(LogEntry::OptimisticFetchRefresh) .error(&error) - .message("Failed to handle active data subscriptions!")); + .message("Failed to handle active optimistic fetches!")); } } }) @@ -212,8 +212,8 @@ impl StorageServiceServer { // Spawn the refresher for the storage summary cache self.spawn_storage_summary_refresher().await; - // Spawn the subscription handler - self.spawn_subscription_handler().await; + // Spawn the optimistic fetch handler + self.spawn_optimistic_fetch_handler().await; // Spawn the refresher for the request moderator self.spawn_moderator_peer_refresher().await; @@ -236,7 +236,7 @@ impl StorageServiceServer { // avoid starving other async tasks on the same runtime. 
let storage = self.storage.clone(); let cached_storage_server_summary = self.cached_storage_server_summary.clone(); - let data_subscriptions = self.data_subscriptions.clone(); + let optimistic_fetches = self.optimistic_fetches.clone(); let lru_response_cache = self.lru_response_cache.clone(); let request_moderator = self.request_moderator.clone(); let time_service = self.time_service.clone(); @@ -244,7 +244,7 @@ impl StorageServiceServer { .spawn_blocking(move || { Handler::new( cached_storage_server_summary, - data_subscriptions, + optimistic_fetches, lru_response_cache, request_moderator, storage, @@ -252,7 +252,6 @@ impl StorageServiceServer { ) .process_request_and_respond( peer_network_id, - protocol_id, storage_service_request, network_request.response_sender, ); @@ -266,6 +265,14 @@ impl StorageServiceServer { pub(crate) fn get_request_moderator(&self) -> Arc { self.request_moderator.clone() } + + #[cfg(test)] + /// Returns a copy of the active optimistic fetches for test purposes + pub(crate) fn get_optimistic_fetches( + &self, + ) -> Arc>> { + self.optimistic_fetches.clone() + } } /// Refreshes the cached storage server summary diff --git a/state-sync/storage-service/server/src/logging.rs b/state-sync/storage-service/server/src/logging.rs index df4b86159327b..21ba48fbf217c 100644 --- a/state-sync/storage-service/server/src/logging.rs +++ b/state-sync/storage-service/server/src/logging.rs @@ -13,10 +13,10 @@ pub struct LogSchema<'a> { name: LogEntry, error: Option<&'a Error>, message: Option<&'a str>, + optimistic_fetch_related: Option, peer_network_id: Option<&'a PeerNetworkId>, response: Option<&'a str>, request: Option<&'a StorageServiceRequest>, - subscription_related: Option, } impl<'a> LogSchema<'a> { @@ -25,10 +25,10 @@ impl<'a> LogSchema<'a> { name, error: None, message: None, + optimistic_fetch_related: None, peer_network_id: None, response: None, request: None, - subscription_related: None, } } } @@ -36,13 +36,13 @@ impl<'a> LogSchema<'a> { #[derive(Clone, Copy, Serialize)] #[serde(rename_all = "snake_case")] pub enum LogEntry { + OptimisticFetchRefresh, + OptimisticFetchRequest, + OptimisticFetchResponse, ReceivedStorageRequest, RequestModeratorIgnoredPeer, RequestModeratorRefresh, SentStorageResponse, StorageServiceError, StorageSummaryRefresh, - SubscriptionRefresh, - SubscriptionResponse, - SubscriptionRequest, } diff --git a/state-sync/storage-service/server/src/metrics.rs b/state-sync/storage-service/server/src/metrics.rs index 38a0b5e41cbdb..e7b56d471aece 100644 --- a/state-sync/storage-service/server/src/metrics.rs +++ b/state-sync/storage-service/server/src/metrics.rs @@ -2,18 +2,18 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use aptos_config::network_id::NetworkId; use aptos_metrics_core::{ register_histogram_vec, register_int_counter_vec, register_int_gauge_vec, HistogramTimer, HistogramVec, IntCounterVec, IntGaugeVec, }; -use aptos_network::ProtocolId; use once_cell::sync::Lazy; /// Useful metric constants for the storage service pub const LRU_CACHE_HIT: &str = "lru_cache_hit"; pub const LRU_CACHE_PROBE: &str = "lru_cache_probe"; -pub const SUBSCRIPTION_EVENT_ADD: &str = "subscription_event_add"; -pub const SUBSCRIPTION_EVENT_EXPIRE: &str = "subscription_event_expire"; +pub const OPTIMISTIC_FETCH_ADD: &str = "optimistic_fetch_add"; +pub const OPTIMISTIC_FETCH_EXPIRE: &str = "optimistic_fetch_expire"; /// Gauge for tracking the number of actively ignored peers pub static IGNORED_PEER_COUNT: Lazy = Lazy::new(|| { @@ -30,7 +30,7 @@ pub static LRU_CACHE_EVENT: Lazy = Lazy::new(|| { register_int_counter_vec!( "aptos_storage_service_server_lru_cache", "Counters for lru cache events in the storage server", - &["protocol", "event"] + &["network_id", "event"] ) .unwrap() }); @@ -46,6 +46,26 @@ pub static NETWORK_FRAME_OVERFLOW: Lazy = Lazy::new(|| { .unwrap() }); +/// Counter for optimistic fetch request events +pub static OPTIMISTIC_FETCH_EVENTS: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "aptos_storage_service_server_optimistic_fetch_event", + "Counters related to optimistic fetch events", + &["network_id", "event"] + ) + .unwrap() +}); + +/// Time it takes to process a storage request +pub static OPTIMISTIC_FETCH_LATENCIES: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_storage_service_server_optimistic_fetch_latency", + "Time it takes to process an optimistic fetch request", + &["network_id", "request_type"] + ) + .unwrap() +}); + /// Counter for pending network events to the storage service (server-side) pub static PENDING_STORAGE_SERVER_NETWORK_EVENTS: Lazy = Lazy::new(|| { register_int_counter_vec!( @@ -61,7 +81,7 @@ pub static STORAGE_ERRORS_ENCOUNTERED: Lazy = Lazy::new(|| { register_int_counter_vec!( "aptos_storage_service_server_errors", "Counters related to the storage server errors encountered", - &["protocol", "error_type"] + &["network_id", "error_type"] ) .unwrap() }); @@ -71,7 +91,7 @@ pub static STORAGE_REQUESTS_RECEIVED: Lazy = Lazy::new(|| { register_int_counter_vec!( "aptos_storage_service_server_requests_received", "Counters related to the storage server requests received", - &["protocol", "request_type"] + &["network_id", "request_type"] ) .unwrap() }); @@ -81,7 +101,7 @@ pub static STORAGE_RESPONSES_SENT: Lazy = Lazy::new(|| { register_int_counter_vec!( "aptos_storage_service_server_responses_sent", "Counters related to the storage server responses sent", - &["protocol", "response_type"] + &["network_id", "response_type"] ) .unwrap() }); @@ -91,17 +111,7 @@ pub static STORAGE_REQUEST_PROCESSING_LATENCY: Lazy = Lazy::new(|| register_histogram_vec!( "aptos_storage_service_server_request_latency", "Time it takes to process a storage service request", - &["protocol", "request_type"] - ) - .unwrap() -}); - -/// Counter for subscription request events -pub static SUBSCRIPTION_EVENT: Lazy = Lazy::new(|| { - register_int_counter_vec!( - "aptos_storage_service_server_subscription_event", - "Counters related to subscription events", - &["protocol", "event"] + &["network_id", "request_type"] ) .unwrap() }); @@ -114,12 +124,24 @@ pub fn increment_network_frame_overflow(response_type: &str) { } /// Increments the given counter with the provided 
label values. -pub fn increment_counter(counter: &Lazy, protocol: ProtocolId, label: String) { +pub fn increment_counter(counter: &Lazy, network_id: NetworkId, label: String) { counter - .with_label_values(&[protocol.as_str(), &label]) + .with_label_values(&[network_id.as_str(), &label]) .inc(); } +/// Observes the value for the provided histogram and label +pub fn observe_value_with_label( + histogram: &Lazy, + network_id: NetworkId, + label: &str, + value: f64, +) { + histogram + .with_label_values(&[network_id.as_str(), label]) + .observe(value) +} + /// Sets the gauge with the specific label and value pub fn set_gauge(counter: &Lazy, label: &str, value: u64) { counter.with_label_values(&[label]).set(value as i64); @@ -128,10 +150,10 @@ pub fn set_gauge(counter: &Lazy, label: &str, value: u64) { /// Starts the timer for the provided histogram and label values. pub fn start_timer( histogram: &Lazy, - protocol: ProtocolId, + network_id: NetworkId, label: String, ) -> HistogramTimer { histogram - .with_label_values(&[protocol.as_str(), &label]) + .with_label_values(&[network_id.as_str(), &label]) .start_timer() } diff --git a/state-sync/storage-service/server/src/subscription.rs b/state-sync/storage-service/server/src/optimistic_fetch.rs similarity index 73% rename from state-sync/storage-service/server/src/subscription.rs rename to state-sync/storage-service/server/src/optimistic_fetch.rs index 0609b1d481f6b..cb1e78cfa472f 100644 --- a/state-sync/storage-service/server/src/subscription.rs +++ b/state-sync/storage-service/server/src/optimistic_fetch.rs @@ -5,7 +5,7 @@ use crate::{ error::Error, handler::Handler, metrics, - metrics::{increment_counter, SUBSCRIPTION_EVENT_EXPIRE}, + metrics::{increment_counter, OPTIMISTIC_FETCH_EXPIRE}, moderator::RequestModerator, network::ResponseSender, storage::StorageReaderInterface, @@ -14,7 +14,6 @@ use crate::{ use aptos_config::{config::StorageServiceConfig, network_id::PeerNetworkId}; use aptos_infallible::{Mutex, RwLock}; use aptos_logger::warn; -use aptos_network::ProtocolId; use aptos_storage_service_types::{ requests::{ DataRequest, EpochEndingLedgerInfoRequest, StorageServiceRequest, @@ -28,32 +27,29 @@ use aptos_types::ledger_info::LedgerInfoWithSignatures; use lru::LruCache; use std::{cmp::min, collections::HashMap, sync::Arc, time::Instant}; -/// A subscription for data received by a client -pub struct DataSubscriptionRequest { - protocol: ProtocolId, +/// An optimistic fetch request from a peer +pub struct OptimisticFetchRequest { request: StorageServiceRequest, response_sender: ResponseSender, - subscription_start_time: Instant, + fetch_start_time: Instant, time_service: TimeService, } -impl DataSubscriptionRequest { +impl OptimisticFetchRequest { pub fn new( - protocol: ProtocolId, request: StorageServiceRequest, response_sender: ResponseSender, time_service: TimeService, ) -> Self { Self { - protocol, request, response_sender, - subscription_start_time: time_service.now(), + fetch_start_time: time_service.now(), time_service, } } - /// Creates a new storage service request to satisfy the subscription + /// Creates a new storage service request to satisfy the optimistic fetch /// using the new data at the specified `target_ledger_info`. 
fn get_storage_request_for_missing_data( &self, @@ -114,7 +110,7 @@ impl DataSubscriptionRequest { }, ) }, - request => unreachable!("Unexpected subscription request: {:?}", request), + request => unreachable!("Unexpected optimistic fetch request: {:?}", request), }; let storage_request = StorageServiceRequest::new(data_request, self.request.use_compression); @@ -127,7 +123,7 @@ impl DataSubscriptionRequest { DataRequest::GetNewTransactionOutputsWithProof(request) => request.known_version, DataRequest::GetNewTransactionsWithProof(request) => request.known_version, DataRequest::GetNewTransactionsOrOutputsWithProof(request) => request.known_version, - request => unreachable!("Unexpected subscription request: {:?}", request), + request => unreachable!("Unexpected optimistic fetch request: {:?}", request), } } @@ -137,7 +133,7 @@ impl DataSubscriptionRequest { DataRequest::GetNewTransactionOutputsWithProof(request) => request.known_epoch, DataRequest::GetNewTransactionsWithProof(request) => request.known_epoch, DataRequest::GetNewTransactionsOrOutputsWithProof(request) => request.known_epoch, - request => unreachable!("Unexpected subscription request: {:?}", request), + request => unreachable!("Unexpected optimistic fetch request: {:?}", request), } } @@ -152,73 +148,88 @@ impl DataSubscriptionRequest { DataRequest::GetNewTransactionsOrOutputsWithProof(_) => { config.max_transaction_output_chunk_size }, - request => unreachable!("Unexpected subscription request: {:?}", request), + request => unreachable!("Unexpected optimistic fetch request: {:?}", request), } } - /// Returns true iff the subscription has expired + /// Returns true iff the optimistic fetch has expired fn is_expired(&self, timeout_ms: u64) -> bool { let current_time = self.time_service.now(); let elapsed_time = current_time - .duration_since(self.subscription_start_time) + .duration_since(self.fetch_start_time) .as_millis(); elapsed_time > timeout_ms as u128 } } -/// Handles ready (and expired) data subscriptions -pub(crate) fn handle_active_data_subscriptions( +/// Handles ready (and expired) optimistic fetches +pub(crate) fn handle_active_optimistic_fetches( cached_storage_server_summary: Arc>, config: StorageServiceConfig, - data_subscriptions: Arc>>, + optimistic_fetches: Arc>>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, time_service: TimeService, ) -> Result<(), Error> { - // Remove all expired subscriptions - remove_expired_data_subscriptions(config, data_subscriptions.clone()); + // Remove all expired optimistic fetches + remove_expired_optimistic_fetches(config, optimistic_fetches.clone()); - // Identify the peers with ready subscriptions - let peers_with_ready_subscriptions = get_peers_with_ready_subscriptions( + // Identify the peers with ready optimistic fetches + let peers_with_ready_optimistic_fetches = get_peers_with_ready_optimistic_fetches( cached_storage_server_summary.clone(), - data_subscriptions.clone(), + optimistic_fetches.clone(), lru_response_cache.clone(), request_moderator.clone(), storage.clone(), time_service.clone(), )?; - // Remove and handle the ready subscriptions - for (peer, target_ledger_info) in peers_with_ready_subscriptions { - if let Some(data_subscription) = data_subscriptions.clone().lock().remove(&peer) { + // Remove and handle the ready optimistic fetches + for (peer, target_ledger_info) in peers_with_ready_optimistic_fetches { + if let Some(optimistic_fetch) = optimistic_fetches.clone().lock().remove(&peer) { + let optimistic_fetch_start_time = 
optimistic_fetch.fetch_start_time; + let optimistic_fetch_request = optimistic_fetch.request.clone(); + + // Notify the peer of the new data if let Err(error) = notify_peer_of_new_data( cached_storage_server_summary.clone(), config, - data_subscriptions.clone(), + optimistic_fetches.clone(), lru_response_cache.clone(), request_moderator.clone(), storage.clone(), time_service.clone(), &peer, - data_subscription, + optimistic_fetch, target_ledger_info, ) { - warn!(LogSchema::new(LogEntry::SubscriptionResponse) + warn!(LogSchema::new(LogEntry::OptimisticFetchResponse) .error(&Error::UnexpectedErrorEncountered(error.to_string()))); } + + // Update the optimistic fetch latency metric + let optimistic_fetch_duration = time_service + .now() + .duration_since(optimistic_fetch_start_time); + metrics::observe_value_with_label( + &metrics::OPTIMISTIC_FETCH_LATENCIES, + peer.network_id(), + &optimistic_fetch_request.get_label(), + optimistic_fetch_duration.as_secs_f64(), + ); } } Ok(()) } -/// Identifies the data subscriptions that can be handled now. -/// Returns the list of peers that made those subscriptions +/// Identifies the optimistic fetches that can be handled now. +/// Returns the list of peers that made those optimistic fetches /// alongside the ledger info at the target version for the peer. -pub(crate) fn get_peers_with_ready_subscriptions( +pub(crate) fn get_peers_with_ready_optimistic_fetches( cached_storage_server_summary: Arc>, - data_subscriptions: Arc>>, + optimistic_fetches: Arc>>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, @@ -233,65 +244,63 @@ pub(crate) fn get_peers_with_ready_subscriptions( let highest_synced_version = highest_synced_ledger_info.ledger_info().version(); let highest_synced_epoch = highest_synced_ledger_info.ledger_info().epoch(); - // Identify the peers with ready subscriptions - let mut ready_subscriptions = vec![]; - let mut invalid_peer_subscriptions = vec![]; - for (peer, data_subscription) in data_subscriptions.lock().iter() { - let highest_known_version = data_subscription.highest_known_version(); + // Identify the peers with ready optimistic fetches + let mut ready_optimistic_fetches = vec![]; + let mut invalid_peer_optimistic_fetches = vec![]; + for (peer, optimistic_fetch) in optimistic_fetches.lock().iter() { + let highest_known_version = optimistic_fetch.highest_known_version(); if highest_known_version < highest_synced_version { - let highest_known_epoch = data_subscription.highest_known_epoch(); + let highest_known_epoch = optimistic_fetch.highest_known_epoch(); if highest_known_epoch < highest_synced_epoch { // The peer needs to sync to their epoch ending ledger info let epoch_ending_ledger_info = get_epoch_ending_ledger_info( cached_storage_server_summary.clone(), - data_subscriptions.clone(), + optimistic_fetches.clone(), highest_known_epoch, lru_response_cache.clone(), request_moderator.clone(), peer, - data_subscription.protocol, storage.clone(), time_service.clone(), )?; - // Check that we haven't been sent an invalid subscription request + // Check that we haven't been sent an invalid optimistic fetch request // (i.e., a request that does not respect an epoch boundary). 
if epoch_ending_ledger_info.ledger_info().version() <= highest_known_version { - invalid_peer_subscriptions.push(*peer); + invalid_peer_optimistic_fetches.push(*peer); } else { - ready_subscriptions.push((*peer, epoch_ending_ledger_info)); + ready_optimistic_fetches.push((*peer, epoch_ending_ledger_info)); } } else { - ready_subscriptions.push((*peer, highest_synced_ledger_info.clone())); + ready_optimistic_fetches.push((*peer, highest_synced_ledger_info.clone())); }; } } - // Remove the invalid subscriptions - for peer in invalid_peer_subscriptions { - if let Some(data_subscription) = data_subscriptions.lock().remove(&peer) { - warn!(LogSchema::new(LogEntry::SubscriptionRefresh) + // Remove the invalid optimistic fetches + for peer in invalid_peer_optimistic_fetches { + if let Some(optimistic_fetch) = optimistic_fetches.lock().remove(&peer) { + warn!(LogSchema::new(LogEntry::OptimisticFetchRefresh) .error(&Error::InvalidRequest( "Mismatch between known version and epoch!".into() )) - .request(&data_subscription.request) - .message("Dropping invalid subscription request!")); + .request(&optimistic_fetch.request) + .message("Dropping invalid optimistic fetch request!")); } } - // Return the ready subscriptions - Ok(ready_subscriptions) + // Return the ready optimistic fetches + Ok(ready_optimistic_fetches) } /// Gets the epoch ending ledger info at the given epoch fn get_epoch_ending_ledger_info( cached_storage_server_summary: Arc>, - data_subscriptions: Arc>>, + optimistic_fetches: Arc>>, epoch: u64, lru_response_cache: Arc>>, request_moderator: Arc, peer_network_id: &PeerNetworkId, - protocol: ProtocolId, storage: T, time_service: TimeService, ) -> aptos_storage_service_types::Result { @@ -308,14 +317,13 @@ fn get_epoch_ending_ledger_info( // Process the request let handler = Handler::new( cached_storage_server_summary, - data_subscriptions, + optimistic_fetches, lru_response_cache, request_moderator, storage, time_service, ); - let storage_response = - handler.process_request(peer_network_id, protocol, storage_request, true); + let storage_response = handler.process_request(peer_network_id, storage_request, true); // Verify the response match storage_response { @@ -341,43 +349,39 @@ fn get_epoch_ending_ledger_info( } } -/// Notifies a subscriber of new data according to the target ledger info. +/// Notifies a peer of new data according to the target ledger info. /// -/// Note: we don't need to check the size of the subscription response +/// Note: we don't need to check the size of the optimistic fetch response /// because: (i) each sub-part should already be checked; and (ii) -/// subscription responses are best effort. +/// optimistic fetch responses are best effort. 
fn notify_peer_of_new_data( cached_storage_server_summary: Arc>, config: StorageServiceConfig, - data_subscriptions: Arc>>, + optimistic_fetches: Arc>>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, time_service: TimeService, peer_network_id: &PeerNetworkId, - subscription: DataSubscriptionRequest, + optimistic_fetch: OptimisticFetchRequest, target_ledger_info: LedgerInfoWithSignatures, ) -> aptos_storage_service_types::Result<(), Error> { - match subscription.get_storage_request_for_missing_data(config, &target_ledger_info) { + match optimistic_fetch.get_storage_request_for_missing_data(config, &target_ledger_info) { Ok(storage_request) => { // Handle the storage service request to fetch the missing data let use_compression = storage_request.use_compression; let handler = Handler::new( cached_storage_server_summary, - data_subscriptions, + optimistic_fetches, lru_response_cache, request_moderator, storage, time_service, ); - let storage_response = handler.process_request( - peer_network_id, - subscription.protocol, - storage_request.clone(), - true, - ); + let storage_response = + handler.process_request(peer_network_id, storage_request.clone(), true); - // Transform the missing data into a subscription response + // Transform the missing data into an optimistic fetch response let transformed_data_response = match storage_response { Ok(storage_response) => match storage_response.get_data_response() { Ok(DataResponse::TransactionsWithProof(transactions_with_proof)) => { @@ -441,7 +445,7 @@ fn notify_peer_of_new_data( handler.send_response( storage_request, Ok(storage_response), - subscription.response_sender, + optimistic_fetch.response_sender, ); Ok(()) }, @@ -449,23 +453,24 @@ fn notify_peer_of_new_data( } } -/// Removes all expired data subscriptions -pub(crate) fn remove_expired_data_subscriptions( +/// Removes all expired optimistic fetches +pub(crate) fn remove_expired_optimistic_fetches( config: StorageServiceConfig, - data_subscriptions: Arc>>, + optimistic_fetches: Arc>>, ) { - data_subscriptions.lock().retain(|_, data_subscription| { - // Update the expired subscription metrics - if data_subscription.is_expired(config.max_subscription_period_ms) { - let protocol = data_subscription.protocol; - increment_counter( - &metrics::SUBSCRIPTION_EVENT, - protocol, - SUBSCRIPTION_EVENT_EXPIRE.into(), - ); - } + optimistic_fetches + .lock() + .retain(|peer_network_id, optimistic_fetch| { + // Update the expired optimistic fetch metrics + if optimistic_fetch.is_expired(config.max_optimistic_fetch_period) { + increment_counter( + &metrics::OPTIMISTIC_FETCH_EVENTS, + peer_network_id.network_id(), + OPTIMISTIC_FETCH_EXPIRE.into(), + ); + } - // Only retain non-expired subscriptions - !data_subscription.is_expired(config.max_subscription_period_ms) - }); + // Only retain non-expired optimistic fetches + !optimistic_fetch.is_expired(config.max_optimistic_fetch_period) + }); } diff --git a/state-sync/storage-service/server/src/tests/mock.rs b/state-sync/storage-service/server/src/tests/mock.rs index 14a2b38cb20a0..f1c5eb697c27a 100644 --- a/state-sync/storage-service/server/src/tests/mock.rs +++ b/state-sync/storage-service/server/src/tests/mock.rs @@ -347,8 +347,8 @@ mock! 
{ } } -/// Creates a mock db with the basic expectations required to handle subscription requests -pub fn create_mock_db_for_subscription( +/// Creates a mock db with the basic expectations required to handle optimistic fetch requests +pub fn create_mock_db_for_optimistic_fetch( highest_ledger_info_clone: LedgerInfoWithSignatures, lowest_version: Version, ) -> MockDatabaseReader { diff --git a/state-sync/storage-service/server/src/tests/mod.rs b/state-sync/storage-service/server/src/tests/mod.rs index 64a4c67a2885f..7c6c1bb21e656 100644 --- a/state-sync/storage-service/server/src/tests/mod.rs +++ b/state-sync/storage-service/server/src/tests/mod.rs @@ -8,11 +8,11 @@ mod new_transaction_outputs; mod new_transactions; mod new_transactions_or_outputs; mod number_of_states; +mod optimistic_fetch; mod protocol_version; mod request_moderator; mod state_values; mod storage_summary; -mod subscription; mod transaction_outputs; mod transactions; mod transactions_or_outputs; diff --git a/state-sync/storage-service/server/src/tests/new_transaction_outputs.rs b/state-sync/storage-service/server/src/tests/new_transaction_outputs.rs index 8754c0eb6e07c..188a3375b204d 100644 --- a/state-sync/storage-service/server/src/tests/new_transaction_outputs.rs +++ b/state-sync/storage-service/server/src/tests/new_transaction_outputs.rs @@ -37,7 +37,7 @@ async fn test_get_new_transaction_outputs() { // Create the mock db reader let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + mock::create_mock_db_for_optimistic_fetch(highest_ledger_info.clone(), lowest_version); utils::expect_get_transaction_outputs( &mut db_reader, peer_version + 1, @@ -48,17 +48,21 @@ async fn test_get_new_transaction_outputs() { // Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transaction outputs + // Send a request to optimistically fetch new transaction outputs let mut response_receiver = get_new_outputs_with_proof(&mut mock_client, peer_version, highest_epoch).await; - // Verify no subscription response has been received yet + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Verify no optimistic fetch response has been received yet assert_none!(response_receiver.try_recv().unwrap()); - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data verify_new_transaction_outputs_with_proof( @@ -97,7 +101,7 @@ async fn test_get_new_transaction_outputs_different_networks() { // Create the mock db reader let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + mock::create_mock_db_for_optimistic_fetch(highest_ledger_info.clone(), lowest_version); utils::expect_get_transaction_outputs( &mut db_reader, peer_version_1 + 1, @@ -115,9 +119,10 @@ async fn test_get_new_transaction_outputs_different_networks() { // Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), 
None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transaction outputs for peer 1 + // Send a request to optimistically fetch new transaction outputs for peer 1 let peer_id = PeerId::random(); let peer_network_1 = PeerNetworkId::new(NetworkId::Validator, peer_id); let mut response_receiver_1 = get_new_outputs_with_proof_for_peer( @@ -128,7 +133,7 @@ async fn test_get_new_transaction_outputs_different_networks() { ) .await; - // Send a request to subscribe to new transaction outputs for peer 2 + // Send a request to optimistically fetch new transaction outputs for peer 2 let peer_network_2 = PeerNetworkId::new(NetworkId::Vfn, peer_id); let mut response_receiver_2 = get_new_outputs_with_proof_for_peer( &mut mock_client, @@ -138,12 +143,15 @@ async fn test_get_new_transaction_outputs_different_networks() { ) .await; - // Verify no subscription response has been received yet + // Wait until the optimistic fetches are active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 2).await; + + // Verify no optimistic fetch response has been received yet assert_none!(response_receiver_1.try_recv().unwrap()); assert_none!(response_receiver_2.try_recv().unwrap()); - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data verify_new_transaction_outputs_with_proof( @@ -186,7 +194,7 @@ async fn test_get_new_transaction_outputs_epoch_change() { ); // Create the mock db reader - let mut db_reader = mock::create_mock_db_for_subscription( + let mut db_reader = mock::create_mock_db_for_optimistic_fetch( utils::create_test_ledger_info_with_sigs(highest_epoch, highest_version), lowest_version, ); @@ -206,14 +214,18 @@ async fn test_get_new_transaction_outputs_epoch_change() { // Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transaction outputs + // Send a request to optimistically fetch new transaction outputs let response_receiver = get_new_outputs_with_proof(&mut mock_client, peer_version, peer_epoch).await; - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data verify_new_transaction_outputs_with_proof( @@ -244,7 +256,7 @@ async fn test_get_new_transaction_outputs_max_chunk() { // Create the mock db reader let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + mock::create_mock_db_for_optimistic_fetch(highest_ledger_info.clone(), lowest_version); utils::expect_get_transaction_outputs( &mut db_reader, peer_version + 1, @@ -255,14 +267,18 @@ async fn 
test_get_new_transaction_outputs_max_chunk() { // Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transaction outputs + // Send a request to optimistically fetch new transaction outputs let response_receiver = get_new_outputs_with_proof(&mut mock_client, peer_version, highest_epoch).await; - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data verify_new_transaction_outputs_with_proof( diff --git a/state-sync/storage-service/server/src/tests/new_transactions.rs b/state-sync/storage-service/server/src/tests/new_transactions.rs index 011f060b96356..42cd8556ee0d9 100644 --- a/state-sync/storage-service/server/src/tests/new_transactions.rs +++ b/state-sync/storage-service/server/src/tests/new_transactions.rs @@ -39,8 +39,10 @@ async fn test_get_new_transactions() { ); // Create the mock db reader - let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + let mut db_reader = mock::create_mock_db_for_optimistic_fetch( + highest_ledger_info.clone(), + lowest_version, + ); utils::expect_get_transactions( &mut db_reader, peer_version + 1, @@ -52,9 +54,10 @@ async fn test_get_new_transactions() { // Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transactions + // Send a request to optimistically fetch new transactions let mut response_receiver = get_new_transactions_with_proof( &mut mock_client, peer_version, @@ -63,11 +66,14 @@ async fn test_get_new_transactions() { ) .await; - // Verify no subscription response has been received yet + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Verify no optimistic fetch response has been received yet assert_none!(response_receiver.try_recv().unwrap()); - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data verify_new_transactions_with_proof( @@ -110,8 +116,10 @@ async fn test_get_new_transactions_different_networks() { ); // Create the mock db reader - let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + let mut db_reader = mock::create_mock_db_for_optimistic_fetch( + highest_ledger_info.clone(), + lowest_version, + ); utils::expect_get_transactions( &mut db_reader, peer_version_1 + 1, @@ -131,9 +139,10 @@ async fn test_get_new_transactions_different_networks() { 
// Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transactions for peer 1 + // Send a request to optimistically fetch new transactions for peer 1 let peer_id = PeerId::random(); let peer_network_1 = PeerNetworkId::new(NetworkId::Public, peer_id); let mut response_receiver_1 = get_new_transactions_with_proof_for_peer( @@ -145,7 +154,7 @@ async fn test_get_new_transactions_different_networks() { ) .await; - // Send a request to subscribe to new transactions for peer 2 + // Send a request to optimistically fetch new transactions for peer 2 let peer_network_2 = PeerNetworkId::new(NetworkId::Vfn, peer_id); let mut response_receiver_2 = get_new_transactions_with_proof_for_peer( &mut mock_client, @@ -156,12 +165,15 @@ async fn test_get_new_transactions_different_networks() { ) .await; - // Verify no subscription response has been received yet + // Wait until the optimistic fetches are active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 2).await; + + // Verify no optimistic fetch response has been received yet assert_none!(response_receiver_1.try_recv().unwrap()); assert_none!(response_receiver_2.try_recv().unwrap()); - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data for both peers verify_new_transactions_with_proof( @@ -208,7 +220,7 @@ async fn test_get_new_transactions_epoch_change() { ); // Create the mock db reader - let mut db_reader = mock::create_mock_db_for_subscription( + let mut db_reader = mock::create_mock_db_for_optimistic_fetch( utils::create_test_ledger_info_with_sigs(highest_epoch, highest_version), lowest_version, ); @@ -229,9 +241,10 @@ async fn test_get_new_transactions_epoch_change() { // Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transactions + // Send a request to optimistically fetch new transactions let response_receiver = get_new_transactions_with_proof( &mut mock_client, peer_version, @@ -240,8 +253,11 @@ async fn test_get_new_transactions_epoch_change() { ) .await; - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data verify_new_transactions_with_proof( @@ -276,7 +292,7 @@ async fn test_get_new_transactions_max_chunk() { // Create the mock db reader let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + mock::create_mock_db_for_optimistic_fetch(highest_ledger_info.clone(), 
lowest_version); utils::expect_get_transactions( &mut db_reader, peer_version + 1, @@ -288,9 +304,10 @@ async fn test_get_new_transactions_max_chunk() { // Create the storage client and server let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), None); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transactions + // Send a request to optimistically fetch new transactions let response_receiver = get_new_transactions_with_proof( &mut mock_client, peer_version, @@ -299,8 +316,11 @@ async fn test_get_new_transactions_max_chunk() { ) .await; - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data verify_new_transactions_with_proof( diff --git a/state-sync/storage-service/server/src/tests/new_transactions_or_outputs.rs b/state-sync/storage-service/server/src/tests/new_transactions_or_outputs.rs index 75c9ac6ce3b8d..e6b4f33224b06 100644 --- a/state-sync/storage-service/server/src/tests/new_transactions_or_outputs.rs +++ b/state-sync/storage-service/server/src/tests/new_transactions_or_outputs.rs @@ -46,8 +46,10 @@ async fn test_get_new_transactions_or_outputs() { ); // Creates a small transaction list // Create the mock db reader - let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + let mut db_reader = mock::create_mock_db_for_optimistic_fetch( + highest_ledger_info.clone(), + lowest_version, + ); utils::expect_get_transaction_outputs( &mut db_reader, peer_version + 1, @@ -74,9 +76,10 @@ async fn test_get_new_transactions_or_outputs() { ); let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), Some(storage_config)); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transactions or outputs + // Send a request to optimistically fetch new transactions or outputs let mut response_receiver = get_new_transactions_or_outputs_with_proof( &mut mock_client, peer_version, @@ -86,11 +89,14 @@ async fn test_get_new_transactions_or_outputs() { ) .await; - // Verify no subscription response has been received yet + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Verify no optimistic fetch response has been received yet assert_none!(response_receiver.try_recv().unwrap()); - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data if fallback_to_transactions { @@ -149,8 +155,10 @@ async fn test_get_new_transactions_or_outputs_different_network() { ); // Creates a small transaction list // Create the mock db reader - let mut db_reader = - 
mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + let mut db_reader = mock::create_mock_db_for_optimistic_fetch( + highest_ledger_info.clone(), + lowest_version, + ); utils::expect_get_transaction_outputs( &mut db_reader, peer_version_1 + 1, @@ -192,9 +200,10 @@ async fn test_get_new_transactions_or_outputs_different_network() { ); let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), Some(storage_config)); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transactions or outputs for peer 1 + // Send a request to optimistically fetch new transactions or outputs for peer 1 let peer_id = PeerId::random(); let peer_network_1 = PeerNetworkId::new(NetworkId::Public, peer_id); let mut response_receiver_1 = get_new_transactions_or_outputs_with_proof_for_peer( @@ -207,7 +216,7 @@ async fn test_get_new_transactions_or_outputs_different_network() { ) .await; - // Send a request to subscribe to new transactions or outputs for peer 1 + // Send a request to optimistically fetch new transactions or outputs for peer 1 let peer_network_2 = PeerNetworkId::new(NetworkId::Validator, peer_id); let mut response_receiver_2 = get_new_transactions_or_outputs_with_proof_for_peer( &mut mock_client, @@ -219,12 +228,15 @@ async fn test_get_new_transactions_or_outputs_different_network() { ) .await; - // Verify no subscription response has been received yet + // Wait until the optimistic fetches are active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 2).await; + + // Verify no optimistic fetch response has been received yet assert_none!(response_receiver_1.try_recv().unwrap()); assert_none!(response_receiver_2.try_recv().unwrap()); - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data if fallback_to_transactions { @@ -297,7 +309,7 @@ async fn test_get_new_transactions_or_outputs_epoch_change() { ); // Creates a small transaction list // Create the mock db reader - let mut db_reader = mock::create_mock_db_for_subscription( + let mut db_reader = mock::create_mock_db_for_optimistic_fetch( utils::create_test_ledger_info_with_sigs(highest_epoch, highest_version), lowest_version, ); @@ -333,9 +345,10 @@ async fn test_get_new_transactions_or_outputs_epoch_change() { ); let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), Some(storage_config)); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transaction outputs + // Send a request to optimistically fetch new transaction outputs let response_receiver = get_new_transactions_or_outputs_with_proof( &mut mock_client, peer_version, @@ -345,8 +358,11 @@ async fn test_get_new_transactions_or_outputs_epoch_change() { ) .await; - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Elapse enough time to force the optimistic fetch 
thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data if fallback_to_transactions { @@ -399,7 +415,7 @@ async fn test_get_new_transactions_or_outputs_max_chunk() { // Create the mock db reader let max_num_output_reductions = 5; let mut db_reader = - mock::create_mock_db_for_subscription(highest_ledger_info.clone(), lowest_version); + mock::create_mock_db_for_optimistic_fetch(highest_ledger_info.clone(), lowest_version); for i in 0..=max_num_output_reductions { utils::expect_get_transaction_outputs( &mut db_reader, @@ -428,9 +444,10 @@ async fn test_get_new_transactions_or_outputs_max_chunk() { ); let (mut mock_client, service, mock_time, _) = MockClient::new(Some(db_reader), Some(storage_config)); + let active_optimistic_fetches = service.get_optimistic_fetches(); tokio::spawn(service.start()); - // Send a request to subscribe to new transaction outputs + // Send a request to optimistically fetch new transaction outputs let response_receiver = get_new_transactions_or_outputs_with_proof( &mut mock_client, peer_version, @@ -440,8 +457,11 @@ async fn test_get_new_transactions_or_outputs_max_chunk() { ) .await; - // Elapse enough time to force the subscription thread to work - utils::wait_for_subscription_service_to_refresh(&mut mock_client, &mock_time).await; + // Wait until the optimistic fetch is active + utils::wait_for_active_optimistic_fetches(active_optimistic_fetches.clone(), 1).await; + + // Elapse enough time to force the optimistic fetch thread to work + utils::wait_for_optimistic_fetch_service_to_refresh(&mut mock_client, &mock_time).await; // Verify a response is received and that it contains the correct data if fallback_to_transactions { diff --git a/state-sync/storage-service/server/src/tests/optimistic_fetch.rs b/state-sync/storage-service/server/src/tests/optimistic_fetch.rs new file mode 100644 index 0000000000000..8e8f8d1fe421e --- /dev/null +++ b/state-sync/storage-service/server/src/tests/optimistic_fetch.rs @@ -0,0 +1,284 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + moderator::RequestModerator, + network::ResponseSender, + optimistic_fetch, + optimistic_fetch::OptimisticFetchRequest, + storage::StorageReader, + tests::{mock, utils}, +}; +use aptos_config::{config::StorageServiceConfig, network_id::PeerNetworkId}; +use aptos_infallible::{Mutex, RwLock}; +use aptos_storage_service_types::{ + requests::{ + DataRequest, NewTransactionOutputsWithProofRequest, + NewTransactionsOrOutputsWithProofRequest, NewTransactionsWithProofRequest, + StorageServiceRequest, + }, + responses::{CompleteDataRange, StorageServerSummary}, +}; +use aptos_time_service::TimeService; +use aptos_types::epoch_change::EpochChangeProof; +use futures::channel::oneshot; +use lru::LruCache; +use rand::{rngs::OsRng, Rng}; +use std::{collections::HashMap, sync::Arc, time::Duration}; + +#[tokio::test] +async fn test_peers_with_ready_optimistic_fetches() { + // Create a mock time service + let time_service = TimeService::mock(); + + // Create two peers and optimistic fetch requests + let peer_network_1 = PeerNetworkId::random(); + let peer_network_2 = PeerNetworkId::random(); + let optimistic_fetch_1 = + create_optimistic_fetch_request(time_service.clone(), Some(1), Some(1)); + let optimistic_fetch_2 = + create_optimistic_fetch_request(time_service.clone(), Some(10), Some(1)); + + // Insert the optimistic fetches into the pending map + let 
optimistic_fetches = Arc::new(Mutex::new(HashMap::new())); + optimistic_fetches + .lock() + .insert(peer_network_1, optimistic_fetch_1); + optimistic_fetches + .lock() + .insert(peer_network_2, optimistic_fetch_2); + + // Create epoch ending test data + let epoch_ending_ledger_info = utils::create_epoch_ending_ledger_info(1, 5); + let epoch_change_proof = EpochChangeProof { + ledger_info_with_sigs: vec![epoch_ending_ledger_info], + more: false, + }; + + // Create the mock db reader + let mut db_reader = mock::create_mock_db_reader(); + utils::expect_get_epoch_ending_ledger_infos(&mut db_reader, 1, 2, epoch_change_proof); + + // Create the storage reader + let storage_reader = StorageReader::new(StorageServiceConfig::default(), Arc::new(db_reader)); + + // Create test data with an empty storage server summary + let cached_storage_server_summary = Arc::new(RwLock::new(StorageServerSummary::default())); + let lru_response_cache = Arc::new(Mutex::new(LruCache::new(0))); + let request_moderator = Arc::new(RequestModerator::new( + cached_storage_server_summary.clone(), + mock::create_peers_and_metadata(vec![]), + StorageServiceConfig::default(), + time_service.clone(), + )); + + // Verify that there are no peers with ready optimistic fetches + let peers_with_ready_optimistic_fetches = + optimistic_fetch::get_peers_with_ready_optimistic_fetches( + cached_storage_server_summary.clone(), + optimistic_fetches.clone(), + lru_response_cache.clone(), + request_moderator.clone(), + storage_reader.clone(), + time_service.clone(), + ) + .unwrap(); + assert!(peers_with_ready_optimistic_fetches.is_empty()); + + // Update the storage server summary so that there is new data for optimistic fetch 1 + let mut storage_server_summary = StorageServerSummary::default(); + storage_server_summary + .data_summary + .epoch_ending_ledger_infos = Some(CompleteDataRange::new(0, 1).unwrap()); + let synced_ledger_info = utils::create_test_ledger_info_with_sigs(1, 2); + storage_server_summary.data_summary.synced_ledger_info = Some(synced_ledger_info.clone()); + *cached_storage_server_summary.write() = storage_server_summary; + + // Verify that optimistic fetch 1 is ready + let peers_with_ready_optimistic_fetches = + optimistic_fetch::get_peers_with_ready_optimistic_fetches( + cached_storage_server_summary.clone(), + optimistic_fetches.clone(), + lru_response_cache.clone(), + request_moderator.clone(), + storage_reader.clone(), + time_service.clone(), + ) + .unwrap(); + assert_eq!(peers_with_ready_optimistic_fetches, vec![( + peer_network_1, + synced_ledger_info + )]); + + // Manually remove optimistic fetch 1 from the map + optimistic_fetches.lock().remove(&peer_network_1); + + // Update the storage server summary so that there is new data for optimistic fetch 2, + // but the optimistic fetch is invalid because it doesn't respect an epoch boundary. 
+ let mut storage_server_summary = StorageServerSummary::default(); + storage_server_summary + .data_summary + .epoch_ending_ledger_infos = Some(CompleteDataRange::new(0, 2).unwrap()); + let synced_ledger_info = utils::create_test_ledger_info_with_sigs(2, 100); + storage_server_summary.data_summary.synced_ledger_info = Some(synced_ledger_info); + *cached_storage_server_summary.write() = storage_server_summary; + + // Verify that optimistic fetch 2 is not returned because it was invalid + let peers_with_ready_optimistic_fetches = + optimistic_fetch::get_peers_with_ready_optimistic_fetches( + cached_storage_server_summary, + optimistic_fetches, + lru_response_cache, + request_moderator, + storage_reader, + time_service, + ) + .unwrap(); + assert_eq!(peers_with_ready_optimistic_fetches, vec![]); + + // Verify that optimistic fetches no longer contains peer 2 + assert!(peers_with_ready_optimistic_fetches.is_empty()); +} + +#[tokio::test] +async fn test_remove_expired_optimistic_fetches() { + // Create a storage service config + let max_optimistic_fetch_period = 100; + let storage_service_config = StorageServiceConfig { + max_optimistic_fetch_period, + ..Default::default() + }; + + // Create a mock time service + let time_service = TimeService::mock(); + + // Create the first batch of test optimistic fetches + let num_optimistic_fetches_in_batch = 10; + let optimistic_fetches = Arc::new(Mutex::new(HashMap::new())); + for _ in 0..num_optimistic_fetches_in_batch { + let optimistic_fetch = create_optimistic_fetch_request(time_service.clone(), None, None); + optimistic_fetches + .lock() + .insert(PeerNetworkId::random(), optimistic_fetch); + } + + // Verify the number of active optimistic fetches + assert_eq!( + optimistic_fetches.lock().len(), + num_optimistic_fetches_in_batch + ); + + // Elapse a small amount of time (not enough to expire the optimistic fetches) + time_service + .clone() + .into_mock() + .advance_async(Duration::from_millis(max_optimistic_fetch_period / 2)) + .await; + + // Remove the expired optimistic fetches and verify none were removed + optimistic_fetch::remove_expired_optimistic_fetches( + storage_service_config, + optimistic_fetches.clone(), + ); + assert_eq!( + optimistic_fetches.lock().len(), + num_optimistic_fetches_in_batch + ); + + // Create another batch of optimistic fetches + for _ in 0..num_optimistic_fetches_in_batch { + let optimistic_fetch = create_optimistic_fetch_request(time_service.clone(), None, None); + optimistic_fetches + .lock() + .insert(PeerNetworkId::random(), optimistic_fetch); + } + + // Verify the new number of active optimistic fetches + assert_eq!( + optimistic_fetches.lock().len(), + num_optimistic_fetches_in_batch * 2 + ); + + // Elapse enough time to expire the first batch of optimistic fetches + time_service + .clone() + .into_mock() + .advance_async(Duration::from_millis(max_optimistic_fetch_period)) + .await; + + // Remove the expired optimistic fetches and verify the first batch was removed + optimistic_fetch::remove_expired_optimistic_fetches( + storage_service_config, + optimistic_fetches.clone(), + ); + assert_eq!( + optimistic_fetches.lock().len(), + num_optimistic_fetches_in_batch + ); + + // Elapse enough time to expire the second batch of optimistic fetches + time_service + .into_mock() + .advance_async(Duration::from_millis(max_optimistic_fetch_period)) + .await; + + // Remove the expired optimistic fetches and verify the second batch was removed + optimistic_fetch::remove_expired_optimistic_fetches( + storage_service_config, + 
optimistic_fetches.clone(), + ); + assert!(optimistic_fetches.lock().is_empty()); +} + +/// Creates a random request for optimistic fetch data +fn create_optimistic_fetch_data_request( + known_version: Option, + known_epoch: Option, +) -> DataRequest { + let known_version = known_version.unwrap_or_default(); + let known_epoch = known_epoch.unwrap_or_default(); + + // Generate the random data request + let mut rng = OsRng; + let random_number: u8 = rng.gen(); + match random_number % 3 { + 0 => DataRequest::GetNewTransactionsWithProof(NewTransactionsWithProofRequest { + known_version, + known_epoch, + include_events: false, + }), + 1 => { + DataRequest::GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { + known_version, + known_epoch, + }) + }, + 2 => DataRequest::GetNewTransactionsOrOutputsWithProof( + NewTransactionsOrOutputsWithProofRequest { + known_version, + known_epoch, + include_events: true, + max_num_output_reductions: 1, + }, + ), + num => panic!("This shouldn't be possible! Got num: {:?}", num), + } +} + +/// Creates a random optimistic fetch request +fn create_optimistic_fetch_request( + time_service: TimeService, + known_version: Option, + known_epoch: Option, +) -> OptimisticFetchRequest { + // Create a storage service request + let data_request = create_optimistic_fetch_data_request(known_version, known_epoch); + let storage_service_request = StorageServiceRequest::new(data_request, true); + + // Create the response sender + let (callback, _) = oneshot::channel(); + let response_sender = ResponseSender::new(callback); + + // Create and return the optimistic fetch request + OptimisticFetchRequest::new(storage_service_request, response_sender, time_service) +} diff --git a/state-sync/storage-service/server/src/tests/subscription.rs b/state-sync/storage-service/server/src/tests/subscription.rs deleted file mode 100644 index 75d040523d8f2..0000000000000 --- a/state-sync/storage-service/server/src/tests/subscription.rs +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - moderator::RequestModerator, - network::ResponseSender, - storage::StorageReader, - subscription, - subscription::DataSubscriptionRequest, - tests::{mock, utils}, -}; -use aptos_config::{config::StorageServiceConfig, network_id::PeerNetworkId}; -use aptos_infallible::{Mutex, RwLock}; -use aptos_network::protocols::wire::handshake::v1::ProtocolId; -use aptos_storage_service_types::{ - requests::{ - DataRequest, NewTransactionOutputsWithProofRequest, - NewTransactionsOrOutputsWithProofRequest, NewTransactionsWithProofRequest, - StorageServiceRequest, - }, - responses::{CompleteDataRange, StorageServerSummary}, -}; -use aptos_time_service::TimeService; -use aptos_types::epoch_change::EpochChangeProof; -use futures::channel::oneshot; -use lru::LruCache; -use rand::{rngs::OsRng, Rng}; -use std::{collections::HashMap, sync::Arc, time::Duration}; - -#[tokio::test] -async fn test_peers_with_ready_subscriptions() { - // Create a mock time service - let time_service = TimeService::mock(); - - // Create two peers and data subscriptions - let peer_network_1 = PeerNetworkId::random(); - let peer_network_2 = PeerNetworkId::random(); - let data_subscription_1 = create_subscription_request(time_service.clone(), Some(1), Some(1)); - let data_subscription_2 = create_subscription_request(time_service.clone(), Some(10), Some(1)); - - // Insert the data subscriptions into the pending map - let data_subscriptions = 
Arc::new(Mutex::new(HashMap::new())); - data_subscriptions - .lock() - .insert(peer_network_1, data_subscription_1); - data_subscriptions - .lock() - .insert(peer_network_2, data_subscription_2); - - // Create epoch ending test data - let epoch_ending_ledger_info = utils::create_epoch_ending_ledger_info(1, 5); - let epoch_change_proof = EpochChangeProof { - ledger_info_with_sigs: vec![epoch_ending_ledger_info], - more: false, - }; - - // Create the mock db reader - let mut db_reader = mock::create_mock_db_reader(); - utils::expect_get_epoch_ending_ledger_infos(&mut db_reader, 1, 2, epoch_change_proof); - - // Create the storage reader - let storage_reader = StorageReader::new(StorageServiceConfig::default(), Arc::new(db_reader)); - - // Create test data with an empty storage server summary - let cached_storage_server_summary = Arc::new(RwLock::new(StorageServerSummary::default())); - let lru_response_cache = Arc::new(Mutex::new(LruCache::new(0))); - let request_moderator = Arc::new(RequestModerator::new( - cached_storage_server_summary.clone(), - mock::create_peers_and_metadata(vec![]), - StorageServiceConfig::default(), - time_service.clone(), - )); - - // Verify that there are no peers with ready subscriptions - let peers_with_ready_subscriptions = subscription::get_peers_with_ready_subscriptions( - cached_storage_server_summary.clone(), - data_subscriptions.clone(), - lru_response_cache.clone(), - request_moderator.clone(), - storage_reader.clone(), - time_service.clone(), - ) - .unwrap(); - assert!(peers_with_ready_subscriptions.is_empty()); - - // Update the storage server summary so that there is new data for subscription 1 - let mut storage_server_summary = StorageServerSummary::default(); - storage_server_summary - .data_summary - .epoch_ending_ledger_infos = Some(CompleteDataRange::new(0, 1).unwrap()); - let synced_ledger_info = utils::create_test_ledger_info_with_sigs(1, 2); - storage_server_summary.data_summary.synced_ledger_info = Some(synced_ledger_info.clone()); - *cached_storage_server_summary.write() = storage_server_summary; - - // Verify that subscription 1 is ready - let peers_with_ready_subscriptions = subscription::get_peers_with_ready_subscriptions( - cached_storage_server_summary.clone(), - data_subscriptions.clone(), - lru_response_cache.clone(), - request_moderator.clone(), - storage_reader.clone(), - time_service.clone(), - ) - .unwrap(); - assert_eq!(peers_with_ready_subscriptions, vec![( - peer_network_1, - synced_ledger_info - )]); - - // Manually remove subscription 1 from the map - data_subscriptions.lock().remove(&peer_network_1); - - // Update the storage server summary so that there is new data for subscription 2, - // but the subscription is invalid because it doesn't respect an epoch boundary. 
- let mut storage_server_summary = StorageServerSummary::default(); - storage_server_summary - .data_summary - .epoch_ending_ledger_infos = Some(CompleteDataRange::new(0, 2).unwrap()); - let synced_ledger_info = utils::create_test_ledger_info_with_sigs(2, 100); - storage_server_summary.data_summary.synced_ledger_info = Some(synced_ledger_info); - *cached_storage_server_summary.write() = storage_server_summary; - - // Verify that subscription 2 is not returned because it was invalid - let peers_with_ready_subscriptions = subscription::get_peers_with_ready_subscriptions( - cached_storage_server_summary, - data_subscriptions, - lru_response_cache, - request_moderator, - storage_reader, - time_service, - ) - .unwrap(); - assert_eq!(peers_with_ready_subscriptions, vec![]); - - // Verify that data subscriptions no longer contains peer 2 - assert!(peers_with_ready_subscriptions.is_empty()); -} - -#[tokio::test] -async fn test_remove_expired_subscriptions() { - // Create a storage service config - let max_subscription_period_ms = 100; - let storage_service_config = StorageServiceConfig { - max_subscription_period_ms, - ..Default::default() - }; - - // Create a mock time service - let time_service = TimeService::mock(); - - // Create the first batch of test data subscriptions - let num_subscriptions_in_batch = 10; - let data_subscriptions = Arc::new(Mutex::new(HashMap::new())); - for _ in 0..num_subscriptions_in_batch { - let data_subscription = create_subscription_request(time_service.clone(), None, None); - data_subscriptions - .lock() - .insert(PeerNetworkId::random(), data_subscription); - } - - // Verify the number of active data subscriptions - assert_eq!(data_subscriptions.lock().len(), num_subscriptions_in_batch); - - // Elapse a small amount of time (not enough to expire the subscriptions) - time_service - .clone() - .into_mock() - .advance_async(Duration::from_millis(max_subscription_period_ms / 2)) - .await; - - // Remove the expired subscriptions and verify none were removed - subscription::remove_expired_data_subscriptions( - storage_service_config, - data_subscriptions.clone(), - ); - assert_eq!(data_subscriptions.lock().len(), num_subscriptions_in_batch); - - // Create another batch of data subscriptions - for _ in 0..num_subscriptions_in_batch { - let data_subscription = create_subscription_request(time_service.clone(), None, None); - data_subscriptions - .lock() - .insert(PeerNetworkId::random(), data_subscription); - } - - // Verify the new number of active data subscriptions - assert_eq!( - data_subscriptions.lock().len(), - num_subscriptions_in_batch * 2 - ); - - // Elapse enough time to expire the first batch of subscriptions - time_service - .clone() - .into_mock() - .advance_async(Duration::from_millis(max_subscription_period_ms)) - .await; - - // Remove the expired subscriptions and verify the first batch was removed - subscription::remove_expired_data_subscriptions( - storage_service_config, - data_subscriptions.clone(), - ); - assert_eq!(data_subscriptions.lock().len(), num_subscriptions_in_batch); - - // Elapse enough time to expire the second batch of subscriptions - time_service - .into_mock() - .advance_async(Duration::from_millis(max_subscription_period_ms)) - .await; - - // Remove the expired subscriptions and verify the second batch was removed - subscription::remove_expired_data_subscriptions( - storage_service_config, - data_subscriptions.clone(), - ); - assert!(data_subscriptions.lock().is_empty()); -} - -/// Creates a random data request for subscription data 
-fn create_subscription_data_request( - known_version: Option, - known_epoch: Option, -) -> DataRequest { - let known_version = known_version.unwrap_or_default(); - let known_epoch = known_epoch.unwrap_or_default(); - - // Generate the random data request - let mut rng = OsRng; - let random_number: u8 = rng.gen(); - match random_number % 3 { - 0 => DataRequest::GetNewTransactionsWithProof(NewTransactionsWithProofRequest { - known_version, - known_epoch, - include_events: false, - }), - 1 => { - DataRequest::GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { - known_version, - known_epoch, - }) - }, - 2 => DataRequest::GetNewTransactionsOrOutputsWithProof( - NewTransactionsOrOutputsWithProofRequest { - known_version, - known_epoch, - include_events: true, - max_num_output_reductions: 1, - }, - ), - num => panic!("This shouldn't be possible! Got num: {:?}", num), - } -} - -/// Creates a random data subscription request -fn create_subscription_request( - time_service: TimeService, - known_version: Option, - known_epoch: Option, -) -> DataSubscriptionRequest { - // Create a storage service request - let data_request = create_subscription_data_request(known_version, known_epoch); - let storage_service_request = StorageServiceRequest::new(data_request, true); - - // Create the response sender - let (callback, _) = oneshot::channel(); - let response_sender = ResponseSender::new(callback); - - // Create and return the data subscription request - DataSubscriptionRequest::new( - ProtocolId::StorageServiceRpc, - storage_service_request, - response_sender, - time_service, - ) -} diff --git a/state-sync/storage-service/server/src/tests/utils.rs b/state-sync/storage-service/server/src/tests/utils.rs index 644ea127d8036..95fdb560fb335 100644 --- a/state-sync/storage-service/server/src/tests/utils.rs +++ b/state-sync/storage-service/server/src/tests/utils.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ + optimistic_fetch::OptimisticFetchRequest, storage::StorageReader, tests::mock::{MockClient, MockDatabaseReader}, StorageServiceServer, @@ -11,6 +12,7 @@ use aptos_config::{ network_id::{NetworkId, PeerNetworkId}, }; use aptos_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey, SigningKey, Uniform}; +use aptos_infallible::Mutex; use aptos_storage_service_types::{ requests::{ DataRequest, StateValuesWithProofRequest, StorageServiceRequest, @@ -39,6 +41,7 @@ use aptos_types::{ }; use mockall::predicate::eq; use rand::Rng; +use std::{collections::HashMap, sync::Arc, time::Duration}; /// Advances the given timer by the amount of time it takes to refresh storage pub async fn advance_storage_refresh_time(mock_time: &MockTimeService) { @@ -418,14 +421,30 @@ pub async fn wait_for_storage_to_refresh( } } -/// Advances enough time that the subscription service is able to refresh -pub async fn wait_for_subscription_service_to_refresh( +/// Advances enough time that the optimistic fetch service is able to refresh +pub async fn wait_for_optimistic_fetch_service_to_refresh( mock_client: &mut MockClient, mock_time: &MockTimeService, ) { // Elapse enough time to force storage to be updated wait_for_storage_to_refresh(mock_client, mock_time).await; - // Elapse enough time to force the subscription thread to work + // Elapse enough time to force the optimistic fetch thread to work advance_storage_refresh_time(mock_time).await; } + +/// Waits for the specified number of optimistic fetches to be active +pub async fn wait_for_active_optimistic_fetches( + 
active_optimistic_fetches: Arc>>, + expected_num_active_fetches: usize, +) { + loop { + let num_active_fetches = active_optimistic_fetches.lock().len(); + if num_active_fetches == expected_num_active_fetches { + return; // We found the expected number of active fetches + } + + // Sleep for a while + tokio::time::sleep(Duration::from_millis(100)).await; + } +} diff --git a/state-sync/storage-service/types/src/requests.rs b/state-sync/storage-service/types/src/requests.rs index 6101eca22cacb..e89c219a7d998 100644 --- a/state-sync/storage-service/types/src/requests.rs +++ b/state-sync/storage-service/types/src/requests.rs @@ -34,15 +34,15 @@ impl StorageServiceRequest { #[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum DataRequest { GetEpochEndingLedgerInfos(EpochEndingLedgerInfoRequest), // Fetches a list of epoch ending ledger infos - GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest), // Subscribes to new transaction outputs - GetNewTransactionsWithProof(NewTransactionsWithProofRequest), // Subscribes to new transactions with a proof + GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest), // Optimistically fetches new transaction outputs + GetNewTransactionsWithProof(NewTransactionsWithProofRequest), // Optimistically fetches new transactions GetNumberOfStatesAtVersion(Version), // Fetches the number of states at the specified version GetServerProtocolVersion, // Fetches the protocol version run by the server GetStateValuesWithProof(StateValuesWithProofRequest), // Fetches a list of states with a proof GetStorageServerSummary, // Fetches a summary of the storage server state GetTransactionOutputsWithProof(TransactionOutputsWithProofRequest), // Fetches a list of transaction outputs with a proof GetTransactionsWithProof(TransactionsWithProofRequest), // Fetches a list of transactions with a proof - GetNewTransactionsOrOutputsWithProof(NewTransactionsOrOutputsWithProofRequest), // Subscribes to new transactions or outputs with a proof + GetNewTransactionsOrOutputsWithProof(NewTransactionsOrOutputsWithProofRequest), // Optimistically fetches new transactions or outputs GetTransactionsOrOutputsWithProof(TransactionsOrOutputsWithProofRequest), // Fetches a list of transactions or outputs with a proof } @@ -70,7 +70,7 @@ impl DataRequest { matches!(self, &Self::GetStorageServerSummary) } - pub fn is_data_subscription_request(&self) -> bool { + pub fn is_optimistic_fetch(&self) -> bool { matches!(self, &Self::GetNewTransactionOutputsWithProof(_)) || matches!(self, &Self::GetNewTransactionsWithProof(_)) || matches!(self, Self::GetNewTransactionsOrOutputsWithProof(_)) diff --git a/storage/aptosdb/src/aptosdb_test.rs b/storage/aptosdb/src/aptosdb_test.rs index 5f345523505c9..02fc5617c7980 100644 --- a/storage/aptosdb/src/aptosdb_test.rs +++ b/storage/aptosdb/src/aptosdb_test.rs @@ -122,8 +122,9 @@ fn test_error_if_version_pruned() { db.state_store .state_db .state_merkle_pruner - .testonly_update_min_version(5); - db.ledger_pruner.testonly_update_min_version(10); + .save_min_readable_version(5) + .unwrap(); + db.ledger_pruner.save_min_readable_version(10).unwrap(); assert_eq!( db.error_if_state_merkle_pruned("State", 4) .unwrap_err() @@ -252,15 +253,11 @@ pub fn test_state_merkle_pruning_impl( // Prune till the oldest snapshot readable. 
let pruner = &db.state_store.state_db.state_merkle_pruner; let epoch_snapshot_pruner = &db.state_store.state_db.epoch_snapshot_pruner; - pruner - .pruner_worker - .set_target_db_version(*snapshots.first().unwrap()); - epoch_snapshot_pruner - .pruner_worker - .set_target_db_version(std::cmp::min( - *snapshots.first().unwrap(), - *epoch_snapshots.first().unwrap_or(&Version::MAX), - )); + pruner.set_worker_target_version(*snapshots.first().unwrap()); + epoch_snapshot_pruner.set_worker_target_version(std::cmp::min( + *snapshots.first().unwrap(), + *epoch_snapshots.first().unwrap_or(&Version::MAX), + )); pruner.wait_for_pruner().unwrap(); epoch_snapshot_pruner.wait_for_pruner().unwrap(); diff --git a/storage/aptosdb/src/backup/restore_utils.rs b/storage/aptosdb/src/backup/restore_utils.rs index 17b8164586284..8c742d376059e 100644 --- a/storage/aptosdb/src/backup/restore_utils.rs +++ b/storage/aptosdb/src/backup/restore_utils.rs @@ -1,13 +1,12 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::state_store::StateStore; -///! This file contains utilities that are helpful for performing -///! database restore operations, as required by restore and -///! state sync v2. +//! This file contains utilities that are helpful for performing +//! database restore operations, as required by restore and +//! state sync v2. use crate::{ event_store::EventStore, ledger_store::LedgerStore, new_sharded_kv_schema_batch, - schema::transaction_accumulator::TransactionAccumulatorSchema, + schema::transaction_accumulator::TransactionAccumulatorSchema, state_store::StateStore, transaction_store::TransactionStore, ShardedStateKvSchemaBatch, }; use anyhow::{ensure, Result}; @@ -223,10 +222,11 @@ pub(crate) fn save_transactions_impl( state_kv_batches: &mut ShardedStateKvSchemaBatch, kv_replay: bool, ) -> Result<()> { + // TODO(grao): Support split ledger db here. for (idx, txn) in txns.iter().enumerate() { transaction_store.put_transaction(first_version + idx as Version, txn, batch)?; } - ledger_store.put_transaction_infos(first_version, txn_infos, batch)?; + ledger_store.put_transaction_infos(first_version, txn_infos, batch, batch)?; event_store.put_events_multiple_versions(first_version, events, batch)?; // insert changes in write set schema batch for (idx, ws) in write_sets.iter().enumerate() { diff --git a/storage/aptosdb/src/db_debugger/checkpoint/mod.rs b/storage/aptosdb/src/db_debugger/checkpoint/mod.rs index e71618e31e3ad..d9cb4b03773b4 100644 --- a/storage/aptosdb/src/db_debugger/checkpoint/mod.rs +++ b/storage/aptosdb/src/db_debugger/checkpoint/mod.rs @@ -21,7 +21,7 @@ impl Cmd { ensure!(!self.output_dir.exists(), "Output dir already exists."); fs::create_dir_all(&self.output_dir)?; - // TODO(grao): Support sharded state merkle db here. - AptosDB::create_checkpoint(self.db_dir, self.output_dir, false) + // TODO(grao): Support sharded state merkle db and split_ledger_db here.
+ AptosDB::create_checkpoint(self.db_dir, self.output_dir, false, false) } } diff --git a/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs b/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs index 10a8d094616e3..f3384d70f6c3b 100644 --- a/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs +++ b/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs @@ -33,13 +33,13 @@ pub struct Cmd { db_dir: PathBuf, #[clap(long)] - use_state_kv_db: bool, + split_ledger_db: bool, } impl Cmd { pub fn run(self) -> Result<()> { let rocksdb_config = RocksdbConfigs { - use_state_kv_db: self.use_state_kv_db, + split_ledger_db: self.split_ledger_db, ..Default::default() }; let (ledger_db, state_merkle_db, state_kv_db) = AptosDB::open_dbs( diff --git a/storage/aptosdb/src/db_debugger/truncate/mod.rs b/storage/aptosdb/src/db_debugger/truncate/mod.rs index ce9c0f309113d..04c953abfae6d 100644 --- a/storage/aptosdb/src/db_debugger/truncate/mod.rs +++ b/storage/aptosdb/src/db_debugger/truncate/mod.rs @@ -47,7 +47,7 @@ pub struct Cmd { opt_out_backup_checkpoint: bool, #[clap(long)] - use_state_kv_db: bool, + split_ledger_db: bool, } impl Cmd { @@ -61,14 +61,19 @@ impl Cmd { println!("Creating backup at: {:?}", &backup_checkpoint_dir); fs::create_dir_all(&backup_checkpoint_dir)?; // TODO(grao): Support sharded state merkle db here. - AptosDB::create_checkpoint(&self.db_dir, backup_checkpoint_dir, false)?; + AptosDB::create_checkpoint( + &self.db_dir, + backup_checkpoint_dir, + self.split_ledger_db, + false, + )?; println!("Done!"); } else { println!("Opted out backup creation!."); } let rocksdb_config = RocksdbConfigs { - use_state_kv_db: self.use_state_kv_db, + split_ledger_db: self.split_ledger_db, ..Default::default() }; let (ledger_db, state_merkle_db, state_kv_db) = AptosDB::open_dbs( @@ -242,7 +247,7 @@ mod test { ledger_db_batch_size: 15, opt_out_backup_checkpoint: true, backup_checkpoint_dir: None, - use_state_kv_db: false, + split_ledger_db: false, }; cmd.run().unwrap(); diff --git a/storage/aptosdb/src/fake_aptosdb.rs b/storage/aptosdb/src/fake_aptosdb.rs index 73ec96dc624f5..9001ebfcf5329 100644 --- a/storage/aptosdb/src/fake_aptosdb.rs +++ b/storage/aptosdb/src/fake_aptosdb.rs @@ -104,7 +104,7 @@ impl FakeBufferedState { ensure!( new_state_after_checkpoint.base_version >= self.state_after_checkpoint.base_version ); - if let Some(updates_until_next_checkpoint_since_current) = + if let Some(_updates_until_next_checkpoint_since_current) = updates_until_next_checkpoint_since_current_option { self.state_after_checkpoint.current = new_state_after_checkpoint.base.clone(); diff --git a/storage/aptosdb/src/ledger_db.rs b/storage/aptosdb/src/ledger_db.rs index f3719a23fd7cc..a2c46fcf67486 100644 --- a/storage/aptosdb/src/ledger_db.rs +++ b/storage/aptosdb/src/ledger_db.rs @@ -4,18 +4,22 @@ #![forbid(unsafe_code)] #![allow(dead_code)] -use crate::db_options::{ - event_db_column_families, gen_event_cfds, gen_ledger_cfds, gen_ledger_metadata_cfds, - gen_transaction_accumulator_cfds, gen_transaction_cfds, gen_transaction_info_cfds, - gen_write_set_cfds, ledger_db_column_families, ledger_metadata_db_column_families, - transaction_accumulator_db_column_families, transaction_db_column_families, - transaction_info_db_column_families, write_set_db_column_families, +use crate::{ + db_options::{ + event_db_column_families, gen_event_cfds, gen_ledger_cfds, gen_ledger_metadata_cfds, + gen_transaction_accumulator_cfds, gen_transaction_cfds, gen_transaction_info_cfds, + gen_write_set_cfds, 
ledger_db_column_families, ledger_metadata_db_column_families, + transaction_accumulator_db_column_families, transaction_db_column_families, + transaction_info_db_column_families, write_set_db_column_families, + }, + schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, }; use anyhow::Result; use aptos_config::config::{RocksdbConfig, RocksdbConfigs}; use aptos_logger::prelude::info; use aptos_rocksdb_options::gen_rocksdb_options; use aptos_schemadb::{ColumnFamilyDescriptor, ColumnFamilyName, DB}; +use aptos_types::transaction::Version; use std::{ path::{Path, PathBuf}, sync::Arc, @@ -129,11 +133,60 @@ impl LedgerDb { } pub(crate) fn create_checkpoint( - _db_root_path: impl AsRef, - _cp_root_path: impl AsRef, + db_root_path: impl AsRef, + cp_root_path: impl AsRef, + split_ledger_db: bool, ) -> Result<()> { - // TODO(grao): Implement this function. - todo!() + let rocksdb_configs = RocksdbConfigs { + split_ledger_db, + ..Default::default() + }; + let ledger_db = Self::new(db_root_path, rocksdb_configs, /*readonly=*/ false)?; + let cp_ledger_db_folder = cp_root_path.as_ref().join(LEDGER_DB_FOLDER_NAME); + + info!( + split_ledger_db = split_ledger_db, + "Creating ledger_db checkpoint at: {cp_ledger_db_folder:?}" + ); + + std::fs::remove_dir_all(&cp_ledger_db_folder).unwrap_or(()); + if split_ledger_db { + std::fs::create_dir_all(&cp_ledger_db_folder).unwrap_or(()); + } + + ledger_db + .metadata_db() + .create_checkpoint(Self::metadata_db_path( + cp_root_path.as_ref(), + split_ledger_db, + ))?; + + if split_ledger_db { + ledger_db + .event_db() + .create_checkpoint(cp_ledger_db_folder.join(EVENT_DB_NAME))?; + ledger_db + .transaction_accumulator_db() + .create_checkpoint(cp_ledger_db_folder.join(TRANSACTION_ACCUMULATOR_DB_NAME))?; + ledger_db + .transaction_db() + .create_checkpoint(cp_ledger_db_folder.join(TRANSACTION_DB_NAME))?; + ledger_db + .transaction_info_db() + .create_checkpoint(cp_ledger_db_folder.join(TRANSACTION_INFO_DB_NAME))?; + ledger_db + .write_set_db() + .create_checkpoint(cp_ledger_db_folder.join(WRITE_SET_DB_NAME))?; + } + + Ok(()) + } + + pub(crate) fn write_pruner_progress(&self, version: Version) -> Result<()> { + self.ledger_metadata_db.put::( + &DbMetadataKey::LedgerPrunerProgress, + &DbMetadataValue::Version(version), + ) } pub fn metadata_db(&self) -> &DB { @@ -156,18 +209,34 @@ impl LedgerDb { &self.transaction_accumulator_db } + pub(crate) fn transaction_accumulator_db_arc(&self) -> Arc { + Arc::clone(&self.transaction_accumulator_db) + } + pub(crate) fn transaction_db(&self) -> &DB { &self.transaction_db } + pub(crate) fn transaction_db_arc(&self) -> Arc { + Arc::clone(&self.transaction_db) + } + pub(crate) fn transaction_info_db(&self) -> &DB { &self.transaction_info_db } + pub(crate) fn transaction_info_db_arc(&self) -> Arc { + Arc::clone(&self.transaction_info_db) + } + pub(crate) fn write_set_db(&self) -> &DB { &self.write_set_db } + pub(crate) fn write_set_db_arc(&self) -> Arc { + Arc::clone(&self.write_set_db) + } + fn open_rocksdb( path: PathBuf, name: &str, diff --git a/storage/aptosdb/src/ledger_store/mod.rs b/storage/aptosdb/src/ledger_store/mod.rs index a413843c61227..2df2accce408b 100644 --- a/storage/aptosdb/src/ledger_store/mod.rs +++ b/storage/aptosdb/src/ledger_store/mod.rs @@ -29,11 +29,11 @@ use aptos_types::{ definition::LeafCount, position::Position, AccumulatorConsistencyProof, TransactionAccumulatorProof, TransactionAccumulatorRangeProof, TransactionInfoWithProof, }, - transaction::{TransactionInfo, Version}, + 
transaction::{TransactionInfo, TransactionToCommit, Version}, }; use arc_swap::ArcSwap; use itertools::Itertools; -use std::{ops::Deref, sync::Arc}; +use std::{borrow::Borrow, ops::Deref, sync::Arc}; #[derive(Debug)] pub struct LedgerStore { @@ -288,13 +288,16 @@ impl LedgerStore { &self, first_version: u64, txn_infos: &[TransactionInfo], - batch: &SchemaBatch, + // TODO(grao): Consider remove this function and migrate all callers to use the two functions + // below. + transaction_info_batch: &SchemaBatch, + transaction_accumulator_batch: &SchemaBatch, ) -> Result { // write txn_info (first_version..first_version + txn_infos.len() as u64) .zip_eq(txn_infos.iter()) .try_for_each(|(version, txn_info)| { - batch.put::(&version, txn_info) + transaction_info_batch.put::(&version, txn_info) })?; // write hash of txn_info into the accumulator @@ -304,12 +307,44 @@ impl LedgerStore { first_version, /* num_existing_leaves */ &txn_hashes, )?; - writes + writes.iter().try_for_each(|(pos, hash)| { + transaction_accumulator_batch.put::(pos, hash) + })?; + Ok(root_hash) + } + + pub fn put_transaction_accumulator( + &self, + first_version: Version, + txns_to_commit: &[impl Borrow], + transaction_accumulator_batch: &SchemaBatch, + ) -> Result { + let txn_hashes: Vec<_> = txns_to_commit .iter() - .try_for_each(|(pos, hash)| batch.put::(pos, hash))?; + .map(|t| t.borrow().transaction_info().hash()) + .collect(); + + let (root_hash, writes) = Accumulator::append( + self, + first_version, /* num_existing_leaves */ + &txn_hashes, + )?; + writes.iter().try_for_each(|(pos, hash)| { + transaction_accumulator_batch.put::(pos, hash) + })?; + Ok(root_hash) } + pub fn put_transaction_info( + &self, + version: Version, + transaction_info: &TransactionInfo, + transaction_info_batch: &SchemaBatch, + ) -> Result<()> { + transaction_info_batch.put::(&version, transaction_info) + } + /// Write `ledger_info_with_sigs` to `batch`. 
pub fn put_ledger_info( &self, diff --git a/storage/aptosdb/src/ledger_store/transaction_info_test.rs b/storage/aptosdb/src/ledger_store/transaction_info_test.rs index 92c53a6070b4e..dd2f31875b26f 100644 --- a/storage/aptosdb/src/ledger_store/transaction_info_test.rs +++ b/storage/aptosdb/src/ledger_store/transaction_info_test.rs @@ -37,15 +37,27 @@ fn verify( } fn save(store: &LedgerStore, first_version: Version, txn_infos: &[TransactionInfo]) -> HashValue { - let batch = SchemaBatch::new(); + let transaction_info_batch = SchemaBatch::new(); + let transaction_accumulator_batch = SchemaBatch::new(); let root_hash = store - .put_transaction_infos(first_version, txn_infos, &batch) + .put_transaction_infos( + first_version, + txn_infos, + &transaction_info_batch, + &transaction_accumulator_batch, + ) .unwrap(); store .ledger_db .transaction_info_db() - .write_schemas(batch) + .write_schemas(transaction_info_batch) .unwrap(); + store + .ledger_db + .transaction_accumulator_db() + .write_schemas(transaction_accumulator_batch) + .unwrap(); + root_hash } diff --git a/storage/aptosdb/src/lib.rs b/storage/aptosdb/src/lib.rs index e763678e080c7..786cc4099a704 100644 --- a/storage/aptosdb/src/lib.rs +++ b/storage/aptosdb/src/lib.rs @@ -54,10 +54,9 @@ use crate::{ OTHER_TIMERS_SECONDS, ROCKSDB_PROPERTIES, }, pruner::{ - db_pruner::DBPruner, ledger_pruner_manager::LedgerPrunerManager, - pruner_manager::PrunerManager, pruner_utils, state_kv_pruner::StateKvPruner, + ledger_pruner_manager::LedgerPrunerManager, pruner_manager::PrunerManager, pruner_utils, state_kv_pruner_manager::StateKvPrunerManager, - state_merkle_pruner_manager::StateMerklePrunerManager, state_store::StateMerklePruner, + state_merkle_pruner_manager::StateMerklePrunerManager, }, schema::*, stale_node_index::StaleNodeIndexSchema, @@ -75,7 +74,7 @@ use aptos_config::config::{ use aptos_config::config::{ BUFFERED_STATE_TARGET_ITEMS, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, }; -use aptos_crypto::hash::HashValue; +use aptos_crypto::HashValue; use aptos_db_indexer::Indexer; use aptos_infallible::Mutex; use aptos_logger::prelude::*; @@ -116,7 +115,6 @@ use aptos_types::{ }; use aptos_vm::data_cache::AsMoveResolver; use arr_macro::arr; -use itertools::zip_eq; use move_resource_viewer::MoveValueAnnotator; use once_cell::sync::Lazy; use rayon::prelude::*; @@ -694,27 +692,21 @@ impl AptosDB { pub fn create_checkpoint( db_path: impl AsRef, cp_path: impl AsRef, + use_split_ledger_db: bool, use_sharded_state_merkle_db: bool, ) -> Result<()> { let start = Instant::now(); - let ledger_db_path = db_path.as_ref().join(LEDGER_DB_NAME); - let ledger_cp_path = cp_path.as_ref().join(LEDGER_DB_NAME); - info!("Creating ledger_db checkpoint at: {ledger_cp_path:?}"); - - std::fs::remove_dir_all(&ledger_cp_path).unwrap_or(()); - - // Weird enough, checkpoint doesn't work with readonly or secondary mode (gets stuck). - // https://github.com/facebook/rocksdb/issues/11167 - let ledger_db = aptos_schemadb::DB::open( - ledger_db_path, - LEDGER_DB_NAME, - ledger_db_column_families(), - &aptos_schemadb::Options::default(), - )?; - ledger_db.create_checkpoint(ledger_cp_path)?; + info!( + use_split_ledger_db = use_split_ledger_db, + use_sharded_state_merkle_db = use_sharded_state_merkle_db, + "Creating checkpoint for AptosDB." 
+ ); - StateKvDb::create_checkpoint(db_path.as_ref(), cp_path.as_ref())?; + LedgerDb::create_checkpoint(db_path.as_ref(), cp_path.as_ref(), use_split_ledger_db)?; + if use_split_ledger_db { + StateKvDb::create_checkpoint(db_path.as_ref(), cp_path.as_ref())?; + } StateMerkleDb::create_checkpoint( db_path.as_ref(), cp_path.as_ref(), @@ -797,127 +789,6 @@ impl AptosDB { Ok(events_with_version) } - fn save_ledger_info( - &self, - new_root_hash: HashValue, - ledger_info_with_sigs: Option<&LedgerInfoWithSignatures>, - ledger_batch: &SchemaBatch, - ) -> Result<()> { - // If expected ledger info is provided, verify result root hash and save the ledger info. - if let Some(x) = ledger_info_with_sigs { - let expected_root_hash = x.ledger_info().transaction_accumulator_hash(); - ensure!( - new_root_hash == expected_root_hash, - "Root hash calculated doesn't match expected. {:?} vs {:?}", - new_root_hash, - expected_root_hash, - ); - let current_epoch = self - .ledger_store - .get_latest_ledger_info_option() - .map_or(0, |li| li.ledger_info().next_block_epoch()); - ensure!( - x.ledger_info().epoch() == current_epoch, - "Gap in epoch history. Trying to put in LedgerInfo in epoch: {}, current epoch: {}", - x.ledger_info().epoch(), - current_epoch, - ); - - self.ledger_store.put_ledger_info(x, ledger_batch)?; - } - Ok(()) - } - - fn save_transactions_impl( - &self, - txns_to_commit: &[impl Borrow + Sync], - first_version: u64, - expected_state_db_usage: StateStorageUsage, - sharded_state_cache: Option<&ShardedStateCache>, - ) -> Result<(SchemaBatch, ShardedStateKvSchemaBatch, HashValue)> { - let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["save_transactions_impl"]) - .start_timer(); - - let ledger_batch = SchemaBatch::new(); - let sharded_state_kv_batches = new_sharded_kv_schema_batch(); - - let last_version = first_version + txns_to_commit.len() as u64 - 1; - - let new_root_hash = thread::scope(|s| { - let t0 = s.spawn(|| { - // Account state updates. - let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["save_transactions_state"]) - .start_timer(); - - let state_updates_vec = txns_to_commit - .iter() - .map(|txn_to_commit| txn_to_commit.borrow().state_updates()) - .collect::>(); - - // TODO(grao): Make state_store take sharded state updates. - self.state_store.put_value_sets( - state_updates_vec, - first_version, - expected_state_db_usage, - sharded_state_cache, - &ledger_batch, - &sharded_state_kv_batches, - ) - }); - - let t1 = s.spawn(|| { - // Event updates. Gather event accumulator root hashes. - let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["save_transactions_events"]) - .start_timer(); - zip_eq(first_version..=last_version, txns_to_commit) - .map(|(ver, txn_to_commit)| { - self.event_store.put_events( - ver, - txn_to_commit.borrow().events(), - &ledger_batch, - ) - }) - .collect::>>() - }); - - let t2 = s.spawn(|| { - let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["save_transactions_txn_infos"]) - .start_timer(); - zip_eq(first_version..=last_version, txns_to_commit).try_for_each( - |(ver, txn_to_commit)| { - // Transaction updates. Gather transaction hashes. - self.transaction_store.put_transaction( - ver, - txn_to_commit.borrow().transaction(), - &ledger_batch, - )?; - self.transaction_store.put_write_set( - ver, - txn_to_commit.borrow().write_set(), - &ledger_batch, - ) - }, - )?; - // Transaction accumulator updates. Get result root hash. 
- let txn_infos: Vec<_> = txns_to_commit - .iter() - .map(|t| t.borrow().transaction_info()) - .cloned() - .collect(); - self.ledger_store - .put_transaction_infos(first_version, &txn_infos, &ledger_batch) - }); - t0.join().unwrap()?; - t1.join().unwrap()?; - t2.join().unwrap() - }); - Ok((ledger_batch, sharded_state_kv_batches, new_root_hash?)) - } - fn get_table_info_option(&self, handle: TableHandle) -> Result> { match &self.indexer { Some(indexer) => indexer.get_table_info(handle), @@ -935,6 +806,9 @@ impl AptosDB { ledger_info_with_sigs: Option<&LedgerInfoWithSignatures>, latest_in_memory_state: &StateDelta, ) -> Result<()> { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["save_transactions_validation"]) + .start_timer(); let buffered_state = self.state_store.buffered_state().lock(); ensure!( base_state_version == buffered_state.current_state().base_version, @@ -993,50 +867,278 @@ impl AptosDB { Ok(()) } - fn commit_ledger_and_state_kv_db( + fn calculate_and_commit_ledger_and_state_kv( &self, - last_version: Version, - ledger_batch: SchemaBatch, - sharded_state_kv_batches: ShardedStateKvSchemaBatch, - new_root_hash: HashValue, - ledger_info_with_sigs: Option<&LedgerInfoWithSignatures>, + txns_to_commit: &[impl Borrow + Sync], + first_version: Version, + expected_state_db_usage: StateStorageUsage, + sharded_state_cache: Option<&ShardedStateCache>, + ) -> Result { + let new_root_hash = thread::scope(|s| { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["save_transactions__work"]) + .start_timer(); + // TODO(grao): Write progress for each of the following databases, and handle the + // inconsistency at the startup time. + let t0 = s.spawn(|| self.commit_events(txns_to_commit, first_version)); + let t1 = s.spawn(|| self.commit_write_sets(txns_to_commit, first_version)); + let t2 = s.spawn(|| self.commit_transactions(txns_to_commit, first_version)); + let t3 = s.spawn(|| { + self.commit_state_kv_and_ledger_metadata( + txns_to_commit, + first_version, + expected_state_db_usage, + sharded_state_cache, + ) + }); + let t4 = s.spawn(|| self.commit_transaction_infos(txns_to_commit, first_version)); + let t5 = s.spawn(|| self.commit_transaction_accumulator(txns_to_commit, first_version)); + // TODO(grao): Consider propagating the error instead of panic, if necessary. + t0.join().unwrap()?; + t1.join().unwrap()?; + t2.join().unwrap()?; + t3.join().unwrap()?; + t4.join().unwrap()?; + t5.join().unwrap() + })?; + + Ok(new_root_hash) + } + + fn commit_state_kv_and_ledger_metadata( + &self, + txns_to_commit: &[impl Borrow + Sync], + first_version: Version, + expected_state_db_usage: StateStorageUsage, + sharded_state_cache: Option<&ShardedStateCache>, ) -> Result<()> { - // Commit multiple batches for different DBs in parallel, then write the overall - // progress. let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["save_transactions_commit"]) + .with_label_values(&["commit_state_kv_and_ledger_metadata"]) .start_timer(); - ledger_batch.put::( - &DbMetadataKey::LedgerCommitProgress, - &DbMetadataValue::Version(last_version), + let state_updates_vec = txns_to_commit + .iter() + .map(|txn_to_commit| txn_to_commit.borrow().state_updates()) + .collect::>(); + + let ledger_metadata_batch = SchemaBatch::new(); + let sharded_state_kv_batches = new_sharded_kv_schema_batch(); + + // TODO(grao): Make state_store take sharded state updates. 
+ self.state_store.put_value_sets( + state_updates_vec, + first_version, + expected_state_db_usage, + sharded_state_cache, + &ledger_metadata_batch, + &sharded_state_kv_batches, )?; - COMMIT_POOL.scope(|s| { - // TODO(grao): Consider propagating the error instead of panic, if necessary. - s.spawn(|_| { - let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["save_transactions_commit___state_kv_commit"]) - .start_timer(); + let last_version = first_version + txns_to_commit.len() as u64 - 1; + ledger_metadata_batch + .put::( + &DbMetadataKey::LedgerCommitProgress, + &DbMetadataValue::Version(last_version), + ) + .unwrap(); + + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_state_kv_and_ledger_metadata___commit"]) + .start_timer(); + thread::scope(|s| { + s.spawn(|| { + self.ledger_db + .metadata_db() + .write_schemas(ledger_metadata_batch) + .unwrap(); + }); + s.spawn(|| { self.state_kv_db .commit(last_version, sharded_state_kv_batches) .unwrap(); }); - // To the best of our current understanding, these tasks are scheduled in - // LIFO order, so put the ledger commit at the end since it's slower. - s.spawn(|_| { + }); + + Ok(()) + } + + fn commit_events( + &self, + txns_to_commit: &[impl Borrow + Sync], + first_version: Version, + ) -> Result<()> { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_events"]) + .start_timer(); + let batch = SchemaBatch::new(); + txns_to_commit + .par_iter() + .with_min_len(128) + .enumerate() + .try_for_each(|(i, txn_to_commit)| -> Result<()> { + self.event_store.put_events( + first_version + i as u64, + txn_to_commit.borrow().events(), + &batch, + )?; + + Ok(()) + })?; + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_events___commit"]) + .start_timer(); + self.ledger_db.event_db().write_schemas(batch) + } + + fn commit_transactions( + &self, + txns_to_commit: &[impl Borrow + Sync], + first_version: Version, + ) -> Result<()> { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_transactions"]) + .start_timer(); + let chunk_size = 512; + txns_to_commit + .par_chunks(chunk_size) + .enumerate() + .try_for_each(|(chunk_index, txns_in_chunk)| -> Result<()> { + let batch = SchemaBatch::new(); + let chunk_first_version = first_version + (chunk_size * chunk_index) as u64; + txns_in_chunk.iter().enumerate().try_for_each( + |(i, txn_to_commit)| -> Result<()> { + self.transaction_store.put_transaction( + chunk_first_version + i as u64, + txn_to_commit.borrow().transaction(), + &batch, + )?; + + Ok(()) + }, + )?; let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["save_transactions_commit___ledger_commit"]) + .with_label_values(&["commit_transactions___commit"]) .start_timer(); - // TODO(grao): Support splitted ledger DBs here. 
- self.ledger_db - .metadata_db() - .write_schemas(ledger_batch) - .unwrap(); - }); - }); + self.ledger_db.transaction_db().write_schemas(batch) + }) + } + + fn commit_transaction_accumulator( + &self, + txns_to_commit: &[impl Borrow + Sync], + first_version: u64, + ) -> Result { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_transaction_accumulator"]) + .start_timer(); + + let batch = SchemaBatch::new(); + let root_hash = + self.ledger_store + .put_transaction_accumulator(first_version, txns_to_commit, &batch)?; + + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_transaction_accumulator___commit"]) + .start_timer(); + self.ledger_db + .transaction_accumulator_db() + .write_schemas(batch)?; + + Ok(root_hash) + } + + fn commit_transaction_infos( + &self, + txns_to_commit: &[impl Borrow + Sync], + first_version: u64, + ) -> Result<()> { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_transaction_infos"]) + .start_timer(); + let batch = SchemaBatch::new(); + txns_to_commit + .par_iter() + .with_min_len(128) + .enumerate() + .try_for_each(|(i, txn_to_commit)| -> Result<()> { + let version = first_version + i as u64; + self.ledger_store.put_transaction_info( + version, + txn_to_commit.borrow().transaction_info(), + &batch, + )?; + + Ok(()) + })?; + + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_transaction_infos___commit"]) + .start_timer(); + self.ledger_db.transaction_info_db().write_schemas(batch) + } + + fn commit_write_sets( + &self, + txns_to_commit: &[impl Borrow + Sync], + first_version: Version, + ) -> Result<()> { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_write_sets"]) + .start_timer(); + let batch = SchemaBatch::new(); + txns_to_commit + .par_iter() + .with_min_len(128) + .enumerate() + .try_for_each(|(i, txn_to_commit)| -> Result<()> { + self.transaction_store.put_write_set( + first_version + i as u64, + txn_to_commit.borrow().write_set(), + &batch, + )?; + + Ok(()) + })?; + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_write_sets___commit"]) + .start_timer(); + self.ledger_db.write_set_db().write_schemas(batch) + } + + fn commit_ledger_info( + &self, + last_version: Version, + new_root_hash: HashValue, + ledger_info_with_sigs: Option<&LedgerInfoWithSignatures>, + ) -> Result<()> { + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["commit_ledger_info"]) + .start_timer(); let ledger_batch = SchemaBatch::new(); - self.save_ledger_info(new_root_hash, ledger_info_with_sigs, &ledger_batch)?; + + // If expected ledger info is provided, verify result root hash and save the ledger info. + if let Some(x) = ledger_info_with_sigs { + let expected_root_hash = x.ledger_info().transaction_accumulator_hash(); + ensure!( + new_root_hash == expected_root_hash, + "Root hash calculated doesn't match expected. {:?} vs {:?}", + new_root_hash, + expected_root_hash, + ); + let current_epoch = self + .ledger_store + .get_latest_ledger_info_option() + .map_or(0, |li| li.ledger_info().next_block_epoch()); + ensure!( + x.ledger_info().epoch() == current_epoch, + "Gap in epoch history. 
Trying to put in LedgerInfo in epoch: {}, current epoch: {}", + x.ledger_info().epoch(), + current_epoch, + ); + + self.ledger_store.put_ledger_info(x, &ledger_batch)?; + } + ledger_batch.put::( &DbMetadataKey::OverallCommitProgress, &DbMetadataValue::Version(last_version), @@ -1912,24 +2014,18 @@ impl DbWriter for AptosDB { &latest_in_memory_state, )?; - let (ledger_batch, sharded_state_kv_batches, new_root_hash) = self - .save_transactions_impl( - txns_to_commit, - first_version, - latest_in_memory_state.current.usage(), - None, - )?; + let new_root_hash = self.calculate_and_commit_ledger_and_state_kv( + txns_to_commit, + first_version, + latest_in_memory_state.current.usage(), + None, + )?; { let mut buffered_state = self.state_store.buffered_state().lock(); let last_version = first_version + txns_to_commit.len() as u64 - 1; - self.commit_ledger_and_state_kv_db( - last_version, - ledger_batch, - sharded_state_kv_batches, - new_root_hash, - ledger_info_with_sigs, - )?; + + self.commit_ledger_info(last_version, new_root_hash, ledger_info_with_sigs)?; self.maybe_commit_state_merkle_db( &mut buffered_state, @@ -1979,24 +2075,21 @@ impl DbWriter for AptosDB { &latest_in_memory_state, )?; - let (ledger_batch, sharded_state_kv_batches, new_root_hash) = self - .save_transactions_impl( - txns_to_commit, - first_version, - latest_in_memory_state.current.usage(), - Some(sharded_state_cache), - )?; + let new_root_hash = self.calculate_and_commit_ledger_and_state_kv( + txns_to_commit, + first_version, + latest_in_memory_state.current.usage(), + Some(sharded_state_cache), + )?; + let _timer = OTHER_TIMERS_SECONDS + .with_label_values(&["save_transactions__others"]) + .start_timer(); { let mut buffered_state = self.state_store.buffered_state().lock(); let last_version = first_version + txns_to_commit.len() as u64 - 1; - self.commit_ledger_and_state_kv_db( - last_version, - ledger_batch, - sharded_state_kv_batches, - new_root_hash, - ledger_info_with_sigs, - )?; + + self.commit_ledger_info(last_version, new_root_hash, ledger_info_with_sigs)?; if !txns_to_commit.is_empty() { let _timer = OTHER_TIMERS_SECONDS @@ -2114,59 +2207,25 @@ impl DbWriter for AptosDB { &DbMetadataValue::Version(version), )?; - self.ledger_pruner - .pruner() - .save_min_readable_version(version, &batch)?; - - let mut state_merkle_batch = SchemaBatch::new(); - StateMerklePruner::prune_genesis( - self.state_merkle_db.clone(), - &mut state_merkle_batch, - )?; + // Apply the change set writes to the database (atomically) and update in-memory state + // + // TODO(grao): Support sharding here. + self.ledger_db.metadata_db().write_schemas(batch)?; + self.ledger_pruner.save_min_readable_version(version)?; self.state_store .state_merkle_pruner - .pruner() - .save_min_readable_version(version, &state_merkle_batch)?; + .save_min_readable_version(version)?; self.state_store .epoch_snapshot_pruner - .pruner() - .save_min_readable_version(version, &state_merkle_batch)?; - - let mut state_kv_batch = SchemaBatch::new(); - StateKvPruner::prune_genesis( - self.state_store.state_kv_db.clone(), - &mut state_kv_batch, - )?; + .save_min_readable_version(version)?; self.state_store .state_kv_pruner - .pruner() - .save_min_readable_version(version, &state_kv_batch)?; - - // Apply the change set writes to the database (atomically) and update in-memory state - // - // TODO(grao): Support sharding here. 
- self.state_merkle_db - .metadata_db() - .write_schemas(state_merkle_batch)?; - self.state_kv_db - .clone() - .commit_nonsharded(version, state_kv_batch)?; - self.ledger_db.metadata_db_arc().write_schemas(batch)?; + .save_min_readable_version(version)?; restore_utils::update_latest_ledger_info(self.ledger_store.clone(), ledger_infos)?; self.state_store.reset(); - self.ledger_pruner.pruner().record_progress(version); - self.state_store - .state_merkle_pruner - .pruner() - .record_progress(version); - self.state_store - .epoch_snapshot_pruner - .pruner() - .record_progress(version); - Ok(()) }) } diff --git a/storage/aptosdb/src/pruner/db_pruner.rs b/storage/aptosdb/src/pruner/db_pruner.rs index 80d4b47595cf4..acbbb9f63fe2b 100644 --- a/storage/aptosdb/src/pruner/db_pruner.rs +++ b/storage/aptosdb/src/pruner/db_pruner.rs @@ -3,7 +3,6 @@ use anyhow::{Context, Result}; use aptos_logger::info; -use aptos_schemadb::SchemaBatch; use aptos_types::transaction::Version; use std::cmp::min; @@ -32,11 +31,8 @@ pub trait DBPruner: Send + Sync { /// Initializes the least readable version stored in underlying DB storage fn initialize_min_readable_version(&self) -> Result; - /// Saves the min readable version. - fn save_min_readable_version(&self, version: Version, batch: &SchemaBatch) -> Result<()>; - - /// Returns the least readable version stores in the DB pruner - fn min_readable_version(&self) -> Version; + /// Returns the progress of the pruner. + fn progress(&self) -> Version; /// Sets the target version for the pruner fn set_target_version(&self, target_version: Version); @@ -49,19 +45,13 @@ pub trait DBPruner: Send + Sync { fn get_current_batch_target(&self, max_versions: Version) -> Version { // Current target version might be less than the target version to ensure we don't prune // more than max_version in one go. - min( - self.min_readable_version() + max_versions, - self.target_version(), - ) + min(self.progress() + max_versions, self.target_version()) } /// Records the current progress of the pruner by updating the least readable version fn record_progress(&self, min_readable_version: Version); /// True if there is pruning work pending to be done fn is_pruning_pending(&self) -> bool { - self.target_version() > self.min_readable_version() + self.target_version() > self.progress() } - - /// (For tests only.) Updates the minimal readable version kept by pruner. - fn testonly_update_min_version(&self, version: Version); } diff --git a/storage/aptosdb/src/pruner/db_sub_pruner.rs b/storage/aptosdb/src/pruner/db_sub_pruner.rs index 016854571ea5a..b4b70af96bcfa 100644 --- a/storage/aptosdb/src/pruner/db_sub_pruner.rs +++ b/storage/aptosdb/src/pruner/db_sub_pruner.rs @@ -1,16 +1,11 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use aptos_schemadb::SchemaBatch; +use aptos_types::transaction::Version; /// Defines the trait for sub-pruner of a parent DB pruner pub trait DBSubPruner { /// Performs the actual pruning, a target version is passed, which is the target the pruner /// tries to prune. 
- fn prune( - &self, - db_batch: &mut SchemaBatch, - min_readable_version: u64, - target_version: u64, - ) -> anyhow::Result<()>; + fn prune(&self, current_progress: Version, target_version: Version) -> anyhow::Result<()>; } diff --git a/storage/aptosdb/src/pruner/event_store/event_store_pruner.rs b/storage/aptosdb/src/pruner/event_store/event_store_pruner.rs index 5ffed3a0b6c94..b35ba237bd4b9 100644 --- a/storage/aptosdb/src/pruner/event_store/event_store_pruner.rs +++ b/storage/aptosdb/src/pruner/event_store/event_store_pruner.rs @@ -1,29 +1,56 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{pruner::db_sub_pruner::DBSubPruner, EventStore}; -use aptos_schemadb::SchemaBatch; + +use crate::{ + pruner::{ + db_sub_pruner::DBSubPruner, pruner_utils::get_or_initialize_ledger_subpruner_progress, + }, + schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + EventStore, +}; +use anyhow::Result; +use aptos_schemadb::{SchemaBatch, DB}; +use aptos_types::transaction::Version; use std::sync::Arc; #[derive(Debug)] pub struct EventStorePruner { event_store: Arc, + event_db: Arc, } impl DBSubPruner for EventStorePruner { - fn prune( - &self, - db_batch: &mut SchemaBatch, - min_readable_version: u64, - target_version: u64, - ) -> anyhow::Result<()> { + fn prune(&self, current_progress: Version, target_version: Version) -> Result<()> { + let batch = SchemaBatch::new(); self.event_store - .prune_events(min_readable_version, target_version, db_batch)?; - Ok(()) + .prune_events(current_progress, target_version, &batch)?; + batch.put::( + &DbMetadataKey::EventPrunerProgress, + &DbMetadataValue::Version(target_version), + )?; + self.event_db.write_schemas(batch) } } impl EventStorePruner { - pub(in crate::pruner) fn new(event_store: Arc) -> Self { - EventStorePruner { event_store } + pub(in crate::pruner) fn new( + event_store: Arc, + event_db: Arc, + metadata_progress: Version, + ) -> Result { + let progress = get_or_initialize_ledger_subpruner_progress( + &event_db, + &DbMetadataKey::EventPrunerProgress, + metadata_progress, + )?; + + let myself = EventStorePruner { + event_store, + event_db, + }; + + myself.prune(progress, metadata_progress)?; + + Ok(myself) } } diff --git a/storage/aptosdb/src/pruner/ledger_pruner_manager.rs b/storage/aptosdb/src/pruner/ledger_pruner_manager.rs index 5488f20fbb4dd..8e9a44eb5c508 100644 --- a/storage/aptosdb/src/pruner/ledger_pruner_manager.rs +++ b/storage/aptosdb/src/pruner/ledger_pruner_manager.rs @@ -3,53 +3,42 @@ use crate::{ ledger_db::LedgerDb, - metrics::{PRUNER_BATCH_SIZE, PRUNER_WINDOW}, + metrics::{PRUNER_BATCH_SIZE, PRUNER_VERSIONS, PRUNER_WINDOW}, pruner::{ - db_pruner::DBPruner, ledger_pruner_worker::LedgerPrunerWorker, ledger_store::ledger_store_pruner::LedgerPruner, pruner_manager::PrunerManager, + pruner_worker::PrunerWorker, }, pruner_utils, }; +use anyhow::Result; use aptos_config::config::LedgerPrunerConfig; use aptos_infallible::Mutex; -use aptos_types::transaction::Version; -use std::{sync::Arc, thread::JoinHandle}; +use aptos_types::transaction::{AtomicVersion, Version}; +use std::sync::{atomic::Ordering, Arc}; /// The `PrunerManager` for `LedgerPruner`. pub(crate) struct LedgerPrunerManager { - pruner_enabled: bool, + ledger_db: Arc, /// DB version window, which dictates how many version of other stores like transaction, ledger /// info, events etc to keep. prune_window: Version, - /// Ledger pruner. 
Is always initialized regardless if the pruner is enabled to keep tracks - /// of the min_readable_version. - pruner: Arc, - /// Wrapper class of the ledger pruner. - pruner_worker: Arc, - /// The worker thread handle for ledger_pruner, created upon Pruner instance construction and - /// joined upon its destruction. It is `None` when the ledger pruner is not enabled or it only - /// becomes `None` after joined in `drop()`. - worker_thread: Option>, - /// We send a batch of version to the underlying pruners for performance reason. This tracks the - /// last version we sent to the pruners. Will only be set if the pruner is enabled. - pub(crate) last_version_sent_to_pruner: Arc>, + /// It is None iff the pruner is not enabled. + pruner_worker: Option, /// Ideal batch size of the versions to be sent to the ledger pruner pruning_batch_size: usize, /// latest version latest_version: Arc>, /// Offset for displaying to users user_pruning_window_offset: u64, + /// The minimal readable version for the ledger data. + min_readable_version: AtomicVersion, } impl PrunerManager for LedgerPrunerManager { type Pruner = LedgerPruner; - fn pruner(&self) -> &Self::Pruner { - &self.pruner - } - fn is_pruner_enabled(&self) -> bool { - self.pruner_enabled + self.pruner_worker.is_some() } fn get_prune_window(&self) -> Version { @@ -57,7 +46,7 @@ impl PrunerManager for LedgerPrunerManager { } fn get_min_readable_version(&self) -> Version { - self.pruner.as_ref().min_readable_version() + self.min_readable_version.load(Ordering::SeqCst) } fn get_min_viable_version(&self) -> Version { @@ -77,91 +66,98 @@ impl PrunerManager for LedgerPrunerManager { fn maybe_set_pruner_target_db_version(&self, latest_version: Version) { *self.latest_version.lock() = latest_version; + let min_readable_version = self.get_min_readable_version(); // Only wake up the ledger pruner if there are `ledger_pruner_pruning_batch_size` pending // versions. - if self.pruner_enabled + if self.is_pruner_enabled() && latest_version - >= *self.last_version_sent_to_pruner.as_ref().lock() - + self.pruning_batch_size as u64 + >= min_readable_version + self.pruning_batch_size as u64 + self.prune_window { self.set_pruner_target_db_version(latest_version); - *self.last_version_sent_to_pruner.as_ref().lock() = latest_version; } } - fn set_pruner_target_db_version(&self, latest_version: Version) { - assert!(self.pruner_enabled); + fn save_min_readable_version(&self, min_readable_version: Version) -> Result<()> { + self.min_readable_version + .store(min_readable_version, Ordering::SeqCst); + + PRUNER_VERSIONS + .with_label_values(&["ledger_pruner", "min_readable"]) + .set(min_readable_version as i64); + + self.ledger_db.write_pruner_progress(min_readable_version) + } + + fn is_pruning_pending(&self) -> bool { self.pruner_worker .as_ref() - .set_target_db_version(latest_version.saturating_sub(self.prune_window)); + .map_or(false, |w| w.is_pruning_pending()) + } + + #[cfg(test)] + fn set_worker_target_version(&self, target_version: Version) { + self.pruner_worker + .as_ref() + .unwrap() + .set_target_db_version(target_version); } } impl LedgerPrunerManager { /// Creates a worker thread that waits on a channel for pruning commands. 
pub fn new(ledger_db: Arc, ledger_pruner_config: LedgerPrunerConfig) -> Self { - let ledger_pruner = pruner_utils::create_ledger_pruner(ledger_db); - - if ledger_pruner_config.enable { - PRUNER_WINDOW - .with_label_values(&["ledger_pruner"]) - .set(ledger_pruner_config.prune_window as i64); - - PRUNER_BATCH_SIZE - .with_label_values(&["ledger_pruner"]) - .set(ledger_pruner_config.batch_size as i64); - } - - let ledger_pruner_worker = Arc::new(LedgerPrunerWorker::new( - Arc::clone(&ledger_pruner), - ledger_pruner_config, - )); - - let ledger_pruner_worker_clone = Arc::clone(&ledger_pruner_worker); - - let ledger_pruner_worker_thread = if ledger_pruner_config.enable { - Some( - std::thread::Builder::new() - .name("aptosdb_ledger_pruner".into()) - .spawn(move || ledger_pruner_worker_clone.as_ref().work()) - .expect("Creating ledger pruner thread should succeed."), - ) + let pruner_worker = if ledger_pruner_config.enable { + Some(Self::init_pruner( + Arc::clone(&ledger_db), + ledger_pruner_config, + )) } else { None }; - let min_readable_version = ledger_pruner.min_readable_version(); + let min_readable_version = + pruner_utils::get_ledger_pruner_progress(&ledger_db).expect("Must succeed."); + + PRUNER_VERSIONS + .with_label_values(&["ledger_pruner", "min_readable"]) + .set(min_readable_version as i64); Self { - pruner_enabled: ledger_pruner_config.enable, + ledger_db, prune_window: ledger_pruner_config.prune_window, - pruner: ledger_pruner, - pruner_worker: ledger_pruner_worker, - worker_thread: ledger_pruner_worker_thread, - last_version_sent_to_pruner: Arc::new(Mutex::new(min_readable_version)), + pruner_worker, pruning_batch_size: ledger_pruner_config.batch_size, latest_version: Arc::new(Mutex::new(min_readable_version)), user_pruning_window_offset: ledger_pruner_config.user_pruning_window_offset, + min_readable_version: AtomicVersion::new(min_readable_version), } } - #[cfg(test)] - pub fn testonly_update_min_version(&self, version: Version) { - self.pruner.testonly_update_min_version(version); + fn init_pruner( + ledger_db: Arc, + ledger_pruner_config: LedgerPrunerConfig, + ) -> PrunerWorker { + let pruner = pruner_utils::create_ledger_pruner(ledger_db); + + PRUNER_WINDOW + .with_label_values(&["ledger_pruner"]) + .set(ledger_pruner_config.prune_window as i64); + + PRUNER_BATCH_SIZE + .with_label_values(&["ledger_pruner"]) + .set(ledger_pruner_config.batch_size as i64); + + PrunerWorker::new(pruner, ledger_pruner_config.batch_size, "ledger") } -} -impl Drop for LedgerPrunerManager { - fn drop(&mut self) { - if self.pruner_enabled { - self.pruner_worker.stop_pruning(); - - assert!(self.worker_thread.is_some()); - self.worker_thread - .take() - .expect("Ledger pruner worker thread must exist.") - .join() - .expect("Ledger pruner worker thread should join peacefully."); - } + fn set_pruner_target_db_version(&self, latest_version: Version) { + assert!(self.pruner_worker.is_some()); + let min_readable_version = latest_version.saturating_sub(self.prune_window); + self.min_readable_version + .store(min_readable_version, Ordering::SeqCst); + self.pruner_worker + .as_ref() + .unwrap() + .set_target_db_version(min_readable_version); } } diff --git a/storage/aptosdb/src/pruner/ledger_pruner_worker.rs b/storage/aptosdb/src/pruner/ledger_pruner_worker.rs deleted file mode 100644 index 46aa204013a2f..0000000000000 --- a/storage/aptosdb/src/pruner/ledger_pruner_worker.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 -use 
crate::pruner::{db_pruner::DBPruner, ledger_store::ledger_store_pruner::LedgerPruner}; -use aptos_config::config::LedgerPrunerConfig; -use aptos_logger::{ - error, - prelude::{sample, SampleRate}, -}; -use aptos_types::transaction::Version; -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread::sleep, - time::Duration, -}; - -/// Maintains the ledger pruner and periodically calls the db_pruner's prune method to prune the DB. -/// This also exposes API to report the progress to the parent thread. -pub struct LedgerPrunerWorker { - /// The worker will sleep for this period of time after pruning each batch. - pruning_time_interval_in_ms: u64, - /// Ledger pruner. - pruner: Arc, - /// Max items to prune per batch. For the ledger pruner, this means the max versions to prune - /// and for the state pruner, this means the max stale nodes to prune. - max_versions_to_prune_per_batch: u64, - /// Indicates whether the pruning loop should be running. Will only be set to true on pruner - /// destruction. - quit_worker: AtomicBool, -} - -impl LedgerPrunerWorker { - pub(crate) fn new( - ledger_pruner: Arc, - ledger_pruner_config: LedgerPrunerConfig, - ) -> Self { - Self { - pruning_time_interval_in_ms: if cfg!(test) { 100 } else { 1 }, - pruner: ledger_pruner, - max_versions_to_prune_per_batch: ledger_pruner_config.batch_size as u64, - quit_worker: AtomicBool::new(false), - } - } - - // Loop that does the real pruning job. - pub(crate) fn work(&self) { - while !self.quit_worker.load(Ordering::Relaxed) { - let pruner_result = self - .pruner - .prune(self.max_versions_to_prune_per_batch as usize); - if pruner_result.is_err() { - sample!( - SampleRate::Duration(Duration::from_secs(1)), - error!(error = ?pruner_result.err().unwrap(), - "Ledger pruner has error.") - ); - sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); - return; - } - if !self.pruner.is_pruning_pending() { - sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); - } - } - } - - pub fn set_target_db_version(&self, target_db_version: Version) { - assert!(target_db_version >= self.pruner.target_version()); - self.pruner.set_target_version(target_db_version); - } - - pub fn stop_pruning(&self) { - self.quit_worker.store(true, Ordering::Relaxed); - } -} diff --git a/storage/aptosdb/src/pruner/ledger_store/ledger_metadata_pruner.rs b/storage/aptosdb/src/pruner/ledger_store/ledger_metadata_pruner.rs new file mode 100644 index 0000000000000..c4514b3687177 --- /dev/null +++ b/storage/aptosdb/src/pruner/ledger_store/ledger_metadata_pruner.rs @@ -0,0 +1,64 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::schema::{ + db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + version_data::VersionDataSchema, +}; +use anyhow::{anyhow, Result}; +use aptos_schemadb::{ReadOptions, SchemaBatch, DB}; +use aptos_types::transaction::Version; +use std::sync::Arc; + +#[derive(Debug)] +pub struct LedgerMetadataPruner { + ledger_metadata_db: Arc, +} + +impl LedgerMetadataPruner { + pub(in crate::pruner) fn new(ledger_metadata_db: Arc) -> Result { + if let Some(v) = + ledger_metadata_db.get::(&DbMetadataKey::LedgerPrunerProgress)? + { + v.expect_version(); + } else { + // NOTE: I **think** all db should have the LedgerPrunerProgress. Have a fallback path + // here in case the database was super old before we introducing this progress counter. 
+ let mut iter = ledger_metadata_db.iter::(ReadOptions::default())?; + iter.seek_to_first(); + let version = match iter.next().transpose()? { + Some((version, _)) => version, + None => 0, + }; + ledger_metadata_db.put::( + &DbMetadataKey::LedgerPrunerProgress, + &DbMetadataValue::Version(version), + )?; + } + + Ok(LedgerMetadataPruner { ledger_metadata_db }) + } + + pub(in crate::pruner) fn prune( + &self, + current_progress: Version, + target_version: Version, + ) -> Result<()> { + let batch = SchemaBatch::new(); + for version in current_progress..target_version { + batch.delete::(&version)?; + } + batch.put::( + &DbMetadataKey::LedgerPrunerProgress, + &DbMetadataValue::Version(target_version), + )?; + self.ledger_metadata_db.write_schemas(batch) + } + + pub(in crate::pruner) fn progress(&self) -> Result { + self.ledger_metadata_db + .get::(&DbMetadataKey::LedgerPrunerProgress)? + .map(|v| v.expect_version()) + .ok_or_else(|| anyhow!("LedgerPrunerProgress cannot be None.")) + } +} diff --git a/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs b/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs index bc92e4a82f332..19d66aae57fe2 100644 --- a/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs +++ b/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs @@ -2,40 +2,40 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - db_metadata::DbMetadataSchema, + ledger_db::LedgerDb, metrics::PRUNER_VERSIONS, pruner::{ db_pruner::DBPruner, db_sub_pruner::DBSubPruner, event_store::event_store_pruner::EventStorePruner, - ledger_store::version_data_pruner::VersionDataPruner, + ledger_store::ledger_metadata_pruner::LedgerMetadataPruner, transaction_store::{ - transaction_store_pruner::TransactionStorePruner, write_set_pruner::WriteSetPruner, + transaction_accumulator_pruner::TransactionAccumulatorPruner, + transaction_info_pruner::TransactionInfoPruner, transaction_pruner::TransactionPruner, + write_set_pruner::WriteSetPruner, }, }, - schema::{ - db_metadata::{DbMetadataKey, DbMetadataValue}, - transaction::TransactionSchema, - }, EventStore, TransactionStore, }; -use aptos_logger::warn; -use aptos_schemadb::{ReadOptions, SchemaBatch, DB}; +use anyhow::Result; use aptos_types::transaction::{AtomicVersion, Version}; -use std::sync::{atomic::Ordering, Arc}; +use std::{ + cmp::min, + sync::{atomic::Ordering, Arc}, +}; pub const LEDGER_PRUNER_NAME: &str = "ledger_pruner"; /// Responsible for pruning everything except for the state tree. pub(crate) struct LedgerPruner { - db: Arc, /// Keeps track of the target version that the pruner needs to achieve. 
target_version: AtomicVersion, - min_readable_version: AtomicVersion, - transaction_store_pruner: Arc, - version_data_pruner: Arc, - event_store_pruner: Arc, - write_set_pruner: Arc, + + progress: AtomicVersion, + + ledger_metadata_pruner: Box, + + sub_pruners: Vec>, } impl DBPruner for LedgerPruner { @@ -43,145 +43,108 @@ impl DBPruner for LedgerPruner { LEDGER_PRUNER_NAME } - fn prune(&self, max_versions: usize) -> anyhow::Result { - if !self.is_pruning_pending() { - return Ok(self.min_readable_version()); - } + fn prune(&self, max_versions: usize) -> Result { + let mut progress = self.progress(); + let target_version = self.target_version(); - // Collect the schema batch writes - let mut db_batch = SchemaBatch::new(); - let current_target_version = self.prune_inner(max_versions, &mut db_batch)?; - self.save_min_readable_version(current_target_version, &db_batch)?; - // Commit all the changes to DB atomically - self.db.write_schemas(db_batch)?; - - // TODO(zcc): recording progress after writing schemas might provide wrong answers to - // API calls when they query min_readable_version while the write_schemas are still in - // progress. - self.record_progress(current_target_version); - Ok(current_target_version) - } + while progress < target_version { + let current_batch_target_version = + min(progress + max_versions as Version, target_version); - fn save_min_readable_version( - &self, - version: Version, - batch: &SchemaBatch, - ) -> anyhow::Result<()> { - batch.put::( - &DbMetadataKey::LedgerPrunerProgress, - &DbMetadataValue::Version(version), - ) - } + self.ledger_metadata_pruner + .prune(progress, current_batch_target_version)?; - fn initialize_min_readable_version(&self) -> anyhow::Result { - let stored_min_version = self - .db - .get::(&DbMetadataKey::LedgerPrunerProgress)? - .map_or(0, |v| v.expect_version()); - let mut iter = self.db.iter::(ReadOptions::default())?; - iter.seek(&stored_min_version)?; - let version = match iter.next().transpose()? { - Some((version, _)) => version, - None => 0, - }; - match version.cmp(&stored_min_version) { - std::cmp::Ordering::Greater => { - let res = self.db.put::( - &DbMetadataKey::LedgerPrunerProgress, - &DbMetadataValue::Version(version), - ); - warn!( - stored_min_version = stored_min_version, - actual_min_version = version, - res = ?res, - "Try to update stored min readable transaction version to the actual one.", - ); - Ok(version) - }, - std::cmp::Ordering::Equal => Ok(version), - std::cmp::Ordering::Less => { - panic!("No transaction is found at or after stored ledger pruner progress ({}), db might be corrupted.", stored_min_version) - }, + // NOTE: If necessary, this can be done in parallel. 
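The NOTE above suggests the sub-pruner fan-out could be parallelized if it ever becomes a bottleneck. A minimal sketch of one way to do that with scoped threads, assuming each sub-pruner writes only to its own database and that the boxed trait objects are Send + Sync (both are assumptions for illustration, not guarantees made by this change):

use crate::pruner::db_sub_pruner::DBSubPruner;
use anyhow::Result;
use aptos_types::transaction::Version;

// Hypothetical helper (illustration only): run one batch on every sub-pruner concurrently
// and surface the first error; the caller still records progress only after all of them finish.
fn prune_sub_pruners_in_parallel(
    sub_pruners: &[Box<dyn DBSubPruner + Send + Sync>],
    progress: Version,
    target_version: Version,
) -> Result<()> {
    std::thread::scope(|s| {
        let handles: Vec<_> = sub_pruners
            .iter()
            .map(|pruner| s.spawn(move || pruner.prune(progress, target_version)))
            .collect();
        handles
            .into_iter()
            .try_for_each(|handle| handle.join().expect("Sub-pruner thread panicked."))
    })
}

The change itself keeps the fan-out sequential via the try_for_each below, which sidesteps any questions about concurrent commits to the per-sub-pruner databases.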
+ self.sub_pruners + .iter() + .try_for_each(|pruner| pruner.prune(progress, current_batch_target_version))?; + + progress = current_batch_target_version; + self.record_progress(progress); } + + Ok(target_version) } - fn min_readable_version(&self) -> Version { - self.min_readable_version.load(Ordering::Relaxed) + fn initialize_min_readable_version(&self) -> Result { + self.ledger_metadata_pruner.progress() + } + + fn progress(&self) -> Version { + self.progress.load(Ordering::SeqCst) } fn set_target_version(&self, target_version: Version) { - self.target_version.store(target_version, Ordering::Relaxed); + self.target_version.store(target_version, Ordering::SeqCst); PRUNER_VERSIONS .with_label_values(&["ledger_pruner", "target"]) .set(target_version as i64); } fn target_version(&self) -> Version { - self.target_version.load(Ordering::Relaxed) + self.target_version.load(Ordering::SeqCst) } fn record_progress(&self, min_readable_version: Version) { - self.min_readable_version - .store(min_readable_version, Ordering::Relaxed); + self.progress.store(min_readable_version, Ordering::SeqCst); PRUNER_VERSIONS - .with_label_values(&["ledger_pruner", "min_readable"]) + .with_label_values(&["ledger_pruner", "progress"]) .set(min_readable_version as i64); } - - /// (For tests only.) Updates the minimal readable version kept by pruner. - fn testonly_update_min_version(&self, version: Version) { - self.min_readable_version.store(version, Ordering::Relaxed) - } } impl LedgerPruner { - pub fn new( - db: Arc, - transaction_store: Arc, - event_store: Arc, - ) -> Self { + pub fn new(ledger_db: Arc) -> Result { + let ledger_metadata_pruner = Box::new( + LedgerMetadataPruner::new(ledger_db.metadata_db_arc()) + .expect("Failed to initialize ledger_metadata_pruner."), + ); + + let metadata_progress = ledger_metadata_pruner.progress()?; + + let transaction_store = Arc::new(TransactionStore::new(Arc::clone(&ledger_db))); + + let event_store_pruner = Box::new(EventStorePruner::new( + Arc::new(EventStore::new(ledger_db.event_db_arc())), + ledger_db.event_db_arc(), + metadata_progress, + )?); + let transaction_accumulator_pruner = Box::new(TransactionAccumulatorPruner::new( + Arc::clone(&transaction_store), + ledger_db.transaction_accumulator_db_arc(), + metadata_progress, + )?); + let transaction_info_pruner = Box::new(TransactionInfoPruner::new( + Arc::clone(&transaction_store), + ledger_db.transaction_info_db_arc(), + metadata_progress, + )?); + let transaction_pruner = Box::new(TransactionPruner::new( + Arc::clone(&transaction_store), + ledger_db.transaction_db_arc(), + metadata_progress, + )?); + let write_set_pruner = Box::new(WriteSetPruner::new( + Arc::clone(&transaction_store), + ledger_db.write_set_db_arc(), + metadata_progress, + )?); + let pruner = LedgerPruner { - db, - target_version: AtomicVersion::new(0), - min_readable_version: AtomicVersion::new(0), - transaction_store_pruner: Arc::new(TransactionStorePruner::new( - transaction_store.clone(), - )), - event_store_pruner: Arc::new(EventStorePruner::new(event_store)), - write_set_pruner: Arc::new(WriteSetPruner::new(transaction_store)), - version_data_pruner: Arc::new(VersionDataPruner::new()), + target_version: AtomicVersion::new(metadata_progress), + progress: AtomicVersion::new(metadata_progress), + ledger_metadata_pruner, + sub_pruners: vec![ + event_store_pruner, + transaction_accumulator_pruner, + transaction_info_pruner, + transaction_pruner, + write_set_pruner, + ], }; - pruner.initialize(); - pruner - } - - fn prune_inner( - &self, - 
max_versions: usize, - db_batch: &mut SchemaBatch, - ) -> anyhow::Result { - let min_readable_version = self.min_readable_version(); - - // Current target version might be less than the target version to ensure we don't prune - // more than max_version in one go. - let current_target_version = self.get_current_batch_target(max_versions as Version); - if current_target_version < min_readable_version { - return Ok(min_readable_version); - } + pruner.initialize(); - self.transaction_store_pruner.prune( - db_batch, - min_readable_version, - current_target_version, - )?; - self.write_set_pruner - .prune(db_batch, min_readable_version, current_target_version)?; - self.version_data_pruner - .prune(db_batch, min_readable_version, current_target_version)?; - self.event_store_pruner - .prune(db_batch, min_readable_version, current_target_version)?; - - Ok(current_target_version) + Ok(pruner) } } diff --git a/storage/aptosdb/src/pruner/ledger_store/mod.rs b/storage/aptosdb/src/pruner/ledger_store/mod.rs index d0f57aa5727f5..764c7e183cbd0 100644 --- a/storage/aptosdb/src/pruner/ledger_store/mod.rs +++ b/storage/aptosdb/src/pruner/ledger_store/mod.rs @@ -1,5 +1,5 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +pub(crate) mod ledger_metadata_pruner; pub(crate) mod ledger_store_pruner; -pub(crate) mod version_data_pruner; diff --git a/storage/aptosdb/src/pruner/ledger_store/version_data_pruner.rs b/storage/aptosdb/src/pruner/ledger_store/version_data_pruner.rs deleted file mode 100644 index 672f9bd84f88e..0000000000000 --- a/storage/aptosdb/src/pruner/ledger_store/version_data_pruner.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{pruner::db_sub_pruner::DBSubPruner, schema::version_data::VersionDataSchema}; -use aptos_schemadb::SchemaBatch; - -#[derive(Debug)] -pub struct VersionDataPruner {} - -impl DBSubPruner for VersionDataPruner { - fn prune( - &self, - db_batch: &mut SchemaBatch, - min_readable_version: u64, - target_version: u64, - ) -> anyhow::Result<()> { - for version in min_readable_version..target_version { - db_batch.delete::(&version)?; - } - Ok(()) - } -} - -impl VersionDataPruner { - pub(in crate::pruner) fn new() -> Self { - VersionDataPruner {} - } -} diff --git a/storage/aptosdb/src/pruner/mod.rs b/storage/aptosdb/src/pruner/mod.rs index c09e264e6ab33..51393f7fcbd20 100644 --- a/storage/aptosdb/src/pruner/mod.rs +++ b/storage/aptosdb/src/pruner/mod.rs @@ -5,13 +5,11 @@ pub(crate) mod db_pruner; pub(crate) mod db_sub_pruner; pub(crate) mod event_store; -pub(crate) mod ledger_pruner_worker; pub(crate) mod ledger_store; pub(crate) mod pruner_manager; pub mod pruner_utils; +pub(crate) mod pruner_worker; pub(crate) mod state_kv_pruner; -pub(crate) mod state_kv_pruner_worker; -pub(crate) mod state_merkle_pruner_worker; pub(crate) mod state_store; pub(crate) mod transaction_store; diff --git a/storage/aptosdb/src/pruner/pruner_manager.rs b/storage/aptosdb/src/pruner/pruner_manager.rs index 58f26a6a05775..3ba5d94692a44 100644 --- a/storage/aptosdb/src/pruner/pruner_manager.rs +++ b/storage/aptosdb/src/pruner/pruner_manager.rs @@ -10,8 +10,9 @@ use aptos_types::transaction::Version; /// The `PrunerManager` is meant to be part of a `AptosDB` instance and runs in the background to /// prune old data. /// -/// It creates a worker thread on construction and joins it on destruction. When destructed, it -/// quits the worker thread eagerly without waiting for all pending work to be done. 
+/// If the pruner is enabled. It creates a worker thread on construction and joins it on +/// destruction. When destructed, it quits the worker thread eagerly without waiting for +/// all pending work to be done. pub trait PrunerManager: Sync { type Pruner: DBPruner; @@ -19,16 +20,20 @@ pub trait PrunerManager: Sync { fn get_prune_window(&self) -> Version; - fn get_min_viable_version(&self) -> Version; + fn get_min_viable_version(&self) -> Version { + unimplemented!() + } fn get_min_readable_version(&self) -> Version; /// Sets pruner target version when necessary. fn maybe_set_pruner_target_db_version(&self, latest_version: Version); - fn set_pruner_target_db_version(&self, latest_version: Version); + // Only used at the end of fast sync to store the min_readable_version to db and update the + // in memory progress. + fn save_min_readable_version(&self, min_readable_version: Version) -> anyhow::Result<()>; - fn pruner(&self) -> &Self::Pruner; + fn is_pruning_pending(&self) -> bool; /// (For tests only.) Notifies the worker thread and waits for it to finish its job by polling /// an internal counter. @@ -54,11 +59,14 @@ pub trait PrunerManager: Sync { let end = Instant::now() + TIMEOUT; while Instant::now() < end { - if !self.pruner().is_pruning_pending() { + if !self.is_pruning_pending() { return Ok(()); } sleep(Duration::from_millis(1)); } anyhow::bail!("Timeout waiting for pruner worker."); } + + #[cfg(test)] + fn set_worker_target_version(&self, target_version: Version); } diff --git a/storage/aptosdb/src/pruner/pruner_metadata.rs b/storage/aptosdb/src/pruner/pruner_metadata.rs deleted file mode 100644 index f38e0f6abc0d0..0000000000000 --- a/storage/aptosdb/src/pruner/pruner_metadata.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use aptos_types::transaction::Version; -use num_derive::FromPrimitive; -use num_derive::ToPrimitive; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] -#[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] -pub(crate) enum PrunerMetadata { - LatestVersion(Version), -} - -#[derive(Clone, Debug, Deserialize, FromPrimitive, PartialEq, Eq, ToPrimitive, Serialize)] -#[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] -#[repr(u8)] -pub enum PrunerTag { - LedgerPruner = 0, - StateMerklePruner = 1, - EpochEndingStateMerklePruner = 2, -} diff --git a/storage/aptosdb/src/pruner/pruner_utils.rs b/storage/aptosdb/src/pruner/pruner_utils.rs index 5575962f2f80f..72433a55b3ad6 100644 --- a/storage/aptosdb/src/pruner/pruner_utils.rs +++ b/storage/aptosdb/src/pruner/pruner_utils.rs @@ -10,12 +10,18 @@ use crate::{ state_kv_pruner::StateKvPruner, state_store::{generics::StaleNodeIndexSchemaTrait, StateMerklePruner}, }, + schema::{ + db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + version_data::VersionDataSchema, + }, state_kv_db::StateKvDb, state_merkle_db::StateMerkleDb, - EventStore, TransactionStore, + utils::get_progress, }; +use anyhow::Result; use aptos_jellyfish_merkle::StaleNodeIndex; -use aptos_schemadb::schema::KeyCodec; +use aptos_schemadb::{schema::KeyCodec, ReadOptions, DB}; +use aptos_types::transaction::Version; use std::sync::Arc; /// A utility function to instantiate the state pruner @@ -30,14 +36,65 @@ where /// A utility function to instantiate the ledger pruner pub(crate) fn create_ledger_pruner(ledger_db: Arc) -> Arc { - Arc::new(LedgerPruner::new( - 
ledger_db.metadata_db_arc(), - Arc::new(TransactionStore::new(Arc::clone(&ledger_db))), - Arc::new(EventStore::new(ledger_db.event_db_arc())), - )) + Arc::new(LedgerPruner::new(ledger_db).expect("Failed to create ledger pruner.")) } /// A utility function to instantiate the state kv pruner. pub(crate) fn create_state_kv_pruner(state_kv_db: Arc) -> Arc { Arc::new(StateKvPruner::new(state_kv_db)) } + +pub(crate) fn get_ledger_pruner_progress(ledger_db: &LedgerDb) -> Result { + Ok( + if let Some(version) = get_progress( + ledger_db.metadata_db(), + &DbMetadataKey::LedgerPrunerProgress, + )? { + version + } else { + let mut iter = ledger_db + .metadata_db() + .iter::(ReadOptions::default())?; + iter.seek_to_first(); + match iter.next().transpose()? { + Some((version, _)) => version, + None => 0, + } + }, + ) +} + +pub(crate) fn get_state_kv_pruner_progress(state_kv_db: &StateKvDb) -> Result { + Ok(get_progress( + state_kv_db.metadata_db(), + &DbMetadataKey::StateKvPrunerProgress, + )? + .unwrap_or(0)) +} + +pub(crate) fn get_state_merkle_pruner_progress( + state_merkle_db: &StateMerkleDb, +) -> Result +where + StaleNodeIndex: KeyCodec, +{ + Ok(get_progress(state_merkle_db.metadata_db(), &S::tag())?.unwrap_or(0)) +} + +pub(crate) fn get_or_initialize_ledger_subpruner_progress( + sub_db: &DB, + progress_key: &DbMetadataKey, + metadata_progress: Version, +) -> Result { + Ok( + if let Some(v) = sub_db.get::(progress_key)? { + v.expect_version() + } else { + sub_db.put::( + progress_key, + &DbMetadataValue::Version(metadata_progress), + )?; + metadata_progress + }, + ) +} diff --git a/storage/aptosdb/src/pruner/pruner_worker.rs b/storage/aptosdb/src/pruner/pruner_worker.rs new file mode 100644 index 0000000000000..954758b87e390 --- /dev/null +++ b/storage/aptosdb/src/pruner/pruner_worker.rs @@ -0,0 +1,118 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::pruner::db_pruner::DBPruner; +use aptos_logger::{ + error, + prelude::{sample, SampleRate}, +}; +use aptos_types::transaction::Version; +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread::{sleep, JoinHandle}, + time::Duration, +}; + +/// Maintains the pruner and periodically calls the db_pruner's prune method to prune the DB. +/// This also exposes API to report the progress to the parent thread. +pub struct PrunerWorker { + // The name of the worker. + worker_name: String, + /// The thread to run pruner. + worker_thread: Option>, + + inner: Arc, +} + +pub struct PrunerWorkerInner { + /// The worker will sleep for this period of time after pruning each batch. + pruning_time_interval_in_ms: u64, + /// The pruner. + pruner: Arc, + /// A threshold to control how many items we prune for each batch. + batch_size: usize, + /// Indicates whether the pruning loop should be running. Will only be set to true on pruner + /// destruction. + quit_worker: AtomicBool, +} + +impl PrunerWorkerInner { + fn new(pruner: Arc, batch_size: usize) -> Arc { + Arc::new(Self { + pruning_time_interval_in_ms: if cfg!(test) { 100 } else { 1 }, + pruner, + batch_size, + quit_worker: AtomicBool::new(false), + }) + } + + // Loop that does the real pruning job. 
+ fn work(&self) { + while !self.quit_worker.load(Ordering::SeqCst) { + let pruner_result = self.pruner.prune(self.batch_size); + if pruner_result.is_err() { + sample!( + SampleRate::Duration(Duration::from_secs(1)), + error!(error = ?pruner_result.err().unwrap(), + "Pruner has error.") + ); + sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); + continue; + } + if !self.pruner.is_pruning_pending() { + sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); + } + } + } + + fn stop_pruning(&self) { + self.quit_worker.store(true, Ordering::SeqCst); + } +} + +impl PrunerWorker { + pub(crate) fn new(pruner: Arc, batch_size: usize, name: &str) -> Self { + let inner = PrunerWorkerInner::new(pruner, batch_size); + let inner_cloned = Arc::clone(&inner); + + let worker_thread = std::thread::Builder::new() + .name(format!("{name}_pruner")) + .spawn(move || inner_cloned.work()) + .expect("Creating pruner thread should succeed."); + + Self { + worker_name: name.into(), + worker_thread: Some(worker_thread), + inner, + } + } + + pub fn set_target_db_version(&self, target_db_version: Version) { + if target_db_version > self.inner.pruner.target_version() { + self.inner.pruner.set_target_version(target_db_version); + } + } + + pub fn is_pruning_pending(&self) -> bool { + self.inner.pruner.is_pruning_pending() + } +} + +impl Drop for PrunerWorker { + fn drop(&mut self) { + self.inner.stop_pruning(); + self.worker_thread + .take() + .unwrap_or_else(|| panic!("Pruner worker ({}) thread must exist.", self.worker_name)) + .join() + .unwrap_or_else(|e| { + panic!( + "Pruner worker ({}) thread should join peacefully: {e:?}", + self.worker_name + ) + }); + } +} diff --git a/storage/aptosdb/src/pruner/state_kv_pruner.rs b/storage/aptosdb/src/pruner/state_kv_pruner.rs index ec8dfc160df49..70a6f317effa6 100644 --- a/storage/aptosdb/src/pruner/state_kv_pruner.rs +++ b/storage/aptosdb/src/pruner/state_kv_pruner.rs @@ -4,14 +4,11 @@ use crate::{ db_metadata::DbMetadataSchema, metrics::PRUNER_VERSIONS, - pruner::{ - db_pruner::DBPruner, db_sub_pruner::DBSubPruner, - state_store::state_value_pruner::StateValuePruner, - }, - pruner_utils, + pruner::{db_pruner::DBPruner, state_store::state_value_pruner::StateValuePruner}, schema::db_metadata::{DbMetadataKey, DbMetadataValue}, state_kv_db::StateKvDb, }; +use anyhow::Result; use aptos_schemadb::SchemaBatch; use aptos_types::transaction::{AtomicVersion, Version}; use std::sync::{atomic::Ordering, Arc}; @@ -23,8 +20,8 @@ pub(crate) struct StateKvPruner { state_kv_db: Arc, /// Keeps track of the target version that the pruner needs to achieve. 
target_version: AtomicVersion, - min_readable_version: AtomicVersion, - state_value_pruner: Arc, + progress: AtomicVersion, + state_value_pruner: Arc, } impl DBPruner for StateKvPruner { @@ -32,31 +29,20 @@ impl DBPruner for StateKvPruner { STATE_KV_PRUNER_NAME } - fn prune(&self, max_versions: usize) -> anyhow::Result { + fn prune(&self, max_versions: usize) -> Result { if !self.is_pruning_pending() { - return Ok(self.min_readable_version()); + return Ok(self.progress()); } let mut db_batch = SchemaBatch::new(); let current_target_version = self.prune_inner(max_versions, &mut db_batch)?; - self.save_min_readable_version(current_target_version, &db_batch)?; + self.save_progress(current_target_version, &db_batch)?; self.state_kv_db.commit_raw_batch(db_batch)?; self.record_progress(current_target_version); Ok(current_target_version) } - fn save_min_readable_version( - &self, - version: Version, - batch: &SchemaBatch, - ) -> anyhow::Result<()> { - batch.put::( - &DbMetadataKey::StateKvPrunerProgress, - &DbMetadataValue::Version(version), - ) - } - fn initialize_min_readable_version(&self) -> anyhow::Result { Ok(self .state_kv_db @@ -65,8 +51,8 @@ impl DBPruner for StateKvPruner { .map_or(0, |v| v.expect_version())) } - fn min_readable_version(&self) -> Version { - self.min_readable_version.load(Ordering::Relaxed) + fn progress(&self) -> Version { + self.progress.load(Ordering::SeqCst) } fn set_target_version(&self, target_version: Version) { @@ -81,17 +67,11 @@ impl DBPruner for StateKvPruner { } fn record_progress(&self, min_readable_version: Version) { - self.min_readable_version - .store(min_readable_version, Ordering::Relaxed); + self.progress.store(min_readable_version, Ordering::Relaxed); PRUNER_VERSIONS - .with_label_values(&["state_kv_pruner", "min_readable"]) + .with_label_values(&["state_kv_pruner", "progress"]) .set(min_readable_version as i64); } - - /// (For tests only.) Updates the minimal readable version kept by pruner. - fn testonly_update_min_version(&self, version: Version) { - self.min_readable_version.store(version, Ordering::Relaxed) - } } impl StateKvPruner { @@ -99,43 +79,35 @@ impl StateKvPruner { let pruner = StateKvPruner { state_kv_db: Arc::clone(&state_kv_db), target_version: AtomicVersion::new(0), - min_readable_version: AtomicVersion::new(0), + progress: AtomicVersion::new(0), state_value_pruner: Arc::new(StateValuePruner::new(state_kv_db)), }; pruner.initialize(); pruner } - /// Prunes the genesis transaction and saves the db alterations to the given change set - pub fn prune_genesis( - state_kv_db: Arc, - db_batch: &mut SchemaBatch, - ) -> anyhow::Result<()> { - let target_version = 1; // The genesis version is 0. 
Delete [0,1) (exclusive) - let max_version = 1; // We should only be pruning a single version - - let state_kv_pruner = pruner_utils::create_state_kv_pruner(state_kv_db); - state_kv_pruner.set_target_version(target_version); - state_kv_pruner.prune_inner(max_version, db_batch)?; - - Ok(()) - } - fn prune_inner( &self, max_versions: usize, db_batch: &mut SchemaBatch, ) -> anyhow::Result { - let min_readable_version = self.min_readable_version(); + let progress = self.progress(); let current_target_version = self.get_current_batch_target(max_versions as Version); - if current_target_version < min_readable_version { - return Ok(min_readable_version); + if current_target_version < progress { + return Ok(progress); } self.state_value_pruner - .prune(db_batch, min_readable_version, current_target_version)?; + .prune(db_batch, progress, current_target_version)?; Ok(current_target_version) } + + fn save_progress(&self, version: Version, batch: &SchemaBatch) -> Result<()> { + batch.put::( + &DbMetadataKey::StateKvPrunerProgress, + &DbMetadataValue::Version(version), + ) + } } diff --git a/storage/aptosdb/src/pruner/state_kv_pruner_manager.rs b/storage/aptosdb/src/pruner/state_kv_pruner_manager.rs index 744269b2422fc..936da9eb0cc9f 100644 --- a/storage/aptosdb/src/pruner/state_kv_pruner_manager.rs +++ b/storage/aptosdb/src/pruner/state_kv_pruner_manager.rs @@ -2,51 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - metrics::{PRUNER_BATCH_SIZE, PRUNER_WINDOW}, + metrics::{PRUNER_BATCH_SIZE, PRUNER_VERSIONS, PRUNER_WINDOW}, pruner::{ - db_pruner::DBPruner, pruner_manager::PrunerManager, state_kv_pruner::StateKvPruner, - state_kv_pruner_worker::StateKvPrunerWorker, + pruner_manager::PrunerManager, pruner_worker::PrunerWorker, state_kv_pruner::StateKvPruner, }, pruner_utils, state_kv_db::StateKvDb, }; +use anyhow::Result; use aptos_config::config::LedgerPrunerConfig; -use aptos_infallible::Mutex; -use aptos_types::transaction::Version; -use std::{sync::Arc, thread::JoinHandle}; +use aptos_types::transaction::{AtomicVersion, Version}; +use std::sync::{atomic::Ordering, Arc}; /// The `PrunerManager` for `StateKvPruner`. pub(crate) struct StateKvPrunerManager { - pruner_enabled: bool, + state_kv_db: Arc, /// DB version window, which dictates how many version of state values to keep. prune_window: Version, - /// State kv pruner. Is always initialized regardless if the pruner is enabled to keep tracks - /// of the min_readable_version. - pruner: Arc, - /// Wrapper class of the state kv pruner. - pruner_worker: Arc, - /// The worker thread handle for state_kv_pruner, created upon Pruner instance construction and - /// joined upon its destruction. It is `None` when the state kv pruner is not enabled or it only - /// becomes `None` after joined in `drop()`. - worker_thread: Option>, - /// We send a batch of version to the underlying pruners for performance reason. This tracks the - /// last version we sent to the pruners. Will only be set if the pruner is enabled. - pub(crate) last_version_sent_to_pruner: Arc>, + /// It is None iff the pruner is not enabled. + pruner_worker: Option, /// Ideal batch size of the versions to be sent to the state kv pruner. pruning_batch_size: usize, - /// latest version - latest_version: Arc>, + /// The minimal readable version for the ledger data. 
+ min_readable_version: AtomicVersion, } impl PrunerManager for StateKvPrunerManager { type Pruner = StateKvPruner; - fn pruner(&self) -> &Self::Pruner { - &self.pruner - } - fn is_pruner_enabled(&self) -> bool { - self.pruner_enabled + self.pruner_worker.is_some() } fn get_prune_window(&self) -> Version { @@ -54,94 +39,99 @@ impl PrunerManager for StateKvPrunerManager { } fn get_min_readable_version(&self) -> Version { - self.pruner.as_ref().min_readable_version() - } - - fn get_min_viable_version(&self) -> Version { - unimplemented!() + self.min_readable_version.load(Ordering::SeqCst) } /// Sets pruner target version when necessary. fn maybe_set_pruner_target_db_version(&self, latest_version: Version) { - *self.latest_version.lock() = latest_version; - - if self.pruner_enabled + let min_readable_version = self.get_min_readable_version(); + // Only wake up the state kv pruner if there are `ledger_pruner_pruning_batch_size` pending + if self.is_pruner_enabled() && latest_version - >= *self.last_version_sent_to_pruner.as_ref().lock() - + self.pruning_batch_size as u64 + >= min_readable_version + self.pruning_batch_size as u64 + self.prune_window { self.set_pruner_target_db_version(latest_version); - *self.last_version_sent_to_pruner.as_ref().lock() = latest_version; } } - fn set_pruner_target_db_version(&self, latest_version: Version) { - assert!(self.pruner_enabled); + fn save_min_readable_version(&self, min_readable_version: Version) -> Result<()> { + self.min_readable_version + .store(min_readable_version, Ordering::SeqCst); + + PRUNER_VERSIONS + .with_label_values(&["state_kv_pruner", "min_readable"]) + .set(min_readable_version as i64); + + self.state_kv_db.write_pruner_progress(min_readable_version) + } + + fn is_pruning_pending(&self) -> bool { + self.pruner_worker + .as_ref() + .map_or(false, |w| w.is_pruning_pending()) + } + + #[cfg(test)] + fn set_worker_target_version(&self, target_version: Version) { self.pruner_worker .as_ref() - .set_target_db_version(latest_version.saturating_sub(self.prune_window)); + .unwrap() + .set_target_db_version(target_version); } } impl StateKvPrunerManager { - /// Creates a worker thread that waits on a channel for pruning commands. 
pub fn new(state_kv_db: Arc, state_kv_pruner_config: LedgerPrunerConfig) -> Self { - let state_kv_pruner = pruner_utils::create_state_kv_pruner(state_kv_db); - - if state_kv_pruner_config.enable { - PRUNER_WINDOW - .with_label_values(&["state_kv_pruner"]) - .set(state_kv_pruner_config.prune_window as i64); - - PRUNER_BATCH_SIZE - .with_label_values(&["state_kv_pruner"]) - .set(state_kv_pruner_config.batch_size as i64); - } - - let state_kv_pruner_worker = Arc::new(StateKvPrunerWorker::new( - Arc::clone(&state_kv_pruner), - state_kv_pruner_config, - )); - - let state_kv_pruner_worker_clone = Arc::clone(&state_kv_pruner_worker); - - let state_kv_pruner_worker_thread = if state_kv_pruner_config.enable { - Some( - std::thread::Builder::new() - .name("aptosdb_state_kv_pruner".into()) - .spawn(move || state_kv_pruner_worker_clone.as_ref().work()) - .expect("Creating state kv pruner thread should succeed."), - ) + let pruner_worker = if state_kv_pruner_config.enable { + Some(Self::init_pruner( + Arc::clone(&state_kv_db), + state_kv_pruner_config, + )) } else { None }; - let min_readable_version = state_kv_pruner.min_readable_version(); + let min_readable_version = + pruner_utils::get_state_kv_pruner_progress(&state_kv_db).expect("Must succeed."); + + PRUNER_VERSIONS + .with_label_values(&["state_kv_pruner", "min_readable"]) + .set(min_readable_version as i64); Self { - pruner_enabled: state_kv_pruner_config.enable, + state_kv_db, prune_window: state_kv_pruner_config.prune_window, - pruner: state_kv_pruner, - pruner_worker: state_kv_pruner_worker, - worker_thread: state_kv_pruner_worker_thread, - last_version_sent_to_pruner: Arc::new(Mutex::new(min_readable_version)), + pruner_worker, pruning_batch_size: state_kv_pruner_config.batch_size, - latest_version: Arc::new(Mutex::new(min_readable_version)), + min_readable_version: AtomicVersion::new(min_readable_version), } } -} -impl Drop for StateKvPrunerManager { - fn drop(&mut self) { - if self.pruner_enabled { - self.pruner_worker.stop_pruning(); - - assert!(self.worker_thread.is_some()); - self.worker_thread - .take() - .expect("State kv pruner worker thread must exist.") - .join() - .expect("State kv pruner worker thread should join peacefully."); - } + fn init_pruner( + state_kv_db: Arc, + state_kv_pruner_config: LedgerPrunerConfig, + ) -> PrunerWorker { + let pruner = pruner_utils::create_state_kv_pruner(state_kv_db); + + PRUNER_WINDOW + .with_label_values(&["state_kv_pruner"]) + .set(state_kv_pruner_config.prune_window as i64); + + PRUNER_BATCH_SIZE + .with_label_values(&["state_kv_pruner"]) + .set(state_kv_pruner_config.batch_size as i64); + + PrunerWorker::new(pruner, state_kv_pruner_config.batch_size, "state_kv") + } + + fn set_pruner_target_db_version(&self, latest_version: Version) { + assert!(self.pruner_worker.is_some()); + let min_readable_version = latest_version.saturating_sub(self.prune_window); + self.min_readable_version + .store(min_readable_version, Ordering::SeqCst); + self.pruner_worker + .as_ref() + .unwrap() + .set_target_db_version(min_readable_version); } } diff --git a/storage/aptosdb/src/pruner/state_kv_pruner_worker.rs b/storage/aptosdb/src/pruner/state_kv_pruner_worker.rs deleted file mode 100644 index abc073ca5fc16..0000000000000 --- a/storage/aptosdb/src/pruner/state_kv_pruner_worker.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::pruner::{db_pruner::DBPruner, state_kv_pruner::StateKvPruner}; -use aptos_config::config::LedgerPrunerConfig; -use 
aptos_logger::{ - error, - prelude::{sample, SampleRate}, -}; -use aptos_types::transaction::Version; -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread::sleep, - time::Duration, -}; - -/// Maintains the state kv pruner and periodically calls the db_pruner's prune method to prune the DB. -/// This also exposes API to report the progress to the parent thread. -pub struct StateKvPrunerWorker { - /// The worker will sleep for this period of time after pruning each batch. - pruning_time_interval_in_ms: u64, - /// State kv pruner. - pruner: Arc, - /// Max number of versions to prune per batch. - max_versions_to_prune_per_batch: u64, - /// Indicates whether the pruning loop should be running. Will only be set to true on pruner - /// destruction. - quit_worker: AtomicBool, -} - -impl StateKvPrunerWorker { - pub(crate) fn new( - state_kv_pruner: Arc, - state_kv_pruner_config: LedgerPrunerConfig, - ) -> Self { - Self { - pruning_time_interval_in_ms: if cfg!(test) { 100 } else { 1 }, - pruner: state_kv_pruner, - max_versions_to_prune_per_batch: state_kv_pruner_config.batch_size as u64, - quit_worker: AtomicBool::new(false), - } - } - - // Loop that does the real pruning job. - pub(crate) fn work(&self) { - while !self.quit_worker.load(Ordering::Relaxed) { - let pruner_result = self - .pruner - .prune(self.max_versions_to_prune_per_batch as usize); - if pruner_result.is_err() { - sample!( - SampleRate::Duration(Duration::from_secs(1)), - error!(error = ?pruner_result.err().unwrap(), - "State kv pruner has error.") - ); - sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); - return; - } - if !self.pruner.is_pruning_pending() { - sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); - } - } - } - - pub fn set_target_db_version(&self, target_db_version: Version) { - assert!(target_db_version >= self.pruner.target_version()); - self.pruner.set_target_version(target_db_version); - } - - pub fn stop_pruning(&self) { - self.quit_worker.store(true, Ordering::Relaxed); - } -} diff --git a/storage/aptosdb/src/pruner/state_merkle_pruner_manager.rs b/storage/aptosdb/src/pruner/state_merkle_pruner_manager.rs index 73e9c96350d9a..8008e06a4ece0 100644 --- a/storage/aptosdb/src/pruner/state_merkle_pruner_manager.rs +++ b/storage/aptosdb/src/pruner/state_merkle_pruner_manager.rs @@ -5,22 +5,24 @@ //! meant to be triggered by other threads as they commit new data to the DB. use crate::{ - metrics::{PRUNER_BATCH_SIZE, PRUNER_WINDOW}, + metrics::{PRUNER_BATCH_SIZE, PRUNER_VERSIONS, PRUNER_WINDOW}, pruner::{ - db_pruner::DBPruner, pruner_manager::PrunerManager, - state_merkle_pruner_worker::StateMerklePrunerWorker, + pruner_worker::PrunerWorker, state_store::{generics::StaleNodeIndexSchemaTrait, StateMerklePruner}, }, pruner_utils, state_merkle_db::StateMerkleDb, }; +use anyhow::Result; use aptos_config::config::StateMerklePrunerConfig; -use aptos_infallible::Mutex; use aptos_jellyfish_merkle::StaleNodeIndex; use aptos_schemadb::schema::KeyCodec; -use aptos_types::transaction::Version; -use std::{sync::Arc, thread::JoinHandle}; +use aptos_types::transaction::{AtomicVersion, Version}; +use std::{ + marker::PhantomData, + sync::{atomic::Ordering, Arc}, +}; /// The `Pruner` is meant to be part of a `AptosDB` instance and runs in the background to prune old /// data. @@ -28,29 +30,20 @@ use std::{sync::Arc, thread::JoinHandle}; /// If the state pruner is enabled, it creates a worker thread on construction and joins it on /// destruction. 
When destructed, it quits the worker thread eagerly without waiting for all /// pending work to be done. -#[derive(Debug)] pub struct StateMerklePrunerManager where StaleNodeIndex: KeyCodec, { - pruner_enabled: bool, + state_merkle_db: Arc, /// DB version window, which dictates how many versions of state store /// to keep. prune_window: Version, - /// State pruner. Is always initialized regardless if the pruner is enabled to keep tracks - /// of the min_readable_version. - pruner: Arc>, - /// Wrapper class of the state pruner. - pub(crate) pruner_worker: Arc>, - /// The worker thread handle for state_merkle_pruner, created upon Pruner instance construction and - /// joined upon its destruction. It is `None` when state pruner is not enabled or it only - /// becomes `None` after joined in `drop()`. - worker_thread: Option>, - /// We send a batch of version to the underlying pruners for performance reason. This tracks the - /// last version we sent to the pruner. Will only be set if the pruner is enabled. - last_version_sent_to_pruner: Arc>, - /// latest version - latest_version: Arc>, + /// It is None iff the pruner is not enabled. + pruner_worker: Option, + /// The minimal readable version for the ledger data. + min_readable_version: AtomicVersion, + + _phantom: PhantomData, } impl PrunerManager for StateMerklePrunerManager @@ -59,12 +52,8 @@ where { type Pruner = StateMerklePruner; - fn pruner(&self) -> &Self::Pruner { - &self.pruner - } - fn is_pruner_enabled(&self) -> bool { - self.pruner_enabled + self.pruner_worker.is_some() } fn get_prune_window(&self) -> Version { @@ -72,29 +61,41 @@ where } fn get_min_readable_version(&self) -> Version { - self.pruner.as_ref().min_readable_version() - } - - fn get_min_viable_version(&self) -> Version { - unimplemented!() + self.min_readable_version.load(Ordering::SeqCst) } /// Sets pruner target version when necessary. fn maybe_set_pruner_target_db_version(&self, latest_version: Version) { - *self.latest_version.lock() = latest_version; - // Always wake up the state pruner. - if self.pruner_enabled { + if self.is_pruner_enabled() { self.set_pruner_target_db_version(latest_version); - *self.last_version_sent_to_pruner.as_ref().lock() = latest_version; } } - fn set_pruner_target_db_version(&self, latest_version: Version) { - assert!(self.pruner_enabled); + fn save_min_readable_version(&self, min_readable_version: Version) -> Result<()> { + self.min_readable_version + .store(min_readable_version, Ordering::SeqCst); + + PRUNER_VERSIONS + .with_label_values(&[S::name(), "min_readable"]) + .set(min_readable_version as i64); + + self.state_merkle_db + .write_pruner_progress(min_readable_version) + } + + fn is_pruning_pending(&self) -> bool { self.pruner_worker .as_ref() - .set_target_db_version(latest_version.saturating_sub(self.prune_window)); + .map_or(false, |w| w.is_pruning_pending()) + } + + #[cfg(test)] + fn set_worker_target_version(&self, target_version: Version) { + self.pruner_worker + .as_ref() + .unwrap() + .set_target_db_version(target_version); } } @@ -103,65 +104,61 @@ where StaleNodeIndex: KeyCodec, { /// Creates a worker thread that waits on a channel for pruning commands. 
- pub fn new(state_merkle_db: Arc, config: StateMerklePrunerConfig) -> Self { - let state_db_clone = Arc::clone(&state_merkle_db); - let pruner = pruner_utils::create_state_merkle_pruner(state_db_clone); - - if config.enable { - PRUNER_WINDOW - .with_label_values(&[S::name()]) - .set(config.prune_window as i64); - - PRUNER_BATCH_SIZE - .with_label_values(&[S::name()]) - .set(config.batch_size as i64); - } - - let pruner_worker = Arc::new(StateMerklePrunerWorker::new(Arc::clone(&pruner), config)); - let state_merkle_pruner_worker_clone = Arc::clone(&pruner_worker); - - let worker_thread = if config.enable { - Some( - std::thread::Builder::new() - .name("aptosdb_state_merkle_pruner".into()) - .spawn(move || state_merkle_pruner_worker_clone.as_ref().work()) - .expect("Creating state pruner thread should succeed."), - ) + pub fn new( + state_merkle_db: Arc, + state_merkle_pruner_config: StateMerklePrunerConfig, + ) -> Self { + let pruner_worker = if state_merkle_pruner_config.enable { + Some(Self::init_pruner( + Arc::clone(&state_merkle_db), + state_merkle_pruner_config, + )) } else { None }; - let min_readable_version = pruner.as_ref().min_readable_version(); + let min_readable_version = pruner_utils::get_state_merkle_pruner_progress(&state_merkle_db) + .expect("Must succeed."); + + PRUNER_VERSIONS + .with_label_values(&[S::name(), "min_readable"]) + .set(min_readable_version as i64); + Self { - pruner_enabled: config.enable, - prune_window: config.prune_window, - pruner, + state_merkle_db, + prune_window: state_merkle_pruner_config.prune_window, pruner_worker, - worker_thread, - last_version_sent_to_pruner: Arc::new(Mutex::new(min_readable_version)), - latest_version: Arc::new(Mutex::new(min_readable_version)), + min_readable_version: AtomicVersion::new(min_readable_version), + _phantom: PhantomData, } } - #[cfg(test)] - pub fn testonly_update_min_version(&self, version: Version) { - self.pruner.testonly_update_min_version(version); + fn init_pruner( + state_merkle_db: Arc, + state_merkle_pruner_config: StateMerklePrunerConfig, + ) -> PrunerWorker { + let pruner = pruner_utils::create_state_merkle_pruner::(state_merkle_db); + + PRUNER_WINDOW + .with_label_values(&[S::name()]) + .set(state_merkle_pruner_config.prune_window as i64); + + PRUNER_BATCH_SIZE + .with_label_values(&[S::name()]) + .set(state_merkle_pruner_config.batch_size as i64); + + PrunerWorker::new( + pruner, + state_merkle_pruner_config.batch_size, + "state_merkle", + ) } -} -impl Drop for StateMerklePrunerManager -where - StaleNodeIndex: KeyCodec, -{ - fn drop(&mut self) { - if self.pruner_enabled { - self.pruner_worker.stop_pruning(); - assert!(self.worker_thread.is_some()); - self.worker_thread - .take() - .expect("State merkle pruner worker thread must exist.") - .join() - .expect("State merkle pruner worker thread should join peacefully."); - } + fn set_pruner_target_db_version(&self, latest_version: Version) { + assert!(self.pruner_worker.is_some()); + self.pruner_worker + .as_ref() + .unwrap() + .set_target_db_version(latest_version.saturating_sub(self.prune_window)); } } diff --git a/storage/aptosdb/src/pruner/state_merkle_pruner_worker.rs b/storage/aptosdb/src/pruner/state_merkle_pruner_worker.rs deleted file mode 100644 index 5d03a6c827dc3..0000000000000 --- a/storage/aptosdb/src/pruner/state_merkle_pruner_worker.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 -use crate::pruner::{ - db_pruner::DBPruner, - state_store::{generics::StaleNodeIndexSchemaTrait, 
StateMerklePruner}, -}; -use aptos_config::config::StateMerklePrunerConfig; -use aptos_jellyfish_merkle::StaleNodeIndex; -use aptos_logger::{ - error, - prelude::{sample, SampleRate}, -}; -use aptos_schemadb::schema::KeyCodec; -use aptos_types::transaction::Version; -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread::sleep, - time::Duration, -}; - -/// Maintains the state store pruner and periodically calls the db_pruner's prune method to prune -/// the DB. This also exposes API to report the progress to the parent thread. -#[derive(Debug)] -pub struct StateMerklePrunerWorker { - /// The worker will sleep for this period of time after pruning each batch. - pruning_time_interval_in_ms: u64, - /// State store pruner. - pruner: Arc>, - /// Max items to prune per batch (i.e. the max stale nodes to prune.) - max_node_to_prune_per_batch: u64, - /// Indicates whether the pruning loop should be running. Will only be set to true on pruner - /// destruction. - quit_worker: AtomicBool, - _phantom: std::marker::PhantomData, -} - -impl StateMerklePrunerWorker -where - StaleNodeIndex: KeyCodec, -{ - pub(crate) fn new( - state_merkle_pruner: Arc>, - state_merkle_pruner_config: StateMerklePrunerConfig, - ) -> Self { - Self { - pruning_time_interval_in_ms: if cfg!(test) { 100 } else { 1 }, - pruner: state_merkle_pruner, - max_node_to_prune_per_batch: state_merkle_pruner_config.batch_size as u64, - quit_worker: AtomicBool::new(false), - _phantom: std::marker::PhantomData, - } - } - - // Loop that does the real pruning job. - pub(crate) fn work(&self) { - while !self.quit_worker.load(Ordering::Relaxed) { - let pruner_result = self.pruner.prune(self.max_node_to_prune_per_batch as usize); - if pruner_result.is_err() { - sample!( - SampleRate::Duration(Duration::from_secs(1)), - error!(error = ?pruner_result.err().unwrap(), - "State pruner has error.") - ); - sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); - return; - } - if !self.pruner.is_pruning_pending() { - sleep(Duration::from_millis(self.pruning_time_interval_in_ms)); - } - } - } - - pub fn set_target_db_version(&self, target_db_version: Version) { - if target_db_version > self.pruner.target_version() { - self.pruner.set_target_version(target_db_version); - } - } - - pub fn stop_pruning(&self) { - self.quit_worker.store(true, Ordering::Relaxed); - } -} diff --git a/storage/aptosdb/src/pruner/state_store/mod.rs b/storage/aptosdb/src/pruner/state_store/mod.rs index f4a0aa418ab98..56c1c94526e6d 100644 --- a/storage/aptosdb/src/pruner/state_store/mod.rs +++ b/storage/aptosdb/src/pruner/state_store/mod.rs @@ -6,10 +6,9 @@ use crate::{ jellyfish_merkle_node::JellyfishMerkleNodeSchema, metrics::PRUNER_VERSIONS, pruner::{db_pruner::DBPruner, state_store::generics::StaleNodeIndexSchemaTrait}, - pruner_utils, schema::db_metadata::DbMetadataValue, state_merkle_db::StateMerkleDb, - StaleNodeIndexCrossEpochSchema, OTHER_TIMERS_SECONDS, + OTHER_TIMERS_SECONDS, }; use anyhow::Result; use aptos_infallible::Mutex; @@ -50,12 +49,12 @@ where fn prune(&self, batch_size: usize) -> Result { if !self.is_pruning_pending() { - return Ok(self.min_readable_version()); + return Ok(self.progress()); } - let min_readable_version = self.min_readable_version(); + let progress = self.progress(); let target_version = self.target_version(); - match self.prune_state_merkle(min_readable_version, target_version, batch_size, None) { + match self.prune_state_merkle(progress, target_version, batch_size, None) { Ok(new_min_readable_version) => 
Ok(new_min_readable_version), Err(e) => { error!( @@ -68,14 +67,6 @@ where } } - fn save_min_readable_version( - &self, - version: Version, - batch: &SchemaBatch, - ) -> anyhow::Result<()> { - batch.put::(&S::tag(), &DbMetadataValue::Version(version)) - } - fn initialize_min_readable_version(&self) -> Result { Ok(self .state_merkle_db @@ -84,7 +75,7 @@ where .map_or(0, |v| v.expect_version())) } - fn min_readable_version(&self) -> Version { + fn progress(&self) -> Version { let (version, _) = *self.progress.lock(); version } @@ -109,11 +100,6 @@ where let (min_readable_version, fully_pruned) = *self.progress.lock(); self.target_version() > min_readable_version || !fully_pruned } - - /// (For tests only.) Updates the minimal readable version kept by pruner. - fn testonly_update_min_version(&self, version: Version) { - self.record_progress_impl(version, true /* is_fully_pruned */); - } } impl StateMerklePruner @@ -170,7 +156,7 @@ where batch.delete::(&index) })?; - self.save_min_readable_version(new_min_readable_version, &batch)?; + self.save_progress(new_min_readable_version, &batch)?; // TODO(grao): Support sharding here. self.state_merkle_db.metadata_db().write_schemas(batch)?; @@ -187,7 +173,7 @@ where fn record_progress_impl(&self, min_readable_version: Version, is_fully_pruned: bool) { *self.progress.lock() = (min_readable_version, is_fully_pruned); PRUNER_VERSIONS - .with_label_values(&[S::name(), "min_readable"]) + .with_label_values(&[S::name(), "progress"]) .set(min_readable_version as i64); } @@ -227,31 +213,8 @@ where }; Ok((indices, is_end_of_target_version)) } -} - -impl StateMerklePruner { - /// Prunes the genesis state and saves the db alterations to the given change set - pub fn prune_genesis( - state_merkle_db: Arc, - batch: &mut SchemaBatch, - ) -> Result<()> { - let target_version = 1; // The genesis version is 0. 
Delete [0,1) (exclusive) - let max_version = 1; // We should only be pruning a single version - - let state_merkle_pruner = pruner_utils::create_state_merkle_pruner::< - StaleNodeIndexCrossEpochSchema, - >(state_merkle_db); - state_merkle_pruner.set_target_version(target_version); - let min_readable_version = state_merkle_pruner.min_readable_version(); - let target_version = state_merkle_pruner.target_version(); - state_merkle_pruner.prune_state_merkle( - min_readable_version, - target_version, - max_version, - Some(batch), - )?; - - Ok(()) + fn save_progress(&self, version: Version, batch: &SchemaBatch) -> anyhow::Result<()> { + batch.put::(&S::tag(), &DbMetadataValue::Version(version)) } } diff --git a/storage/aptosdb/src/pruner/state_store/state_value_pruner.rs b/storage/aptosdb/src/pruner/state_store/state_value_pruner.rs index dc728788f6074..efdc2af2af4da 100644 --- a/storage/aptosdb/src/pruner/state_store/state_value_pruner.rs +++ b/storage/aptosdb/src/pruner/state_store/state_value_pruner.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - pruner::db_sub_pruner::DBSubPruner, schema::{stale_state_value_index::StaleStateValueIndexSchema, state_value::StateValueSchema}, state_kv_db::StateKvDb, }; @@ -13,8 +12,12 @@ pub struct StateValuePruner { state_kv_db: Arc, } -impl DBSubPruner for StateValuePruner { - fn prune( +impl StateValuePruner { + pub(in crate::pruner) fn new(state_kv_db: Arc) -> Self { + StateValuePruner { state_kv_db } + } + + pub(in crate::pruner) fn prune( &self, db_batch: &mut SchemaBatch, min_readable_version: u64, @@ -37,9 +40,3 @@ impl DBSubPruner for StateValuePruner { Ok(()) } } - -impl StateValuePruner { - pub(in crate::pruner) fn new(state_kv_db: Arc) -> Self { - StateValuePruner { state_kv_db } - } -} diff --git a/storage/aptosdb/src/pruner/state_store/test.rs b/storage/aptosdb/src/pruner/state_store/test.rs index 85124a2c2975e..15b53a8b07d77 100644 --- a/storage/aptosdb/src/pruner/state_store/test.rs +++ b/storage/aptosdb/src/pruner/state_store/test.rs @@ -3,7 +3,6 @@ use crate::{ new_sharded_kv_schema_batch, - pruner::{state_merkle_pruner_worker::StateMerklePrunerWorker, *}, stale_node_index::StaleNodeIndexSchema, stale_state_value_index::StaleStateValueIndexSchema, state_merkle_db::StateMerkleDb, @@ -327,54 +326,6 @@ fn test_state_store_pruner_disabled() { } } -#[test] -fn test_worker_quit_eagerly() { - let key = StateKey::raw(String::from("test_key1").into_bytes()); - - let value0 = StateValue::from(String::from("test_val1").into_bytes()); - let value1 = StateValue::from(String::from("test_val2").into_bytes()); - let value2 = StateValue::from(String::from("test_val3").into_bytes()); - - let tmp_dir = TempPath::new(); - let aptos_db = AptosDB::new_for_test(&tmp_dir); - let state_store = &aptos_db.state_store; - - let _root0 = put_value_set( - state_store, - vec![(key.clone(), value0.clone())], - 0, /* version */ - ); - let _root1 = put_value_set( - state_store, - vec![(key.clone(), value1.clone())], - 1, /* version */ - ); - let _root2 = put_value_set( - state_store, - vec![(key.clone(), value2.clone())], - 2, /* version */ - ); - - { - let state_merkle_pruner = pruner_utils::create_state_merkle_pruner::( - Arc::clone(&aptos_db.state_merkle_db), - ); - let worker = StateMerklePrunerWorker::new(state_merkle_pruner, StateMerklePrunerConfig { - enable: true, - prune_window: 1, - batch_size: 100, - }); - worker.set_target_db_version(/*target_db_version=*/ 1); - worker.set_target_db_version(/*target_db_version=*/ 2); - // Worker quits 
immediately. - worker.stop_pruning(); - worker.work(); - verify_state_in_store(state_store, key.clone(), Some(&value0), 0); - verify_state_in_store(state_store, key.clone(), Some(&value1), 1); - verify_state_in_store(state_store, key, Some(&value2), 2); - } -} - proptest! { #![proptest_config(ProptestConfig::with_cases(10))] diff --git a/storage/aptosdb/src/pruner/transaction_store/mod.rs b/storage/aptosdb/src/pruner/transaction_store/mod.rs index e589ef0711b47..3b5d05337ce7a 100644 --- a/storage/aptosdb/src/pruner/transaction_store/mod.rs +++ b/storage/aptosdb/src/pruner/transaction_store/mod.rs @@ -3,5 +3,7 @@ #[cfg(test)] mod test; -pub(crate) mod transaction_store_pruner; +pub(crate) mod transaction_accumulator_pruner; +pub(crate) mod transaction_info_pruner; +pub(crate) mod transaction_pruner; pub(crate) mod write_set_pruner; diff --git a/storage/aptosdb/src/pruner/transaction_store/test.rs b/storage/aptosdb/src/pruner/transaction_store/test.rs index 5b8124bfd09fc..da113075b5b27 100644 --- a/storage/aptosdb/src/pruner/transaction_store/test.rs +++ b/storage/aptosdb/src/pruner/transaction_store/test.rs @@ -133,10 +133,6 @@ fn verify_txn_store_pruner( .wake_and_wait_pruner(i as u64 /* latest_version */) .unwrap(); // ensure that all transaction up to i * 2 has been pruned - assert_eq!( - *pruner.last_version_sent_to_pruner.as_ref().lock(), - i as u64 - ); for j in 0..i { verify_txn_not_in_store(transaction_store, &txns, j as u64, ledger_version); // Ensure that transaction accumulator is pruned in DB. This can be done by trying to @@ -250,14 +246,25 @@ fn put_txn_in_store( .write_schemas(transaction_batch) .unwrap(); let transaction_info_batch = SchemaBatch::new(); + let transaction_accumulator_batch = SchemaBatch::new(); ledger_store - .put_transaction_infos(0, txn_infos, &transaction_info_batch) + .put_transaction_infos( + 0, + txn_infos, + &transaction_info_batch, + &transaction_accumulator_batch, + ) .unwrap(); aptos_db .ledger_db .transaction_info_db() .write_schemas(transaction_info_batch) .unwrap(); + aptos_db + .ledger_db + .transaction_accumulator_db() + .write_schemas(transaction_accumulator_batch) + .unwrap(); } fn verify_transaction_in_transaction_store( diff --git a/storage/aptosdb/src/pruner/transaction_store/transaction_accumulator_pruner.rs b/storage/aptosdb/src/pruner/transaction_store/transaction_accumulator_pruner.rs new file mode 100644 index 0000000000000..47769b16aaf0e --- /dev/null +++ b/storage/aptosdb/src/pruner/transaction_store/transaction_accumulator_pruner.rs @@ -0,0 +1,59 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + pruner::{ + db_sub_pruner::DBSubPruner, pruner_utils::get_or_initialize_ledger_subpruner_progress, + }, + schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + TransactionStore, +}; +use anyhow::Result; +use aptos_schemadb::{SchemaBatch, DB}; +use aptos_types::transaction::Version; +use std::sync::Arc; + +#[derive(Debug)] +pub struct TransactionAccumulatorPruner { + transaction_store: Arc, + transaction_accumulator_db: Arc, +} + +impl DBSubPruner for TransactionAccumulatorPruner { + fn prune(&self, current_progress: Version, target_version: Version) -> Result<()> { + let batch = SchemaBatch::new(); + self.transaction_store.prune_transaction_accumulator( + current_progress, + target_version, + &batch, + )?; + batch.put::( + &DbMetadataKey::TransactionAccumulatorPrunerProgress, + &DbMetadataValue::Version(target_version), + )?; + 
self.transaction_accumulator_db.write_schemas(batch) + } +} + +impl TransactionAccumulatorPruner { + pub(in crate::pruner) fn new( + transaction_store: Arc, + transaction_accumulator_db: Arc, + metadata_progress: Version, + ) -> Result { + let progress = get_or_initialize_ledger_subpruner_progress( + &transaction_accumulator_db, + &DbMetadataKey::TransactionAccumulatorPrunerProgress, + metadata_progress, + )?; + + let myself = TransactionAccumulatorPruner { + transaction_store, + transaction_accumulator_db, + }; + + myself.prune(progress, metadata_progress)?; + + Ok(myself) + } +} diff --git a/storage/aptosdb/src/pruner/transaction_store/transaction_info_pruner.rs b/storage/aptosdb/src/pruner/transaction_store/transaction_info_pruner.rs new file mode 100644 index 0000000000000..4887f23e65209 --- /dev/null +++ b/storage/aptosdb/src/pruner/transaction_store/transaction_info_pruner.rs @@ -0,0 +1,59 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + pruner::{ + db_sub_pruner::DBSubPruner, pruner_utils::get_or_initialize_ledger_subpruner_progress, + }, + schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + TransactionStore, +}; +use anyhow::Result; +use aptos_schemadb::{SchemaBatch, DB}; +use aptos_types::transaction::Version; +use std::sync::Arc; + +#[derive(Debug)] +pub struct TransactionInfoPruner { + transaction_store: Arc, + transaction_info_db: Arc, +} + +impl DBSubPruner for TransactionInfoPruner { + fn prune(&self, current_progress: Version, target_version: Version) -> Result<()> { + let batch = SchemaBatch::new(); + self.transaction_store.prune_transaction_info_schema( + current_progress, + target_version, + &batch, + )?; + batch.put::( + &DbMetadataKey::TransactionInfoPrunerProgress, + &DbMetadataValue::Version(target_version), + )?; + self.transaction_info_db.write_schemas(batch) + } +} + +impl TransactionInfoPruner { + pub(in crate::pruner) fn new( + transaction_store: Arc, + transaction_info_db: Arc, + metadata_progress: Version, + ) -> Result { + let progress = get_or_initialize_ledger_subpruner_progress( + &transaction_info_db, + &DbMetadataKey::TransactionInfoPrunerProgress, + metadata_progress, + )?; + + let myself = TransactionInfoPruner { + transaction_store, + transaction_info_db, + }; + + myself.prune(progress, metadata_progress)?; + + Ok(myself) + } +} diff --git a/storage/aptosdb/src/pruner/transaction_store/transaction_pruner.rs b/storage/aptosdb/src/pruner/transaction_store/transaction_pruner.rs new file mode 100644 index 0000000000000..e5e8e21ebedf4 --- /dev/null +++ b/storage/aptosdb/src/pruner/transaction_store/transaction_pruner.rs @@ -0,0 +1,75 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + pruner::{ + db_sub_pruner::DBSubPruner, pruner_utils::get_or_initialize_ledger_subpruner_progress, + }, + schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + TransactionStore, +}; +use anyhow::Result; +use aptos_schemadb::{SchemaBatch, DB}; +use aptos_types::transaction::{Transaction, Version}; +use std::sync::Arc; + +#[derive(Debug)] +pub struct TransactionPruner { + transaction_store: Arc, + transaction_db: Arc, +} + +impl DBSubPruner for TransactionPruner { + fn prune(&self, current_progress: Version, target_version: Version) -> Result<()> { + let batch = SchemaBatch::new(); + let candidate_transactions = + self.get_pruning_candidate_transactions(current_progress, target_version)?; + self.transaction_store + 
.prune_transaction_by_hash(&candidate_transactions, &batch)?; + self.transaction_store + .prune_transaction_by_account(&candidate_transactions, &batch)?; + self.transaction_store.prune_transaction_schema( + current_progress, + target_version, + &batch, + )?; + batch.put::( + &DbMetadataKey::TransactionPrunerProgress, + &DbMetadataValue::Version(target_version), + )?; + self.transaction_db.write_schemas(batch) + } +} + +impl TransactionPruner { + pub(in crate::pruner) fn new( + transaction_store: Arc, + transaction_db: Arc, + metadata_progress: Version, + ) -> Result { + let progress = get_or_initialize_ledger_subpruner_progress( + &transaction_db, + &DbMetadataKey::TransactionPrunerProgress, + metadata_progress, + )?; + + let myself = TransactionPruner { + transaction_store, + transaction_db, + }; + + myself.prune(progress, metadata_progress)?; + + Ok(myself) + } + + fn get_pruning_candidate_transactions( + &self, + start: Version, + end: Version, + ) -> anyhow::Result> { + self.transaction_store + .get_transaction_iter(start, (end - start) as usize)? + .collect() + } +} diff --git a/storage/aptosdb/src/pruner/transaction_store/transaction_store_pruner.rs b/storage/aptosdb/src/pruner/transaction_store/transaction_store_pruner.rs deleted file mode 100644 index 68a151be260f8..0000000000000 --- a/storage/aptosdb/src/pruner/transaction_store/transaction_store_pruner.rs +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 -use crate::{pruner::db_sub_pruner::DBSubPruner, TransactionStore}; -use aptos_schemadb::SchemaBatch; -use aptos_types::transaction::{Transaction, Version}; -use std::sync::Arc; - -#[derive(Debug)] -pub struct TransactionStorePruner { - transaction_store: Arc, -} - -impl DBSubPruner for TransactionStorePruner { - fn prune( - &self, - db_batch: &mut SchemaBatch, - min_readable_version: u64, - target_version: u64, - ) -> anyhow::Result<()> { - // Current target version might be less than the target version to ensure we don't prune - // more than max_version in one go. - - let candidate_transactions = - self.get_pruning_candidate_transactions(min_readable_version, target_version)?; - self.transaction_store - .prune_transaction_by_hash(&candidate_transactions, db_batch)?; - self.transaction_store - .prune_transaction_by_account(&candidate_transactions, db_batch)?; - self.transaction_store.prune_transaction_schema( - min_readable_version, - target_version, - db_batch, - )?; - self.transaction_store.prune_transaction_info_schema( - min_readable_version, - target_version, - db_batch, - )?; - self.transaction_store.prune_transaction_accumulator( - min_readable_version, - target_version, - db_batch, - )?; - Ok(()) - } -} - -impl TransactionStorePruner { - pub(in crate::pruner) fn new(transaction_store: Arc) -> Self { - TransactionStorePruner { transaction_store } - } - - fn get_pruning_candidate_transactions( - &self, - start: Version, - end: Version, - ) -> anyhow::Result> { - self.transaction_store - .get_transaction_iter(start, (end - start) as usize)? 
- .collect() - } -} diff --git a/storage/aptosdb/src/pruner/transaction_store/write_set_pruner.rs b/storage/aptosdb/src/pruner/transaction_store/write_set_pruner.rs index 53f63bd11b92f..dab713b98aa0f 100644 --- a/storage/aptosdb/src/pruner/transaction_store/write_set_pruner.rs +++ b/storage/aptosdb/src/pruner/transaction_store/write_set_pruner.rs @@ -1,29 +1,56 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{pruner::db_sub_pruner::DBSubPruner, TransactionStore}; -use aptos_schemadb::SchemaBatch; + +use crate::{ + pruner::{ + db_sub_pruner::DBSubPruner, pruner_utils::get_or_initialize_ledger_subpruner_progress, + }, + schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + TransactionStore, +}; +use anyhow::Result; +use aptos_schemadb::{SchemaBatch, DB}; +use aptos_types::transaction::Version; use std::sync::Arc; #[derive(Debug)] pub struct WriteSetPruner { transaction_store: Arc, + write_set_db: Arc, } impl DBSubPruner for WriteSetPruner { - fn prune( - &self, - db_batch: &mut SchemaBatch, - min_readable_version: u64, - target_version: u64, - ) -> anyhow::Result<()> { + fn prune(&self, current_progress: Version, target_version: Version) -> Result<()> { + let batch = SchemaBatch::new(); self.transaction_store - .prune_write_set(min_readable_version, target_version, db_batch)?; - Ok(()) + .prune_write_set(current_progress, target_version, &batch)?; + batch.put::( + &DbMetadataKey::WriteSetPrunerProgress, + &DbMetadataValue::Version(target_version), + )?; + self.write_set_db.write_schemas(batch) } } impl WriteSetPruner { - pub(in crate::pruner) fn new(transaction_store: Arc) -> Self { - WriteSetPruner { transaction_store } + pub(in crate::pruner) fn new( + transaction_store: Arc, + write_set_db: Arc, + metadata_progress: Version, + ) -> Result { + let progress = get_or_initialize_ledger_subpruner_progress( + &write_set_db, + &DbMetadataKey::WriteSetPrunerProgress, + metadata_progress, + )?; + + let myself = WriteSetPruner { + transaction_store, + write_set_db, + }; + + myself.prune(progress, metadata_progress)?; + + Ok(myself) } } diff --git a/storage/aptosdb/src/schema/db_metadata/mod.rs b/storage/aptosdb/src/schema/db_metadata/mod.rs index fc5dbb137fbaa..61958912d39bd 100644 --- a/storage/aptosdb/src/schema/db_metadata/mod.rs +++ b/storage/aptosdb/src/schema/db_metadata/mod.rs @@ -57,6 +57,11 @@ pub enum DbMetadataKey { StateKvShardCommitProgress(ShardId), StateMerkleCommitProgress, StateMerkleShardCommitProgress(ShardId), + EventPrunerProgress, + TransactionAccumulatorPrunerProgress, + TransactionInfoPrunerProgress, + TransactionPrunerProgress, + WriteSetPrunerProgress, } define_schema!( diff --git a/storage/aptosdb/src/state_kv_db.rs b/storage/aptosdb/src/state_kv_db.rs index 440b1ea7fc94d..d37a385343168 100644 --- a/storage/aptosdb/src/state_kv_db.rs +++ b/storage/aptosdb/src/state_kv_db.rs @@ -38,7 +38,7 @@ impl StateKvDb { readonly: bool, ledger_db: Arc, ) -> Result { - if !rocksdb_configs.use_state_kv_db { + if !rocksdb_configs.split_ledger_db { info!("State K/V DB is not enabled!"); return Ok(Self { state_kv_metadata_db: Arc::clone(&ledger_db), @@ -95,20 +95,6 @@ impl StateKvDb { Ok(state_kv_db) } - // TODO(grao): Remove this function. 
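The write_set_pruner hunk above shows the other half of the pattern: each sub-pruner's new() reads (or initializes) its own progress marker via get_or_initialize_ledger_subpruner_progress and then immediately prunes up to the ledger pruner's overall metadata progress, so a newly introduced column-family pruner catches up before it is handed out. A hedged sketch of that catch-up step with simplified stand-ins (a plain HashMap in place of the metadata DB; the helper and key names are illustrative):

```rust
// Sketch only: read this pruner's own progress marker, fall back to the
// overall ledger pruner progress when the marker does not exist yet, then
// prune up to that overall progress before returning the pruner.
use std::collections::HashMap;

type Version = u64;

fn get_or_initialize_progress(
    db: &mut HashMap<&'static str, Version>,
    key: &'static str,
    metadata_progress: Version,
) -> Version {
    *db.entry(key).or_insert(metadata_progress)
}

struct SubPrunerSketch {
    progress: Version,
}

impl SubPrunerSketch {
    fn new(db: &mut HashMap<&'static str, Version>, metadata_progress: Version) -> Self {
        let progress =
            get_or_initialize_progress(db, "TransactionInfoPrunerProgress", metadata_progress);
        let mut pruner = SubPrunerSketch { progress };
        // Catch up: make sure this column family is pruned as far as the
        // overall ledger pruner progress before the pruner is used.
        pruner.prune(metadata_progress);
        pruner
    }

    fn prune(&mut self, target_version: Version) {
        // Real code deletes rows in [self.progress, target_version) here.
        self.progress = self.progress.max(target_version);
    }
}

fn main() {
    let mut db = HashMap::new();
    let pruner = SubPrunerSketch::new(&mut db, 42);
    assert_eq!(pruner.progress, 42);
}
```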
- pub(crate) fn commit_nonsharded( - &self, - version: Version, - state_kv_batch: SchemaBatch, - ) -> Result<()> { - state_kv_batch.put::( - &DbMetadataKey::StateKvCommitProgress, - &DbMetadataValue::Version(version), - )?; - - self.commit_raw_batch(state_kv_batch) - } - pub(crate) fn commit( &self, version: Version, @@ -141,6 +127,13 @@ impl StateKvDb { ) } + pub(crate) fn write_pruner_progress(&self, version: Version) -> Result<()> { + self.state_kv_metadata_db.put::( + &DbMetadataKey::StateKvPrunerProgress, + &DbMetadataValue::Version(version), + ) + } + pub(crate) fn create_checkpoint( db_root_path: impl AsRef, cp_root_path: impl AsRef, diff --git a/storage/aptosdb/src/state_merkle_db.rs b/storage/aptosdb/src/state_merkle_db.rs index 57ddd55afd293..e44c93073e36a 100644 --- a/storage/aptosdb/src/state_merkle_db.rs +++ b/storage/aptosdb/src/state_merkle_db.rs @@ -362,6 +362,13 @@ impl StateMerkleDb { &self.lru_cache } + pub(crate) fn write_pruner_progress(&self, version: Version) -> Result<()> { + self.state_merkle_metadata_db.put::( + &DbMetadataKey::StateMerklePrunerProgress, + &DbMetadataValue::Version(version), + ) + } + fn db_by_key(&self, node_key: &NodeKey) -> &DB { if let Some(shard_id) = node_key.get_shard_id() { self.db_shard(shard_id) diff --git a/storage/aptosdb/src/test_helper.rs b/storage/aptosdb/src/test_helper.rs index 2b33189e03064..1163259bb730f 100644 --- a/storage/aptosdb/src/test_helper.rs +++ b/storage/aptosdb/src/test_helper.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -///! This module provides reusable helpers in tests. +//! This module provides reusable helpers in tests. use super::*; use crate::{ jellyfish_merkle_node::JellyfishMerkleNodeSchema, schema::state_value::StateValueSchema, @@ -868,7 +868,7 @@ pub fn verify_committed_transactions( pub fn put_transaction_info(db: &AptosDB, version: Version, txn_info: &TransactionInfo) { let batch = SchemaBatch::new(); db.ledger_store - .put_transaction_infos(version, &[txn_info.clone()], &batch) + .put_transaction_infos(version, &[txn_info.clone()], &batch, &batch) .unwrap(); db.ledger_db.transaction_db().write_schemas(batch).unwrap(); } diff --git a/storage/aptosdb/src/transaction_store/test.rs b/storage/aptosdb/src/transaction_store/test.rs index b6fc708f3f7d2..09e79c219969c 100644 --- a/storage/aptosdb/src/transaction_store/test.rs +++ b/storage/aptosdb/src/transaction_store/test.rs @@ -2,6 +2,8 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::redundant_clone)] // Required to work around prop_assert_eq! limitations + use super::*; use crate::AptosDB; use aptos_proptest_helpers::Index; diff --git a/storage/aptosdb/src/utils/mod.rs b/storage/aptosdb/src/utils/mod.rs index 645bf83b587b7..1ebc49f52b579 100644 --- a/storage/aptosdb/src/utils/mod.rs +++ b/storage/aptosdb/src/utils/mod.rs @@ -3,3 +3,20 @@ pub mod iterators; pub(crate) mod truncation_helper; + +use crate::schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}; +use anyhow::Result; +use aptos_schemadb::DB; +use aptos_types::transaction::Version; + +pub(crate) fn get_progress(db: &DB, progress_key: &DbMetadataKey) -> Result> { + Ok( + if let Some(DbMetadataValue::Version(progress)) = + db.get::(progress_key)? 
+ { + Some(progress) + } else { + None + }, + ) +} diff --git a/storage/aptosdb/src/utils/truncation_helper.rs b/storage/aptosdb/src/utils/truncation_helper.rs index 40556cc75ae8c..2b40e9218ca24 100644 --- a/storage/aptosdb/src/utils/truncation_helper.rs +++ b/storage/aptosdb/src/utils/truncation_helper.rs @@ -17,6 +17,7 @@ use crate::{ }, state_kv_db::StateKvDb, state_merkle_db::StateMerkleDb, + utils::get_progress, EventStore, TransactionStore, NUM_STATE_SHARDS, }; use anyhow::Result; @@ -37,15 +38,15 @@ use std::{ }; pub(crate) fn get_overall_commit_progress(ledger_metadata_db: &DB) -> Result> { - get_commit_progress(ledger_metadata_db, &DbMetadataKey::OverallCommitProgress) + get_progress(ledger_metadata_db, &DbMetadataKey::OverallCommitProgress) } pub(crate) fn get_ledger_commit_progress(ledger_metadata_db: &DB) -> Result> { - get_commit_progress(ledger_metadata_db, &DbMetadataKey::LedgerCommitProgress) + get_progress(ledger_metadata_db, &DbMetadataKey::LedgerCommitProgress) } pub(crate) fn get_state_kv_commit_progress(state_kv_db: &StateKvDb) -> Result> { - get_commit_progress( + get_progress( state_kv_db.metadata_db(), &DbMetadataKey::StateKvCommitProgress, ) @@ -54,24 +55,12 @@ pub(crate) fn get_state_kv_commit_progress(state_kv_db: &StateKvDb) -> Result Result> { - get_commit_progress( + get_progress( state_merkle_db.metadata_db(), &DbMetadataKey::StateMerkleCommitProgress, ) } -fn get_commit_progress(db: &DB, progress_key: &DbMetadataKey) -> Result> { - Ok( - if let Some(DbMetadataValue::Version(overall_commit_progress)) = - db.get::(progress_key)? - { - Some(overall_commit_progress) - } else { - None - }, - ) -} - pub(crate) fn truncate_ledger_db( ledger_db: Arc, current_version: Version, diff --git a/storage/backup/backup-cli/src/backup_types/tests.rs b/storage/backup/backup-cli/src/backup_types/tests.rs index 3bcc0cefbf886..7bf8d31e8c8cf 100644 --- a/storage/backup/backup-cli/src/backup_types/tests.rs +++ b/storage/backup/backup-cli/src/backup_types/tests.rs @@ -201,6 +201,11 @@ proptest! { #![proptest_config(ProptestConfig::with_cases(10))] #[test] + // Ignore for now because the pruner now is going to see the version data to figure out the + // progress, but we don't have version data before the state_snapshot_ver. As the result the + // API will throw an error when getting the old transactions. + // TODO(areshand): Figure out a plan for this. + #[ignore] #[cfg_attr(feature = "consensus-only-perf-test", ignore)] fn test_end_to_end(d in test_data_strategy().no_shrink()) { test_end_to_end_impl(d) diff --git a/storage/backup/backup-cli/src/backup_types/transaction/restore.rs b/storage/backup/backup-cli/src/backup_types/transaction/restore.rs index d38a37dd1ce7f..a665003b5284a 100644 --- a/storage/backup/backup-cli/src/backup_types/transaction/restore.rs +++ b/storage/backup/backup-cli/src/backup_types/transaction/restore.rs @@ -482,9 +482,7 @@ impl TransactionRestoreBatchController { // create iterator of txn and its outputs to be replayed after the snapshot. 
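The transaction restore hunk just below drops a redundant .into_iter() before mapping izip!'s output into Ok items. A tiny self-contained illustration, assuming the itertools crate is available:

```rust
// izip! already yields an iterator, so its output can be mapped straight into
// Result::Ok items (e.g. for stream::iter) without an extra .into_iter().
use itertools::izip;

fn main() {
    let txns = vec!["t0", "t1"];
    let txn_infos = vec![10u64, 11];
    let write_sets = vec!["ws0", "ws1"];

    let items: Vec<Result<(&str, u64, &str), ()>> = izip!(txns, txn_infos, write_sets)
        .map(Ok) // no .into_iter() needed here
        .collect();

    assert_eq!(items.len(), 2);
}
```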
Ok(stream::iter( - izip!(txns, txn_infos, write_sets, event_vecs) - .into_iter() - .map(Result::<_>::Ok), + izip!(txns, txn_infos, write_sets, event_vecs).map(Result::<_>::Ok), )) }) }) diff --git a/storage/backup/backup-cli/src/coordinators/restore.rs b/storage/backup/backup-cli/src/coordinators/restore.rs index a4f054f6a96f6..db85a91e90740 100644 --- a/storage/backup/backup-cli/src/coordinators/restore.rs +++ b/storage/backup/backup-cli/src/coordinators/restore.rs @@ -119,13 +119,17 @@ impl RestoreCoordinator { ) .await?; - let target_version = self.global_opt.target_version; - COORDINATOR_TARGET_VERSION.set(target_version as i64); - // calculate the start_version and replay_version let max_txn_ver = metadata_view .max_transaction_version()? .ok_or_else(|| anyhow!("No transaction backup found."))?; + let target_version = std::cmp::min(self.global_opt.target_version, max_txn_ver); + info!( + "User specified target version: {}, max transaction version: {}, Target version is set to {}", + self.global_opt.target_version, max_txn_ver, target_version + ); + + COORDINATOR_TARGET_VERSION.set(target_version as i64); let lhs = self.ledger_history_start_version(); let latest_tree_version = self @@ -185,7 +189,7 @@ impl RestoreCoordinator { snapshot.unwrap() } else { metadata_view - .select_state_snapshot(std::cmp::min(self.target_version(), max_txn_ver))? + .select_state_snapshot(target_version)? .expect("Cannot find tree snapshot before target version") }; diff --git a/storage/backup/backup-cli/src/utils/mod.rs b/storage/backup/backup-cli/src/utils/mod.rs index 3067cf0f6c825..948b05f9be0f4 100644 --- a/storage/backup/backup-cli/src/utils/mod.rs +++ b/storage/backup/backup-cli/src/utils/mod.rs @@ -69,8 +69,6 @@ pub struct RocksdbOpt { #[clap(long, hidden(true))] split_ledger_db: bool, #[clap(long, hidden(true))] - use_state_kv_db: bool, - #[clap(long, hidden(true))] use_sharded_state_merkle_db: bool, #[clap(long, hidden(true), default_value = "5000")] state_kv_db_max_open_files: i32, @@ -100,7 +98,6 @@ impl From for RocksdbConfigs { ..Default::default() }, split_ledger_db: opt.split_ledger_db, - use_state_kv_db: opt.use_state_kv_db, use_sharded_state_merkle_db: opt.use_sharded_state_merkle_db, state_kv_db_config: RocksdbConfig { max_open_files: opt.state_kv_db_max_open_files, diff --git a/storage/backup/backup-cli/src/utils/stream/buffered_x.rs b/storage/backup/backup-cli/src/utils/stream/buffered_x.rs index 03df996233464..62193c712b859 100644 --- a/storage/backup/backup-cli/src/utils/stream/buffered_x.rs +++ b/storage/backup/backup-cli/src/utils/stream/buffered_x.rs @@ -2,9 +2,9 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -///! This is a copy of `futures::stream::buffered` from `futures 0.3.6`, except that it uses -///! `FuturesOrderedX` which provides concurrency control. So we can buffer more results without -///! too many futures driven at the same time. +//! This is a copy of `futures::stream::buffered` from `futures 0.3.6`, except that it uses +//! `FuturesOrderedX` which provides concurrency control. So we can buffer more results without +//! too many futures driven at the same time. 
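The buffered_x doc comment just above (and the matching try_buffered_x fix below) describes stream combinators that buffer results while capping how many futures are driven at once. A hedged sketch of the underlying idea using the plain futures crate's buffered combinator, not the repo's FuturesOrderedX, which layers a separate in-flight concurrency cap on top of the buffer size:

```rust
// With buffered(n), up to n futures are polled concurrently while results are
// still yielded in the original order.
use futures::{executor::block_on, stream, StreamExt};

fn main() {
    let results: Vec<u64> = block_on(
        stream::iter(0u64..8)
            .map(|i| async move { i * i }) // each item becomes a future
            .buffered(3)                   // at most 3 futures in flight, output stays ordered
            .collect(),
    );
    assert_eq!(results, vec![0, 1, 4, 9, 16, 25, 36, 49]);
}
```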
use crate::utils::stream::futures_ordered_x::FuturesOrderedX; use futures::{ ready, diff --git a/storage/backup/backup-cli/src/utils/stream/try_buffered_x.rs b/storage/backup/backup-cli/src/utils/stream/try_buffered_x.rs index 494ad8c21077f..8496f04239bf0 100644 --- a/storage/backup/backup-cli/src/utils/stream/try_buffered_x.rs +++ b/storage/backup/backup-cli/src/utils/stream/try_buffered_x.rs @@ -2,9 +2,9 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -///! This is a copy of `futures::try_stream::try_buffered` from `futures 0.3.16`, except that it uses -///! `FuturesOrderedX` which provides concurrency control. So we can buffer more results without -///! too many futures driven at the same time. +//! This is a copy of `futures::try_stream::try_buffered` from `futures 0.3.16`, except that it uses +//! `FuturesOrderedX` which provides concurrency control. So we can buffer more results without +//! too many futures driven at the same time. use crate::utils::stream::futures_ordered_x::FuturesOrderedX; use core::pin::Pin; use futures::{ diff --git a/storage/jellyfish-merkle/src/test_helper.rs b/storage/jellyfish-merkle/src/test_helper.rs index 7b18af51b5d1c..e606900af8496 100644 --- a/storage/jellyfish-merkle/src/test_helper.rs +++ b/storage/jellyfish-merkle/src/test_helper.rs @@ -191,8 +191,8 @@ pub fn test_get_range_proof((btree, n): (BTreeMap( - tree: &JellyfishMerkleTree<'a, MockTreeStore, V>, +fn test_existent_keys_impl( + tree: &JellyfishMerkleTree<'_, MockTreeStore, V>, version: Version, existent_kvs: &HashMap, ) { @@ -207,8 +207,8 @@ fn test_existent_keys_impl<'a, V: TestKey>( } } -fn test_nonexistent_keys_impl<'a, V: TestKey>( - tree: &JellyfishMerkleTree<'a, MockTreeStore, V>, +fn test_nonexistent_keys_impl( + tree: &JellyfishMerkleTree<'_, MockTreeStore, V>, version: Version, nonexistent_keys: &[HashValue], ) { diff --git a/storage/scratchpad/Cargo.toml b/storage/scratchpad/Cargo.toml index 80146f14dd19e..92b70b11fd9cf 100644 --- a/storage/scratchpad/Cargo.toml +++ b/storage/scratchpad/Cargo.toml @@ -20,6 +20,7 @@ aptos-types = { workspace = true } bitvec = { workspace = true } criterion = { workspace = true, optional = true } itertools = { workspace = true } +jemallocator = { workspace = true } once_cell = { workspace = true } proptest = { workspace = true, optional = true } rayon = { workspace = true } diff --git a/storage/scratchpad/benches/sparse_merkle.rs b/storage/scratchpad/benches/sparse_merkle.rs index f39797f429595..f28f50c8af10a 100644 --- a/storage/scratchpad/benches/sparse_merkle.rs +++ b/storage/scratchpad/benches/sparse_merkle.rs @@ -13,22 +13,19 @@ use itertools::zip_eq; use rand::{distributions::Standard, prelude::StdRng, seq::IteratorRandom, Rng, SeedableRng}; use std::collections::HashSet; +#[cfg(unix)] +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + struct Block { smt: SparseMerkleTree, - updates: Vec)>>, + updates: Vec<(HashValue, Option)>, proof_reader: ProofReader, } impl Block { - fn updates(&self) -> Vec)>> { - self.updates - .iter() - .map(|small_batch| small_batch.iter().map(|(k, v)| (*k, v.as_ref())).collect()) - .collect() - } - - fn updates_flat_batch(&self) -> Vec<(HashValue, Option<&StateValue>)> { - self.updates().iter().flatten().cloned().collect() + fn updates(&self) -> Vec<(HashValue, Option<&StateValue>)> { + self.updates.iter().map(|(k, v)| (*k, v.as_ref())).collect() } } @@ -43,7 +40,7 @@ impl Group { for block in &self.blocks { let 
block_size = block.updates.len(); - let one_large_batch = block.updates_flat_batch(); + let one_large_batch = block.updates(); group.throughput(Throughput::Elements(block_size as u64)); @@ -141,7 +138,7 @@ impl Benches { Block { smt: base_block .smt - .batch_update(base_block.updates_flat_batch(), &base_block.proof_reader) + .batch_update(base_block.updates(), &base_block.proof_reader) .unwrap(), updates, proof_reader, @@ -167,8 +164,8 @@ impl Benches { rng: &mut StdRng, keys: &[HashValue], block_size: usize, - ) -> Vec)>> { - std::iter::repeat_with(|| vec![Self::gen_update(rng, keys), Self::gen_update(rng, keys)]) + ) -> Vec<(HashValue, Option)> { + std::iter::repeat_with(|| Self::gen_update(rng, keys)) .take(block_size) .collect() } @@ -188,11 +185,10 @@ impl Benches { fn gen_proof_reader( naive_smt: &mut NaiveSmt, - updates: &[Vec<(HashValue, Option)>], + updates: &[(HashValue, Option)], ) -> ProofReader { let proofs = updates .iter() - .flatten() .map(|(key, _)| (*key, naive_smt.get_proof(key))) .collect(); ProofReader::new(proofs) diff --git a/storage/state-view/src/in_memory_state_view.rs b/storage/state-view/src/in_memory_state_view.rs new file mode 100644 index 0000000000000..519a9b31c01b3 --- /dev/null +++ b/storage/state-view/src/in_memory_state_view.rs @@ -0,0 +1,43 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 +#![forbid(unsafe_code)] +use crate::TStateView; +use anyhow::Result; +use aptos_types::state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, +}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +// A State view backed by in-memory hashmap. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +pub struct InMemoryStateView { + state_data: HashMap, +} + +impl InMemoryStateView { + pub fn new(state_data: HashMap) -> Self { + Self { state_data } + } +} + +impl TStateView for InMemoryStateView { + type Key = StateKey; + + fn get_state_value(&self, state_key: &StateKey) -> Result> { + Ok(self.state_data.get(state_key).cloned()) + } + + fn is_genesis(&self) -> bool { + unimplemented!("is_genesis is not implemented for InMemoryStateView") + } + + fn get_usage(&self) -> Result { + Ok(StateStorageUsage::new_untracked()) + } + + fn as_in_memory_state_view(&self) -> InMemoryStateView { + self.clone() + } +} diff --git a/storage/state-view/src/lib.rs b/storage/state-view/src/lib.rs index 7fa8c53b7c0b7..f52017ef34cf9 100644 --- a/storage/state-view/src/lib.rs +++ b/storage/state-view/src/lib.rs @@ -6,7 +6,10 @@ //! This crate defines [`trait StateView`](StateView). -use crate::account_with_state_view::{AccountWithStateView, AsAccountWithStateView}; +use crate::{ + account_with_state_view::{AccountWithStateView, AsAccountWithStateView}, + in_memory_state_view::InMemoryStateView, +}; use anyhow::Result; use aptos_crypto::HashValue; use aptos_types::{ @@ -20,6 +23,7 @@ use std::ops::Deref; pub mod account_with_state_cache; pub mod account_with_state_view; +pub mod in_memory_state_view; /// `StateView` is a trait that defines a read-only snapshot of the global state. It is passed to /// the VM for transaction execution, during which the VM is guaranteed to read anything at the @@ -47,6 +51,10 @@ pub trait TStateView { /// Get state storage usage info at epoch ending. 
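The state-view hunk just below adds as_in_memory_state_view with a panicking default on TStateView, overridden only by the new InMemoryStateView. A hedged sketch of that default-method pattern with simplified stand-in types (not the real StateKey/StateValue):

```rust
// Sketch only: the trait provides a panicking default for the conversion, and
// only views actually backed by an in-memory map override it.
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct InMemoryView {
    data: HashMap<String, String>,
}

trait StateViewSketch {
    fn get(&self, key: &str) -> Option<String>;

    // Most implementations cannot be converted, so the default just bails.
    fn as_in_memory(&self) -> InMemoryView {
        unreachable!("in-memory conversion not supported by this view")
    }
}

impl StateViewSketch for InMemoryView {
    fn get(&self, key: &str) -> Option<String> {
        self.data.get(key).cloned()
    }

    fn as_in_memory(&self) -> InMemoryView {
        self.clone()
    }
}

fn main() {
    let view = InMemoryView {
        data: HashMap::from([("k".to_string(), "v".to_string())]),
    };
    assert_eq!(view.get("k"), Some("v".to_string()));
    assert_eq!(view.as_in_memory(), view);
}
```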
fn get_usage(&self) -> Result; + + fn as_in_memory_state_view(&self) -> InMemoryStateView { + unreachable!("in-memory state view conversion not supported yet") + } } pub trait StateView: TStateView {} diff --git a/terraform/aptos-node-testnet/aws/variables.tf b/terraform/aptos-node-testnet/aws/variables.tf index 8a32a7756033e..d1257333dca9a 100644 --- a/terraform/aptos-node-testnet/aws/variables.tf +++ b/terraform/aptos-node-testnet/aws/variables.tf @@ -71,7 +71,6 @@ variable "chain_id" { default = 4 } - variable "era" { description = "Chain era, used to start a clean chain" default = 15 @@ -114,7 +113,7 @@ variable "logger_helm_values" { variable "enable_monitoring" { description = "Enable monitoring helm chart" - default = true + default = false } variable "monitoring_helm_values" { @@ -125,12 +124,12 @@ variable "monitoring_helm_values" { variable "enable_prometheus_node_exporter" { description = "Enable prometheus-node-exporter within monitoring helm chart" - default = true + default = false } variable "enable_kube_state_metrics" { description = "Enable kube-state-metrics within monitoring helm chart" - default = true + default = false } variable "testnet_addons_helm_values" { diff --git a/terraform/aptos-node-testnet/gcp/addons.tf b/terraform/aptos-node-testnet/gcp/addons.tf index 529d124632118..01dee100f4b5e 100644 --- a/terraform/aptos-node-testnet/gcp/addons.tf +++ b/terraform/aptos-node-testnet/gcp/addons.tf @@ -1,5 +1,6 @@ locals { - chaos_mesh_helm_chart_path = "${path.module}/../../helm/chaos" + chaos_mesh_helm_chart_path = "${path.module}/../../helm/chaos" + testnet_addons_helm_chart_path = "${path.module}/../../helm/testnet-addons" } resource "kubernetes_namespace" "chaos-mesh" { @@ -56,3 +57,119 @@ resource "helm_release" "chaos-mesh" { value = sha1(join("", [for f in fileset(local.chaos_mesh_helm_chart_path, "**") : filesha1("${local.chaos_mesh_helm_chart_path}/${f}")])) } } + +resource "google_service_account" "k8s-gcp-integrations" { + project = var.project + account_id = "${local.workspace_name}-testnet-gcp" +} + +resource "google_project_iam_member" "k8s-gcp-integrations-dns" { + project = local.zone_project + role = "roles/dns.admin" + member = "serviceAccount:${google_service_account.k8s-gcp-integrations.email}" +} + +resource "google_service_account_iam_binding" "k8s-gcp-integrations" { + service_account_id = google_service_account.k8s-gcp-integrations.name + role = "roles/iam.workloadIdentityUser" + members = ["serviceAccount:${module.validator.gke_cluster_workload_identity_config[0].workload_pool}[kube-system/k8s-gcp-integrations]"] +} + +resource "kubernetes_service_account" "k8s-gcp-integrations" { + metadata { + name = "k8s-gcp-integrations" + namespace = "kube-system" + annotations = { + "iam.gke.io/gcp-service-account" = google_service_account.k8s-gcp-integrations.email + } + } +} + +data "google_dns_managed_zone" "testnet" { + count = var.zone_name != "" ? 1 : 0 + name = var.zone_name + project = local.zone_project +} + +locals { + zone_project = var.zone_project != "" ? var.zone_project : var.project + dns_prefix = var.workspace_dns ? "${local.workspace_name}." : "" + domain = var.zone_name != "" ? trimsuffix("${local.dns_prefix}${data.google_dns_managed_zone.testnet[0].dns_name}", ".") : null +} + +resource "helm_release" "external-dns" { + count = var.zone_name != "" ? 
1 : 0 + name = "external-dns" + repository = "https://kubernetes-sigs.github.io/external-dns" + chart = "external-dns" + version = "1.11.0" + namespace = "kube-system" + max_history = 5 + wait = false + + values = [ + jsonencode({ + serviceAccount = { + create = false + name = kubernetes_service_account.k8s-gcp-integrations.metadata[0].name + } + provider = "google" + domainFilters = var.zone_name != "" ? [data.google_dns_managed_zone.testnet[0].dns_name] : [] + extraArgs = [ + "--google-project=${local.zone_project}", + "--txt-owner-id=aptos-${local.workspace_name}", + # "--txt-prefix=aptos-", + ] + }) + ] +} + +resource "google_compute_global_address" "testnet-addons-ingress" { + count = var.zone_name != "" ? 1 : 0 + project = var.project + name = "aptos-${local.workspace_name}-testnet-addons-ingress" +} + +resource "helm_release" "testnet-addons" { + count = var.enable_forge ? 0 : 1 + name = "testnet-addons" + chart = local.testnet_addons_helm_chart_path + max_history = 5 + wait = false + + values = [ + jsonencode({ + cloud = "GKE" + imageTag = var.image_tag + # The addons need to be able to refer to the Genesis parameters + genesis = { + era = var.era + username_prefix = local.aptos_node_helm_prefix + chain_id = var.chain_id + numValidators = var.num_validators + } + service = { + domain = local.domain + } + ingress = { + gce_static_ip = "aptos-${local.workspace_name}-testnet-addons-ingress" + gce_managed_certificate = "aptos-${local.workspace_name}-${var.zone_name}-testnet-addons" + } + load_test = { + fullnodeGroups = try(var.aptos_node_helm_values.fullnode.groups, []) + config = { + numFullnodeGroups = var.num_fullnode_groups + } + } + }), + jsonencode(var.testnet_addons_helm_values) + ] + dynamic "set" { + for_each = var.manage_via_tf ? toset([""]) : toset([]) + content { + # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change. 
+ name = "chart_sha1" + value = sha1(join("", [for f in fileset(local.testnet_addons_helm_chart_path, "**") : filesha1("${local.testnet_addons_helm_chart_path}/${f}")])) + } + } +} diff --git a/terraform/aptos-node-testnet/gcp/main.tf b/terraform/aptos-node-testnet/gcp/main.tf index 3a512ca20dfc3..d9b7193b0457c 100644 --- a/terraform/aptos-node-testnet/gcp/main.tf +++ b/terraform/aptos-node-testnet/gcp/main.tf @@ -26,12 +26,15 @@ module "validator" { region = var.region # DNS - zone_name = var.zone_name # keep empty if you don't want a DNS name - zone_project = var.zone_project - record_name = var.record_name + zone_name = var.zone_name # keep empty if you don't want a DNS name + zone_project = var.zone_project + record_name = var.record_name + workspace_dns = var.workspace_dns + # dns_prefix_name = var.dns_prefix_name # do not create the main fullnode and validator DNS records # instead, rely on external-dns from the testnet-addons - create_dns_records = false + create_dns_records = var.create_dns_records + dns_ttl = var.dns_ttl # General chain config era = var.era @@ -67,6 +70,8 @@ module "validator" { enable_monitoring = var.enable_monitoring enable_node_exporter = var.enable_prometheus_node_exporter monitoring_helm_values = var.monitoring_helm_values + + gke_maintenance_policy = var.gke_maintenance_policy } locals { @@ -99,6 +104,7 @@ resource "helm_release" "genesis" { genesis = { numValidators = var.num_validators username_prefix = local.aptos_node_helm_prefix + domain = local.domain validator = { enable_onchain_discovery = false } diff --git a/terraform/aptos-node-testnet/gcp/variables.tf b/terraform/aptos-node-testnet/gcp/variables.tf index 638924c5c9cf8..f76ea1231f4c5 100644 --- a/terraform/aptos-node-testnet/gcp/variables.tf +++ b/terraform/aptos-node-testnet/gcp/variables.tf @@ -50,6 +50,16 @@ variable "image_tag" { ### DNS config +variable "workspace_dns" { + description = "Include Terraform workspace name in DNS records" + default = true +} + +variable "dns_prefix_name" { + description = "DNS prefix for fullnode url" + default = "fullnode" +} + variable "zone_name" { description = "Zone name of GCP Cloud DNS zone to create records in" default = "" @@ -65,6 +75,16 @@ variable "record_name" { default = ".aptos" } +variable "create_dns_records" { + description = "Creates DNS records in var.zone_name that point to k8s service, as opposed to using external-dns or other means" + default = true +} + +variable "dns_ttl" { + description = "Time-to-Live for the Validator and Fullnode DNS records" + default = 300 +} + ### Testnet config variable "workspace_name_override" { @@ -134,7 +154,7 @@ variable "enable_forge" { variable "enable_monitoring" { description = "Enable monitoring helm chart" - default = true + default = false } variable "monitoring_helm_values" { @@ -145,7 +165,13 @@ variable "monitoring_helm_values" { variable "enable_prometheus_node_exporter" { description = "Enable prometheus-node-exporter within monitoring helm chart" - default = true + default = false +} + +variable "testnet_addons_helm_values" { + description = "Map of values to pass to testnet-addons helm chart" + type = any + default = {} } ### Autoscaling @@ -181,3 +207,21 @@ variable "cluster_ipv4_cidr_block" { description = "The IP address range of the container pods in this cluster, in CIDR notation. 
See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#cluster_ipv4_cidr_block" default = "" } + +variable "gke_maintenance_policy" { + description = "The maintenance policy to use for the cluster. See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#maintenance_policy" + type = object({ + recurring_window = object({ + start_time = string + end_time = string + recurrence = string + }) + }) + default = { + recurring_window = { + start_time = "2023-06-15T00:00:00Z" + end_time = "2023-06-15T23:59:00Z" + recurrence = "FREQ=DAILY" + } + } +} diff --git a/terraform/aptos-node/aws/kubernetes.tf b/terraform/aptos-node/aws/kubernetes.tf index 4884f63c91af5..92b8219c48c7c 100644 --- a/terraform/aptos-node/aws/kubernetes.tf +++ b/terraform/aptos-node/aws/kubernetes.tf @@ -83,9 +83,9 @@ resource "kubernetes_namespace" "tigera-operator" { resource "helm_release" "calico" { count = var.enable_calico ? 1 : 0 name = "calico" - repository = "https://docs.projectcalico.org/charts" + repository = "https://docs.tigera.io/calico/charts" chart = "tigera-operator" - version = "3.23.3" + version = "3.26.0" namespace = "tigera-operator" } diff --git a/terraform/aptos-node/gcp/cluster.tf b/terraform/aptos-node/gcp/cluster.tf index 5bf9d08c5c15e..66275bcf8eb0f 100644 --- a/terraform/aptos-node/gcp/cluster.tf +++ b/terraform/aptos-node/gcp/cluster.tf @@ -13,6 +13,10 @@ resource "google_container_cluster" "aptos" { channel = "REGULAR" } + pod_security_policy_config { + enabled = false + } + master_auth { client_certificate_config { issue_client_certificate = false @@ -68,6 +72,17 @@ resource "google_container_cluster" "aptos" { } } } + + maintenance_policy { + dynamic "recurring_window" { + for_each = var.gke_maintenance_policy.recurring_window != null ? [1] : [] + content { + start_time = var.gke_maintenance_policy.recurring_window.start_time + end_time = var.gke_maintenance_policy.recurring_window.end_time + recurrence = var.gke_maintenance_policy.recurring_window.recurrence + } + } + } } resource "google_container_node_pool" "utilities" { diff --git a/terraform/aptos-node/gcp/dns.tf b/terraform/aptos-node/gcp/dns.tf index 9cc8286e5239b..b0518237b831b 100644 --- a/terraform/aptos-node/gcp/dns.tf +++ b/terraform/aptos-node/gcp/dns.tf @@ -10,13 +10,14 @@ resource "random_string" "validator-dns" { } locals { + dns_prefix = var.workspace_dns ? "${local.workspace_name}." : "" record_name = replace(var.record_name, "", local.workspace_name) + domain = var.zone_name != "" ? "${local.dns_prefix}${data.google_dns_managed_zone.aptos[0].dns_name}" : null } data "kubernetes_service" "validator-lb" { count = var.zone_name != "" && var.create_dns_records ? 1 : 0 metadata { - # This is the main validator LB service that is created by the aptos-node helm chart name = "${local.workspace_name}-aptos-node-0-validator-lb" } depends_on = [time_sleep.lb_creation] @@ -25,7 +26,6 @@ data "kubernetes_service" "validator-lb" { data "kubernetes_service" "fullnode-lb" { count = var.zone_name != "" && var.create_dns_records ? 
1 : 0 metadata { - # This is the main fullnode LB service that is created by the aptos-node helm chart name = "${local.workspace_name}-aptos-node-0-fullnode-lb" } depends_on = [time_sleep.lb_creation] @@ -43,7 +43,7 @@ resource "google_dns_record_set" "validator" { project = data.google_dns_managed_zone.aptos[0].project name = "${random_string.validator-dns.result}.${local.record_name}.${data.google_dns_managed_zone.aptos[0].dns_name}" type = "A" - ttl = 3600 + ttl = var.dns_ttl rrdatas = [data.kubernetes_service.validator-lb[0].status[0].load_balancer[0].ingress[0].ip] } @@ -53,7 +53,7 @@ resource "google_dns_record_set" "fullnode" { project = data.google_dns_managed_zone.aptos[0].project name = "${local.record_name}.${data.google_dns_managed_zone.aptos[0].dns_name}" type = "A" - ttl = 3600 + ttl = var.dns_ttl rrdatas = [data.kubernetes_service.fullnode-lb[0].status[0].load_balancer[0].ingress[0].ip] } diff --git a/terraform/aptos-node/gcp/kubernetes.tf b/terraform/aptos-node/gcp/kubernetes.tf index 00bda9f43e2d8..970b4487b94c8 100644 --- a/terraform/aptos-node/gcp/kubernetes.tf +++ b/terraform/aptos-node/gcp/kubernetes.tf @@ -77,6 +77,14 @@ resource "helm_release" "validator" { effect = "NoExecute" }] } + haproxy = { + nodeSelector = var.gke_enable_node_autoprovisioning ? {} : { + "cloud.google.com/gke-nodepool" = google_container_node_pool.utilities.name + } + } + service = { + domain = local.domain + } }), var.helm_values_file != "" ? file(var.helm_values_file) : "{}", jsonencode(var.helm_values), diff --git a/terraform/aptos-node/gcp/outputs.tf b/terraform/aptos-node/gcp/outputs.tf index 6bee1c3500050..f44a9de4df365 100644 --- a/terraform/aptos-node/gcp/outputs.tf +++ b/terraform/aptos-node/gcp/outputs.tf @@ -9,3 +9,7 @@ output "gke_cluster_endpoint" { output "gke_cluster_ca_certificate" { value = google_container_cluster.aptos.master_auth[0].cluster_ca_certificate } + +output "gke_cluster_workload_identity_config" { + value = google_container_cluster.aptos.workload_identity_config +} diff --git a/terraform/aptos-node/gcp/security.tf b/terraform/aptos-node/gcp/security.tf index e2c74e54f273f..c230c9edca577 100644 --- a/terraform/aptos-node/gcp/security.tf +++ b/terraform/aptos-node/gcp/security.tf @@ -1,44 +1,20 @@ # Security-related resources -data "kubernetes_all_namespaces" "all" { - count = var.cluster_bootstrap ? 0 : 1 -} - locals { - kubernetes_master_version = substr(google_container_cluster.aptos.master_version, 0, 4) - baseline_pss_labels = { + # Enforce "privileged" PSS (i.e. allow everything), but warn about + # infractions of "baseline" profile + privileged_pss_labels = { "pod-security.kubernetes.io/audit" = "baseline" "pod-security.kubernetes.io/warn" = "baseline" "pod-security.kubernetes.io/enforce" = "privileged" } } -# FIXME: Remove after migration to K8s 1.25 -resource "kubernetes_role_binding" "disable-psp" { - for_each = toset(var.cluster_bootstrap ? [] : local.kubernetes_master_version <= "1.24" ? 
data.kubernetes_all_namespaces.all[0].namespaces : []) - metadata { - name = "privileged-psp" - namespace = each.value - } - - role_ref { - api_group = "rbac.authorization.k8s.io" - kind = "ClusterRole" - name = "gce:podsecuritypolicy:privileged" - } - - subject { - api_group = "rbac.authorization.k8s.io" - kind = "Group" - name = "system:serviceaccounts:${each.value}" - } -} - resource "kubernetes_labels" "pss-default" { api_version = "v1" kind = "Namespace" metadata { name = "default" } - labels = local.baseline_pss_labels + labels = local.privileged_pss_labels } diff --git a/terraform/aptos-node/gcp/variables.tf b/terraform/aptos-node/gcp/variables.tf index 81f73c42d1510..c5dd42daf4f58 100644 --- a/terraform/aptos-node/gcp/variables.tf +++ b/terraform/aptos-node/gcp/variables.tf @@ -46,26 +46,6 @@ variable "image_tag" { default = "devnet" } -variable "zone_name" { - description = "Zone name of GCP Cloud DNS zone to create records in" - default = "" -} - -variable "zone_project" { - description = "GCP project which the DNS zone is in (if different)" - default = "" -} - -variable "record_name" { - description = "DNS record name to use ( is replaced with the TF workspace name)" - default = ".aptos" -} - -variable "create_dns_records" { - description = "Creates DNS records in var.zone_name that point to k8s service, as opposed to using external-dns or other means" - default = true -} - variable "helm_chart" { description = "Path to aptos-validator Helm chart file" default = "" @@ -171,8 +151,39 @@ variable "manage_via_tf" { default = true } -### Autoscaling +### DNS +variable "zone_name" { + description = "Zone name of GCP Cloud DNS zone to create records in" + default = "" +} + +variable "zone_project" { + description = "GCP project which the DNS zone is in (if different)" + default = "" +} + +variable "workspace_dns" { + description = "Include Terraform workspace name in DNS records" + default = true +} + +variable "record_name" { + description = "DNS record name to use ( is replaced with the TF workspace name)" + default = ".aptos" +} + +variable "create_dns_records" { + description = "Creates DNS records in var.zone_name that point to k8s service, as opposed to using external-dns or other means" + default = true +} + +variable "dns_ttl" { + description = "Time-to-Live for the Validator and Fullnode DNS records" + default = 300 +} + +### Autoscaling variable "gke_enable_node_autoprovisioning" { description = "Enable node autoprovisioning for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning" @@ -229,3 +240,21 @@ variable "num_fullnode_groups" { description = "The number of fullnode groups to create" default = 1 } + +variable "gke_maintenance_policy" { + description = "The maintenance policy to use for the cluster. 
See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#maintenance_policy" + type = object({ + recurring_window = object({ + start_time = string + end_time = string + recurrence = string + }) + }) + default = { + recurring_window = { + start_time = "2023-06-15T00:00:00Z" + end_time = "2023-06-15T23:59:00Z" + recurrence = "FREQ=DAILY" + } + } +} diff --git a/terraform/aptos-node/gcp/versions.tf b/terraform/aptos-node/gcp/versions.tf index f88de6cbc2a7e..2b8786efb55aa 100644 --- a/terraform/aptos-node/gcp/versions.tf +++ b/terraform/aptos-node/gcp/versions.tf @@ -2,10 +2,12 @@ terraform { required_version = "~> 1.3.6" required_providers { google = { - source = "hashicorp/google" + source = "hashicorp/google" + version = "~> 4.54.0" } google-beta = { - source = "hashicorp/google-beta" + source = "hashicorp/google-beta" + version = "~> 4.54.0" } helm = { source = "hashicorp/helm" diff --git a/terraform/fullnode/gcp/cluster.tf b/terraform/fullnode/gcp/cluster.tf index 8d0eebdc4b4bf..23972cc4b0257 100644 --- a/terraform/fullnode/gcp/cluster.tf +++ b/terraform/fullnode/gcp/cluster.tf @@ -74,6 +74,10 @@ resource "google_container_cluster" "aptos" { maximum = resource_limits.value } } + auto_provisioning_defaults { + oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"] + service_account = google_service_account.gke.email + } } } diff --git a/terraform/helm/aptos-node/templates/_helpers.tpl b/terraform/helm/aptos-node/templates/_helpers.tpl index f805d33b0e49b..bda9d558f6223 100644 --- a/terraform/helm/aptos-node/templates/_helpers.tpl +++ b/terraform/helm/aptos-node/templates/_helpers.tpl @@ -50,6 +50,8 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} Multicluster labels. `multiclusterLabels` takes in a tuple of context and index as arguments. It should be invoked as `aptos-validator.multiclusterLabels (tuple $ $i)` where $i is the index of the statefulset. + +The logic below assigns a target cluster to each statefulset replica in a round-robin fashion. */}} {{- define "aptos-validator.multiclusterLabels" -}} {{- $ctx := index $ 0 -}} diff --git a/terraform/helm/aptos-node/templates/validator.yaml b/terraform/helm/aptos-node/templates/validator.yaml index 77dff71d9a79e..9b6514e81c46f 100644 --- a/terraform/helm/aptos-node/templates/validator.yaml +++ b/terraform/helm/aptos-node/templates/validator.yaml @@ -105,10 +105,6 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - - name: KUBERNETES_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - name: RUST_BACKTRACE value: "0" {{- end }} diff --git a/terraform/helm/aptos-node/values.yaml b/terraform/helm/aptos-node/values.yaml index da6cd16b84d4b..bd7fe2ab43641 100644 --- a/terraform/helm/aptos-node/values.yaml +++ b/terraform/helm/aptos-node/values.yaml @@ -16,8 +16,8 @@ numFullnodeGroups: 1 # -- Options for multicluster mode. This is *experimental only*. multicluster: - enabled: false - targetClusters: ["cluster1", "cluster2", "cluster3"] + enabled: false + targetClusters: ["forge-multiregion-1", "forge-multiregion-2", "forge-multiregion-3"] # -- Specify validator and fullnode NodeConfigs via named ConfigMaps, rather than the generated ones from this chart. 
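The _helpers.tpl comment above notes that multiclusterLabels assigns each statefulset replica to a target cluster in a round-robin fashion. A tiny illustration of that selection in Rust (illustrative only; the chart does this in Go template syntax), using the targetClusters values from the hunk below:

```rust
// Round-robin assignment: replica i goes to cluster i modulo the cluster count.
fn target_cluster<'a>(clusters: &'a [&'a str], replica_index: usize) -> &'a str {
    clusters[replica_index % clusters.len()]
}

fn main() {
    let clusters = [
        "forge-multiregion-1",
        "forge-multiregion-2",
        "forge-multiregion-3",
    ];
    assert_eq!(target_cluster(&clusters, 0), "forge-multiregion-1");
    assert_eq!(target_cluster(&clusters, 4), "forge-multiregion-2");
}
```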
overrideNodeConfig: false @@ -151,7 +151,7 @@ service: # -- Enable the REST API on the validator enableRestApi: true # -- Enable the metrics port on the validator - enableMetricsPort: true + enableMetricsPort: false fullnode: external: # -- The Kubernetes ServiceType to use for fullnodes' HAProxy @@ -167,7 +167,7 @@ service: # -- Enable the REST API on fullnodes enableRestApi: true # -- Enable the metrics port on fullnodes - enableMetricsPort: true + enableMetricsPort: false serviceAccount: # -- Specifies whether a service account should be created diff --git a/terraform/helm/autoscaling/Chart.lock b/terraform/helm/autoscaling/Chart.lock index 68cae46b3ddaa..b62abaaf3221d 100644 --- a/terraform/helm/autoscaling/Chart.lock +++ b/terraform/helm/autoscaling/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: metrics-server repository: https://kubernetes-sigs.github.io/metrics-server/ - version: 3.8.2 -digest: sha256:fa1a19fa0f1ff4bae7f9e397277af3a832718ba50351e6ddf3b72a398d17fd0a -generated: "2022-04-12T17:19:04.312907-07:00" + version: 3.10.0 +digest: sha256:e5771e2fb7d8cee664fa3f7fbde4bb626f6ce4e8ba12504a85da3e8261a19d9a +generated: "2023-06-09T17:24:05.737993-04:00" diff --git a/terraform/helm/autoscaling/Chart.yaml b/terraform/helm/autoscaling/Chart.yaml index 896fe206aa75d..623c9ee16c41b 100644 --- a/terraform/helm/autoscaling/Chart.yaml +++ b/terraform/helm/autoscaling/Chart.yaml @@ -4,5 +4,5 @@ version: 0.1.0 dependencies: - name: metrics-server - version: 3.8.2 + version: 3.10.0 repository: "https://kubernetes-sigs.github.io/metrics-server/" diff --git a/terraform/helm/autoscaling/charts/metrics-server-3.10.0.tgz b/terraform/helm/autoscaling/charts/metrics-server-3.10.0.tgz new file mode 100644 index 0000000000000..2b38fd615daa7 Binary files /dev/null and b/terraform/helm/autoscaling/charts/metrics-server-3.10.0.tgz differ diff --git a/terraform/helm/autoscaling/charts/metrics-server-3.8.2.tgz b/terraform/helm/autoscaling/charts/metrics-server-3.8.2.tgz deleted file mode 100644 index 4b4a7899a6655..0000000000000 Binary files a/terraform/helm/autoscaling/charts/metrics-server-3.8.2.tgz and /dev/null differ diff --git a/terraform/helm/autoscaling/values.yaml b/terraform/helm/autoscaling/values.yaml index 4cec0a80935ff..89c2175891dd3 100644 --- a/terraform/helm/autoscaling/values.yaml +++ b/terraform/helm/autoscaling/values.yaml @@ -17,7 +17,7 @@ autoscaler: scaleDownDelayAfterAdd: 5m image: repo: k8s.gcr.io/autoscaling/cluster-autoscaler - tag: v1.23.0 + tag: v1.25.2 resources: requests: cpu: 1 diff --git a/terraform/helm/fullnode/templates/backup-verify.yaml b/terraform/helm/fullnode/templates/backup-verify.yaml index 2622aa2513a4c..d5900203888d8 100644 --- a/terraform/helm/fullnode/templates/backup-verify.yaml +++ b/terraform/helm/fullnode/templates/backup-verify.yaml @@ -1,3 +1,4 @@ +{{ $backup_verify_cronjob := lookup "batch/v1" "CronJob" $.Release.Namespace (print (include "backup.fullname" .) 
"-backup-verify")}} apiVersion: batch/v1 kind: CronJob metadata: @@ -23,8 +24,11 @@ spec: terminationGracePeriodSeconds: 0 containers: - name: backup-verify - # use the same image with the backup sts + {{- if and $backup_verify_cronjob (not $.Values.manageImages) }} # if the statefulset already exists and we do not want helm to simply overwrite the image, use the existing image + image: {{ (first $backup_verify_cronjob.spec.jobTemplate.spec.template.spec.containers).image }} + {{- else }} image: {{ .Values.backup.image.repo }}:{{ .Values.backup.image.tag | default .Values.imageTag }} + {{- end }} imagePullPolicy: {{ .Values.backup.image.pullPolicy }} command: - /usr/local/bin/aptos-db-tool @@ -74,15 +78,15 @@ spec: fsGroup: 6180 {{- with .nodeSelector }} nodeSelector: - {{- toYaml . | nindent 8 }} + {{- toYaml . | nindent 12 }} {{- end }} {{- with .affinity }} affinity: - {{- toYaml . | nindent 8 }} + {{- toYaml . | nindent 12 }} {{- end }} {{- with .tolerations }} tolerations: - {{- toYaml . | nindent 8 }} + {{- toYaml . | nindent 12 }} {{- end }} {{- end }} volumes: diff --git a/terraform/helm/fullnode/templates/backup.yaml b/terraform/helm/fullnode/templates/backup.yaml index a0f6f41d0d025..35946bd323ff8 100644 --- a/terraform/helm/fullnode/templates/backup.yaml +++ b/terraform/helm/fullnode/templates/backup.yaml @@ -8,7 +8,7 @@ data: {{ (.Files.Glob "files/backup/*.yaml").AsConfig | indent 2 }} --- - +{{ $backup_statefulset := lookup "apps/v1" "StatefulSet" $.Release.Namespace (print (include "backup.fullname" .) "-backup")}} apiVersion: apps/v1 kind: StatefulSet metadata: @@ -35,7 +35,11 @@ spec: terminationGracePeriodSeconds: 0 containers: - name: backup + {{- if and $backup_statefulset (not $.Values.manageImages) }} # if the statefulset already exists and we do not want helm to simply overwrite the image, use the existing image + image: {{ (first $backup_statefulset.spec.template.spec.containers).image }} + {{- else }} image: {{ .Values.backup.image.repo }}:{{ .Values.backup.image.tag | default .Values.imageTag }} + {{- end }} imagePullPolicy: {{ .Values.backup.image.pullPolicy }} resources: {{- toYaml .Values.backup.resources | nindent 10 }} diff --git a/terraform/helm/fullnode/templates/restore.yaml b/terraform/helm/fullnode/templates/restore.yaml index f4bf79a109d4e..a53ce9f5190a4 100644 --- a/terraform/helm/fullnode/templates/restore.yaml +++ b/terraform/helm/fullnode/templates/restore.yaml @@ -1,7 +1,9 @@ +{{ $restore_job_suffix := randAlpha 4 | lower }} +{{ $backup_restore_job := lookup "batch/v1" "Job" $.Release.Namespace (print (include "backup.fullname" .) "-restore-" $restore_job_suffix) }} apiVersion: batch/v1 kind: Job metadata: - name: {{ include "backup.fullname" . }}-restore-{{ randAlpha 4 | lower }} + name: {{ include "backup.fullname" . }}-restore-{{ $restore_job_suffix }} labels: {{- include "backup.labels" . 
| nindent 4 }} app.kubernetes.io/name: restore @@ -20,7 +22,11 @@ spec: {{- with .Values.restore }} containers: - name: restore + {{- if and $backup_restore_job (not $.Values.manageImages) }} # if the statefulset already exists and we do not want helm to simply overwrite the image, use the existing image + image: {{ (first $backup_restore_job.spec.template.spec.containers).image }} + {{- else }} image: {{ .image.repo }}:{{ .image.tag | default $.Values.imageTag }} + {{- end }} imagePullPolicy: {{ .image.pullPolicy }} resources: {{- toYaml .resources | nindent 10 }} diff --git a/terraform/helm/kube-state-metrics/Chart.lock b/terraform/helm/kube-state-metrics/Chart.lock new file mode 100644 index 0000000000000..943e8cd6b914f --- /dev/null +++ b/terraform/helm/kube-state-metrics/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 5.7.0 +digest: sha256:6c8144333bebb7a2956d27f0438b11920b0a914c18fe8c7381adee0a6041044f +generated: "2023-06-07T17:17:42.178703-04:00" diff --git a/terraform/helm/kube-state-metrics/Chart.yaml b/terraform/helm/kube-state-metrics/Chart.yaml new file mode 100644 index 0000000000000..fc6ded4fff28c --- /dev/null +++ b/terraform/helm/kube-state-metrics/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +name: aptos-kube-state-metrics +version: 5.7.0 + +dependencies: + - name: kube-state-metrics + version: 5.7.0 + repository: "https://prometheus-community.github.io/helm-charts" diff --git a/terraform/helm/kube-state-metrics/charts/kube-state-metrics-5.7.0.tgz b/terraform/helm/kube-state-metrics/charts/kube-state-metrics-5.7.0.tgz new file mode 100644 index 0000000000000..4891bf5b08348 Binary files /dev/null and b/terraform/helm/kube-state-metrics/charts/kube-state-metrics-5.7.0.tgz differ diff --git a/terraform/helm/kube-state-metrics/values.yaml b/terraform/helm/kube-state-metrics/values.yaml new file mode 100644 index 0000000000000..e5a42c59e6715 --- /dev/null +++ b/terraform/helm/kube-state-metrics/values.yaml @@ -0,0 +1,5 @@ +kube-state-metrics: + namespaceOverride: monitoring + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" diff --git a/terraform/helm/prometheus-node-exporter/Chart.lock b/terraform/helm/prometheus-node-exporter/Chart.lock new file mode 100644 index 0000000000000..a2787d23f6fac --- /dev/null +++ b/terraform/helm/prometheus-node-exporter/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.17.5 +digest: sha256:fa78ac7db5c879ed613904daf0f4049b10b5268a1d44f846facf59297b0a1f75 +generated: "2023-06-07T17:17:49.635213-04:00" diff --git a/terraform/helm/prometheus-node-exporter/Chart.yaml b/terraform/helm/prometheus-node-exporter/Chart.yaml new file mode 100644 index 0000000000000..52351e84cb28d --- /dev/null +++ b/terraform/helm/prometheus-node-exporter/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +name: aptos-prometheus-node-exporter +version: 4.17.5 + +dependencies: + - name: prometheus-node-exporter + version: 4.17.5 + repository: "https://prometheus-community.github.io/helm-charts" diff --git a/terraform/helm/prometheus-node-exporter/charts/prometheus-node-exporter-4.17.5.tgz b/terraform/helm/prometheus-node-exporter/charts/prometheus-node-exporter-4.17.5.tgz new file mode 100644 index 0000000000000..9db215da1342b Binary files /dev/null and b/terraform/helm/prometheus-node-exporter/charts/prometheus-node-exporter-4.17.5.tgz differ diff --git 
a/terraform/helm/prometheus-node-exporter/values.yaml b/terraform/helm/prometheus-node-exporter/values.yaml new file mode 100644 index 0000000000000..553614808afc3 --- /dev/null +++ b/terraform/helm/prometheus-node-exporter/values.yaml @@ -0,0 +1,5 @@ +prometheus-node-exporter: + namespaceOverride: monitoring + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9100" diff --git a/terraform/helm/testnet-addons/README.md b/terraform/helm/testnet-addons/README.md index 3e787d3fac08d..2e898549d11a2 100644 --- a/terraform/helm/testnet-addons/README.md +++ b/terraform/helm/testnet-addons/README.md @@ -14,6 +14,7 @@ Additional components for aptos-nodes testnet | Key | Type | Default | Description | |-----|------|---------|-------------| +| cloud | string | `"EKS"` | | | genesis.chain_id | string | `nil` | Aptos Chain ID | | genesis.numValidators | string | `nil` | Number of validators deployed in this testnet | | genesis.username_prefix | string | `"aptos-node"` | Validator username prefix, used to get genesis secrets. This should be the fullname for the aptos-node helm release | @@ -21,6 +22,8 @@ Additional components for aptos-nodes testnet | ingress.acm_certificate | string | `nil` | The ACM certificate to install on the ingress | | ingress.cookieDurationSeconds | int | `86400` | If stickiness is enabled, how long the session cookie should last | | ingress.enableStickyness | bool | `true` | Whether to enable session stickiness on the underlying load balancer | +| ingress.gce_managed_certificate | string | `nil` | The GCE certificate to install on the ingress | +| ingress.gce_static_ip | string | `nil` | The GCE static IP to install on the ingress | | ingress.loadBalancerSourceRanges | string | `nil` | List of CIDRs to accept traffic from | | ingress.wafAclArn | string | `nil` | The ARN of the WAF ACL to install on the ingress | | load_test.affinity | object | `{}` | | diff --git a/terraform/helm/testnet-addons/templates/ingress.yaml b/terraform/helm/testnet-addons/templates/ingress.yaml index 865cf19c0f0bc..632a9fc2fe545 100644 --- a/terraform/helm/testnet-addons/templates/ingress.yaml +++ b/terraform/helm/testnet-addons/templates/ingress.yaml @@ -5,15 +5,17 @@ metadata: labels: {{- include "testnet-addons.labels" . 
| nindent 4 }} annotations: + {{- if .Values.service.domain }} + external-dns.alpha.kubernetes.io/hostname: {{ .Values.service.domain }} + {{- end }} + # EKS annotations + {{- if eq .Values.cloud "EKS" }} kubernetes.io/ingress.class: alb alb.ingress.kubernetes.io/scheme: internet-facing alb.ingress.kubernetes.io/tags: {{ .Values.service.aws_tags | quote }} {{- if .Values.ingress.loadBalancerSourceRanges }} alb.ingress.kubernetes.io/inbound-cidrs: {{ join "," .Values.ingress.loadBalancerSourceRanges }} {{- end }} - {{- if .Values.service.domain }} - external-dns.alpha.kubernetes.io/hostname: {{ .Values.service.domain }} - {{- end }} {{- if .Values.ingress.acm_certificate }} alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.acm_certificate }} alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]' @@ -27,6 +29,16 @@ metadata: alb.ingress.kubernetes.io/target-group-attributes: stickiness.enabled=true,stickiness.lb_cookie.duration_seconds={{ .Values.ingress.cookieDurationSeconds }} alb.ingress.kubernetes.io/target-type: ip {{- end }} + {{- end }} # "EKS" + # GKE annotations + {{- if eq .Values.cloud "GKE" }} + kubernetes.io/ingress.class: "gce" + # Allow HTTP but always return 301 because we have redirectToHttps enabled + kubernetes.io/ingress.allow-http: "true" + kubernetes.io/ingress.global-static-ip-name: {{ .Values.ingress.gce_static_ip }} + networking.gke.io/managed-certificates: {{ .Values.ingress.gce_managed_certificate }} + networking.gke.io/v1beta1.FrontendConfig: {{ include "testnet-addons.fullname" . }} + {{- end }} # "GKE" spec: rules: {{- if .Values.service.domain }} @@ -41,7 +53,8 @@ spec: port: number: 80 {{- end }} - - http: + - host: {{ .Values.service.domain }} + http: paths: - path: /waypoint.txt pathType: Exact @@ -64,3 +77,23 @@ spec: name: {{ include "testnet-addons.fullname" . }}-api port: number: 80 +--- +{{- if eq .Values.cloud "GKE" }} +apiVersion: networking.gke.io/v1beta1 +kind: FrontendConfig +metadata: + name: {{ include "testnet-addons.fullname" . }} +spec: + redirectToHttps: + enabled: true +--- +apiVersion: networking.gke.io/v1 +kind: ManagedCertificate +metadata: + name: {{ .Values.ingress.gce_managed_certificate }} +spec: + domains: + - {{ .Values.service.domain }} + - api.{{ .Values.service.domain }} +--- +{{- end }} diff --git a/terraform/helm/testnet-addons/templates/loadtest.yaml b/terraform/helm/testnet-addons/templates/loadtest.yaml index 845a87579a4b7..c1a61feca8f44 100644 --- a/terraform/helm/testnet-addons/templates/loadtest.yaml +++ b/terraform/helm/testnet-addons/templates/loadtest.yaml @@ -16,8 +16,6 @@ spec: labels: {{- include "testnet-addons.selectorLabels" . | nindent 12 }} app.kubernetes.io/name: load-test - annotations: - seccomp.security.alpha.kubernetes.io/pod: runtime/default spec: restartPolicy: Never priorityClassName: {{ include "testnet-addons.fullname" . 
}}-high @@ -26,7 +24,7 @@ spec: image: {{ .Values.load_test.image.repo }}:{{ .Values.load_test.image.tag | default .Values.imageTag }} imagePullPolicy: {{ .Values.load_test.image.pullPolicy }} command: - - transaction-emitter + - aptos-transaction-emitter - emit-tx - --mint-key={{ .Values.load_test.config.mint_key }} - --chain-id={{ .Values.genesis.chain_id }} @@ -34,25 +32,36 @@ spec: {{- $numTargets := 0 }} {{- $targetSuffix := "" }} {{- $targetGroups := list }} - {{- if $.Values.load_test.config.use_validators }} + {{- if $.Values.load_test.config.use_pfns }} + {{- $numTargets = $.Values.load_test.config.numFullnodeGroups }} + {{- $targetSuffix = "fullnode" }} + {{- $targetGroups = list }} + {{- else if $.Values.load_test.config.use_validators }} {{- $numTargets = $.Values.genesis.numValidators }} {{- $targetSuffix = "validator" }} {{- $targetGroups = list }} {{- else }} {{- $numTargets = $.Values.load_test.config.numFullnodeGroups }} {{- $targetSuffix = "fullnode" }} - {{- $targetGroups = $.Values.load_test.fullnodeGroups }} + {{- $targetGroups = $.Values.load_test.fullnode.groups }} {{- end }} - {{- range $i := until (int $numTargets) }} - {{- $port := 80 }} - {{- if $targetGroups }} - {{- range $group := $targetGroups }} - {{- $nodeName := join "-" (list $.Values.genesis.username_prefix $i $group.name "lb") }} - - --targets=http://{{ $nodeName }}:{{ $port }} + {{- if $.Values.load_test.config.use_pfns }} + {{- range $i := until (int $numTargets) }} + - --targets=http://{{ printf "fullnode%d.%s" $i $.Values.service.domain }} + # - --targets=https://{{ printf "%s" $.Values.service.domain }} {{- end }} - {{- else }} - {{- $nodeName := join "-" (list $.Values.genesis.username_prefix $i $targetSuffix "lb") }} + {{- else }} + {{- range $i := until (int $numTargets) }} + {{- $port := 80 }} + {{- if $targetGroups }} + {{- range $group := $targetGroups }} + {{- $nodeName := join "-" (list $.Values.genesis.username_prefix $i $group.name "lb") }} + - --targets=http://{{ $nodeName }}:{{ $port }} + {{- end }} + {{- else }} + {{- $nodeName := join "-" (list $.Values.genesis.username_prefix $i $targetSuffix "lb") }} - --targets=http://{{ $nodeName }}:{{ $port }} + {{- end }} {{- end }} {{- end }} {{- with .Values.load_test }} @@ -63,10 +72,14 @@ spec: - --mempool-backlog={{ .config.mempool_backlog }} {{- end }} - --duration={{ .config.duration }} + # - --delay-after-minting=300 + - --expected-max-txns={{ .config.expected_max_txns }} - --txn-expiration-time-secs={{ .config.txn_expiration_time_secs }} + - --max-transactions-per-account={{ .config.max_transactions_per_account }} + - --transaction-type={{ .config.transaction_type }} env: - name: RUST_BACKTRACE - value: "1" + value: "full" - name: REUSE_ACC value: "1" {{- with .resources }} @@ -79,6 +92,8 @@ spec: capabilities: drop: - ALL + seccompProfile: + type: RuntimeDefault {{- with .nodeSelector }} nodeSelector: {{- toYaml . | nindent 12 }} @@ -96,6 +111,9 @@ spec: runAsUser: 6180 runAsGroup: 6180 fsGroup: 6180 + # sysctls: + # - name: net.ipv4.tcp_tw_reuse + # value: "1" {{- end }} serviceAccountName: {{ include "testnet-addons.serviceAccountName" . }} {{- if .Values.imagePullSecret }} diff --git a/terraform/helm/testnet-addons/templates/service.yaml b/terraform/helm/testnet-addons/templates/service.yaml index aeb9028060907..74416c999ce53 100644 --- a/terraform/helm/testnet-addons/templates/service.yaml +++ b/terraform/helm/testnet-addons/templates/service.yaml @@ -6,7 +6,13 @@ metadata: labels: {{- include "testnet-addons.labels" . 
| nindent 4 }} annotations: + {{- if eq .Values.cloud "EKS" }} alb.ingress.kubernetes.io/healthcheck-path: /v1/-/healthy + {{- end }} + {{- if eq .Values.cloud "GKE" }} + cloud.google.com/backend-config: '{"default":"{{ include "testnet-addons.fullname" . }}-api"}' + cloud.google.com/neg: '{"ingress": true}' + {{- end }} spec: selector: app.kubernetes.io/part-of: aptos-node @@ -16,3 +22,22 @@ spec: targetPort: 8080 type: NodePort externalTrafficPolicy: Local +--- +{{- if eq .Values.cloud "GKE" }} +apiVersion: cloud.google.com/v1 +kind: BackendConfig +metadata: + name: {{ include "testnet-addons.fullname" . }}-api + namespace: default +spec: + healthCheck: + checkIntervalSec: 30 + timeoutSec: 5 + healthyThreshold: 1 + unhealthyThreshold: 2 + type: HTTP + requestPath: /v1/-/healthy + # container targetPort + port: 8080 +{{- end }} +--- diff --git a/terraform/helm/testnet-addons/templates/waypoint.yaml b/terraform/helm/testnet-addons/templates/waypoint.yaml index f7df236e432b1..154fa3f986ed5 100644 --- a/terraform/helm/testnet-addons/templates/waypoint.yaml +++ b/terraform/helm/testnet-addons/templates/waypoint.yaml @@ -6,7 +6,13 @@ metadata: {{- include "testnet-addons.labels" . | nindent 4 }} app: {{ include "testnet-addons.fullname" . }}-waypoint annotations: - alb.ingress.kubernetes.io/healthcheck-path: /health + {{- if eq .Values.cloud "EKS" }} + alb.ingress.kubernetes.io/healthcheck-path: /waypoint.txt + {{- end }} + {{- if eq .Values.cloud "GKE" }} + cloud.google.com/backend-config: '{"default":"{{ include "testnet-addons.fullname" . }}-waypoint"}' + cloud.google.com/neg: '{"ingress": true}' + {{- end }} spec: selector: {{- include "testnet-addons.selectorLabels" . | nindent 4 }} @@ -15,9 +21,25 @@ spec: - port: 80 targetPort: 8080 type: NodePort - --- - +{{- if eq .Values.cloud "GKE" }} +apiVersion: cloud.google.com/v1 +kind: BackendConfig +metadata: + name: {{ include "testnet-addons.fullname" . 
}}-waypoint + namespace: default +spec: + healthCheck: + checkIntervalSec: 30 + timeoutSec: 5 + healthyThreshold: 1 + unhealthyThreshold: 2 + type: HTTP + requestPath: /waypoint.txt + # container targetPort + port: 8080 +{{- end }} +--- apiVersion: apps/v1 kind: Deployment metadata: diff --git a/terraform/helm/testnet-addons/values.yaml b/terraform/helm/testnet-addons/values.yaml index 7c9b15bfb8750..bd5d5b9900935 100644 --- a/terraform/helm/testnet-addons/values.yaml +++ b/terraform/helm/testnet-addons/values.yaml @@ -1,3 +1,6 @@ +# Cloud provider +cloud: EKS + # -- Default image tag to use for all aptos images imageTag: devnet @@ -50,7 +53,7 @@ load_test: # -- The fullnode groups to target fullnode: groups: - - name: fullnode + - name: fullnode config: # -- The number of fullnode groups to run traffic against numFullnodeGroups: @@ -66,6 +69,12 @@ load_test: txn_expiration_time_secs: 30 # -- Whether to submit transactions through validator REST API use_validators: false + # -- If true, run $numFullnodeGroups parallel load tests + use_pfns: true + # -- Default 20k * $duration + expected_max_txns: 6000000 + max_transactions_per_account: 5 + transaction_type: coin-transfer serviceAccount: # -- Specifies whether a service account should be created @@ -80,6 +89,10 @@ service: ingress: # -- The ACM certificate to install on the ingress acm_certificate: + # -- The GCE static IP to install on the ingress + gce_static_ip: + # -- The GCE certificate to install on the ingress + gce_managed_certificate: # -- The ARN of the WAF ACL to install on the ingress wafAclArn: # -- List of CIDRs to accept traffic from diff --git a/testsuite/find_latest_image.py b/testsuite/find_latest_image.py index 4cbfac2d88cce..4ea90743a59d5 100644 --- a/testsuite/find_latest_image.py +++ b/testsuite/find_latest_image.py @@ -13,6 +13,7 @@ from forge import find_recent_images, image_exists from test_framework.shell import LocalShell from test_framework.git import Git +from test_framework.cluster import Cloud # gh output logic from determinator from determinator import GithubOutput, write_github_output @@ -34,7 +35,7 @@ def main() -> None: "--image-name", "-i", help="The name of the image to search for", - default="aptos/validator-testing", + default="validator-testing", ) parser.add_argument( "--variant", @@ -44,13 +45,22 @@ def main() -> None: dest="variants", default=[], ) + parser.add_argument( + "--cloud", + "-c", + help="The cloud to use", + choices=[c.value for c in Cloud], + default=Cloud.GCP.value, + ) args = parser.parse_args() image_name = args.image_name + cloud = Cloud(args.cloud) + log.info(f"Using cloud: {cloud}") # If the IMAGE_TAG environment variable is set, check that if IMAGE_TAG_ENV in os.environ and os.environ[IMAGE_TAG_ENV]: image_tag = os.environ[IMAGE_TAG_ENV] - if not image_exists(shell, image_name, image_tag): + if not image_exists(shell, image_name, image_tag, cloud=cloud): sys.exit(1) variants = args.variants @@ -63,7 +73,9 @@ def main() -> None: # Find the latest image from git history num_images_to_find = 1 # for the purposes of this script, this is always 1 images = list( - find_recent_images(shell, git, num_images_to_find, image_name, variant_prefixes) + find_recent_images( + shell, git, num_images_to_find, image_name, variant_prefixes, cloud=cloud + ) ) log.info(f"Found latest images: {images}") diff --git a/testsuite/fixtures/forge-test-runner-template.fixture b/testsuite/fixtures/forge-test-runner-template.fixture index 0bc4c019e890a..f65f8d53d1c22 100644 --- 
a/testsuite/fixtures/forge-test-runner-template.fixture +++ b/testsuite/fixtures/forge-test-runner-template.fixture @@ -4,64 +4,75 @@ metadata: name: forge-potato-1659078000-asdf labels: app.kubernetes.io/name: forge + app.kubernetes.io/part-of: forge-test-runner forge-namespace: forge-potato forge-image-tag: forge_asdf spec: restartPolicy: Never serviceAccountName: forge containers: - - name: main - image: 123.dkr.ecr.banana-east-1.amazonaws.com/aptos/forge:forge_asdf - imagePullPolicy: Always - command: - - /bin/bash - - -c - - | - ulimit -n 1048576 - forge --suite banana --duration-secs 123 --num-validators 10 --num-validator-fullnodes 20 --forge-cli-arg test k8s-swarm --image-tag asdf --upgrade-image-tag upgrade_asdf --namespace forge-potato --test-arg - resources: - limits: - cpu: 15.5 - memory: 26Gi - requests: - cpu: 15 - memory: 26Gi - env: - - name: FORGE_TRIGGERED_BY - value: github-actions - - name: PROMETHEUS_URL - valueFrom: - secretKeyRef: - name: prometheus-read-only - key: url - optional: true - - name: PROMETHEUS_TOKEN - valueFrom: - secretKeyRef: - name: prometheus-read-only - key: token - optional: true - - name: RUST_BACKTRACE - value: "1" - # - name: RUST_LOG - # value: debug + - name: main + image: 123.dkr.ecr.banana-east-1.amazonaws.com/aptos/forge:forge_asdf + imagePullPolicy: Always + command: + - /bin/bash + - -c + - | + ulimit -n 1048576 + forge --suite banana --duration-secs 123 --num-validators 10 --num-validator-fullnodes 20 --forge-cli-arg test k8s-swarm --image-tag asdf --upgrade-image-tag upgrade_asdf --namespace forge-potato --test-arg + resources: + limits: + cpu: 15.5 + memory: 26Gi + requests: + cpu: 15 + memory: 26Gi + env: + - name: FORGE_TRIGGERED_BY + value: github-actions + - name: PROMETHEUS_URL + valueFrom: + secretKeyRef: + name: prometheus-read-only + key: url + optional: true + - name: PROMETHEUS_TOKEN + valueFrom: + secretKeyRef: + name: prometheus-read-only + key: token + optional: true + - name: RUST_BACKTRACE + value: "1" + - name: KUBECONFIG + value: /etc/multiregion-kubeconfig/kubeconfig + # - name: RUST_LOG + # value: debug + volumeMounts: + - name: multiregion-kubeconfig + readOnly: true + mountPath: /etc/multiregion-kubeconfig affinity: # avoid scheduling with other forge or validator/fullnode pods podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app.kubernetes.io/name - operator: In - values: ["validator", "fullnode", "forge"] - - key: run - operator: Exists - topologyKey: "kubernetes.io/hostname" + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: ["validator", "fullnode", "forge"] + - key: run + operator: Exists + topologyKey: "kubernetes.io/hostname" # schedule on a k8s worker node in the "validators" nodegroup # to access more compute - nodeSelector: - eks.amazonaws.com/nodegroup: validators + nodeSelector: eks.amazonaws.com/nodegroup: validators tolerations: - - effect: NoExecute - key: aptos.org/nodepool - value: validators + - effect: NoExecute + key: aptos.org/nodepool + value: validators + volumes: + - name: multiregion-kubeconfig + secret: + secretName: multiregion-kubeconfig + optional: true diff --git a/testsuite/fixtures/testMain.fixture b/testsuite/fixtures/testMain.fixture index 233d7d5c07293..44e1fa949cd63 100644 --- a/testsuite/fixtures/testMain.fixture +++ b/testsuite/fixtures/testMain.fixture @@ -1,9 +1,13 @@ Looking for cluster aptos-forge-big-1 in cloud AWS Found cluster: 
Cloud.AWS/us-west-2/aptos-forge-big-1 +Checking if image exists in GCP: aptos/validator-testing:banana Using the following image tags: forge: banana swarm: banana swarm upgrade (if applicable): banana +Checking if image exists in GCP: aptos/validator-testing:banana +Checking if image exists in GCP: aptos/validator-testing:banana +Checking if image exists in GCP: aptos/forge:banana === Start temp-pre-comment === ### Forge is running suite `banana-test` on `banana` * [Grafana dashboard (auto-refresh)](https://aptoslabs.grafana.net/d/overview/overview?orgId=1&refresh=10s&var-Datasource=VictoriaMetrics%20Global%20%28Non-mainnet%29&var-BigQuery=Google%20BigQuery&var-namespace=forge-perry-1659078000&var-metrics_source=All&var-chain_name=forge-big-1&refresh=10s&from=now-15m&to=now) @@ -11,6 +15,8 @@ Using the following image tags: * [Test runner output](None/None/actions/runs/None) * Test run is land-blocking === End temp-pre-comment === +Deleting forge pod for namespace forge-perry-1659078000 +Deleting forge pod for namespace forge-perry-1659078000 === Start temp-report === Forge test runner terminated: Trailing Log Lines: diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs index 76dd744fd6885..56f83625589ae 100644 --- a/testsuite/forge-cli/src/main.rs +++ b/testsuite/forge-cli/src/main.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{format_err, Context, Result}; -use aptos_config::config::ConsensusConfig; +use aptos_config::config::{ChainHealthBackoffValues, ConsensusConfig, PipelineBackpressureValues}; use aptos_forge::{ args::TransactionTypeArg, success_criteria::{LatencyType, StateProgressThreshold, SuccessCriteria}, @@ -21,8 +21,8 @@ use aptos_testcases::{ fullnode_reboot_stress_test::FullNodeRebootStressTest, generate_traffic, load_vs_perf_benchmark::{LoadVsPerfBenchmark, TransactionWorkload, Workloads}, - modifiers::{ExecutionDelayConfig, ExecutionDelayTest}, - multi_region_simulation_test::MultiRegionMultiCloudSimulationTest, + modifiers::{CpuChaosTest, ExecutionDelayConfig, ExecutionDelayTest}, + multi_region_network_test::MultiRegionNetworkEmulationTest, network_bandwidth_test::NetworkBandwidthTest, network_loss_test::NetworkLossTest, network_partition_test::NetworkPartitionTest, @@ -216,7 +216,6 @@ fn random_namespace(dictionary: Vec, rng: &mut R) -> Result>(); Ok(format!("forge-{}", random_words.join("-"))) @@ -263,7 +262,7 @@ fn main() -> Result<()> { match test_cmd { TestCommand::LocalSwarm(local_cfg) => { // Loosen all criteria for local runs - test_suite.get_success_criteria_mut().avg_tps = 400; + test_suite.get_success_criteria_mut().min_avg_tps = 400; let previous_emit_job = test_suite.get_emit_job().clone(); let test_suite = test_suite.with_emit_job(previous_emit_job.mode(EmitJobMode::MaxLoad { @@ -362,7 +361,7 @@ fn main() -> Result<()> { pub fn run_forge( global_duration: Duration, - tests: ForgeConfig<'_>, + tests: ForgeConfig, factory: F, options: &Options, logs: Option>, @@ -443,82 +442,84 @@ fn get_changelog(prev_commit: Option<&String>, upstream_commit: &str) -> String } } -fn get_test_suite(suite_name: &str, duration: Duration) -> Result> { +fn get_test_suite(suite_name: &str, duration: Duration) -> Result { match suite_name { - "land_blocking" => Ok(land_blocking_test_suite(duration)), - "land_blocking_three_region" => Ok(land_blocking_three_region_test_suite(duration)), "local_test_suite" => Ok(local_test_suite()), "pre_release" => Ok(pre_release_suite()), "run_forever" => Ok(run_forever()), // TODO(rustielin): verify 
each test suite "k8s_suite" => Ok(k8s_test_suite()), "chaos" => Ok(chaos_test_suite(duration)), - single_test => single_test_suite(single_test), + single_test => single_test_suite(single_test, duration), } } /// Provides a forge config that runs the swarm forever (unless killed) -fn run_forever() -> ForgeConfig<'static> { +fn run_forever() -> ForgeConfig { ForgeConfig::default() - .with_admin_tests(vec![&GetMetadata]) + .add_admin_test(GetMetadata) .with_genesis_module_bundle(aptos_cached_packages::head_release_bundle().clone()) - .with_aptos_tests(vec![&RunForever]) + .add_aptos_test(RunForever) } -fn local_test_suite() -> ForgeConfig<'static> { +fn local_test_suite() -> ForgeConfig { ForgeConfig::default() - .with_aptos_tests(vec![&FundAccount, &TransferCoins]) - .with_admin_tests(vec![&GetMetadata]) - .with_network_tests(vec![&RestartValidator, &EmitTransaction]) + .add_aptos_test(FundAccount) + .add_aptos_test(TransferCoins) + .add_admin_test(GetMetadata) + .add_network_test(RestartValidator) + .add_network_test(EmitTransaction) .with_genesis_module_bundle(aptos_cached_packages::head_release_bundle().clone()) } -fn k8s_test_suite() -> ForgeConfig<'static> { +fn k8s_test_suite() -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(30).unwrap()) - .with_aptos_tests(vec![&FundAccount, &TransferCoins]) - .with_admin_tests(vec![&GetMetadata]) - .with_network_tests(vec![ - &EmitTransaction, - &SimpleValidatorUpgrade, - &PerformanceBenchmark, - ]) -} - -fn single_test_suite(test_name: &str) -> Result> { - let config = - ForgeConfig::default().with_initial_validator_count(NonZeroUsize::new(30).unwrap()); + .add_aptos_test(FundAccount) + .add_aptos_test(TransferCoins) + .add_admin_test(GetMetadata) + .add_network_test(EmitTransaction) + .add_network_test(SimpleValidatorUpgrade) + .add_network_test(PerformanceBenchmark) +} + +fn single_test_suite(test_name: &str, duration: Duration) -> Result { let single_test_suite = match test_name { - "epoch_changer_performance" => epoch_changer_performance(config), - "state_sync_perf_fullnodes_apply_outputs" => { - state_sync_perf_fullnodes_apply_outputs(config) - }, + // Land-blocking tests to be run on every PR: + "land_blocking" => land_blocking_test_suite(duration), // to remove land_blocking, superseeded by the below + "realistic_env_max_load" => realistic_env_max_load_test(duration), + "compat" => compat(), + "framework_upgrade" => upgrade(), + // Rest of the tests: + "realistic_env_load_sweep" => realistic_env_load_sweep_test(), + "realistic_env_graceful_overload" => realistic_env_graceful_overload(), + "realistic_network_tuned_for_throughput" => realistic_network_tuned_for_throughput_test(), + "epoch_changer_performance" => epoch_changer_performance(), + "state_sync_perf_fullnodes_apply_outputs" => state_sync_perf_fullnodes_apply_outputs(), "state_sync_perf_fullnodes_execute_transactions" => { - state_sync_perf_fullnodes_execute_transactions(config) + state_sync_perf_fullnodes_execute_transactions() }, - "state_sync_perf_fullnodes_fast_sync" => state_sync_perf_fullnodes_fast_sync(config), - "state_sync_perf_validators" => state_sync_perf_validators(config), - "validators_join_and_leave" => validators_join_and_leave(config), - "compat" => compat(config), - "framework_upgrade" => upgrade(config), - "config" => config.with_network_tests(vec![&ReconfigurationTest]), - "network_partition" => network_partition(config), - "three_region_simulation" => three_region_simulation(config), + "state_sync_perf_fullnodes_fast_sync" 
=> state_sync_perf_fullnodes_fast_sync(), + "state_sync_perf_validators" => state_sync_perf_validators(), + "validators_join_and_leave" => validators_join_and_leave(), + "config" => ForgeConfig::default().add_network_test(ReconfigurationTest), + "network_partition" => network_partition(), + "three_region_simulation" => three_region_simulation(), "three_region_simulation_with_different_node_speed" => { - three_region_simulation_with_different_node_speed(config) + three_region_simulation_with_different_node_speed() }, - "network_bandwidth" => network_bandwidth(config), - "setup_test" => setup_test(config), - "single_vfn_perf" => single_vfn_perf(config), - "validator_reboot_stress_test" => validator_reboot_stress_test(config), - "fullnode_reboot_stress_test" => fullnode_reboot_stress_test(config), + "network_bandwidth" => network_bandwidth(), + "setup_test" => setup_test(), + "single_vfn_perf" => single_vfn_perf(), + "validator_reboot_stress_test" => validator_reboot_stress_test(), + "fullnode_reboot_stress_test" => fullnode_reboot_stress_test(), + "workload_mix" => workload_mix_test(), "account_creation" | "nft_mint" | "publishing" | "module_loading" - | "write_new_resource" => individual_workload_tests(test_name.into(), config), - "graceful_overload" => graceful_overload(config), - "three_region_simulation_graceful_overload" => three_region_sim_graceful_overload(config), + | "write_new_resource" => individual_workload_tests(test_name.into()), + "graceful_overload" => graceful_overload(), // not scheduled on continuous - "load_vs_perf_benchmark" => load_vs_perf_benchmark(config), - "workload_vs_perf_benchmark" => workload_vs_perf_benchmark(config), + "load_vs_perf_benchmark" => load_vs_perf_benchmark(), + "workload_vs_perf_benchmark" => workload_vs_perf_benchmark(), // maximizing number of rounds and epochs within a given time, to stress test consensus // so using small constant traffic, small blocks and fast rounds, and short epochs. // reusing changing_working_quorum_test just for invariants/asserts, but with max_down_nodes = 0. 
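// Editor's aside: a minimal, self-contained Rust sketch of the ownership change these
// hunks make to ForgeConfig — dropping the <'static> lifetime parameter by storing owned,
// boxed trait objects (add_network_test(T)) instead of borrowed &'static test references
// (with_network_tests(vec![&T])). All names below are illustrative only, not the real
// aptos-forge API.

trait NetworkTest {
    fn name(&self) -> &'static str;
}

struct PerformanceBenchmark;

impl NetworkTest for PerformanceBenchmark {
    fn name(&self) -> &'static str {
        "performance_benchmark"
    }
}

#[derive(Default)]
struct Config {
    // Owned trait objects: no lifetime needs to be threaded through the config type.
    network_tests: Vec<Box<dyn NetworkTest>>,
}

impl Config {
    // Taking the test by value and boxing it means suite constructors no longer need
    // 'static references, so each suite function can build and return an owned Config.
    fn add_network_test<T: NetworkTest + 'static>(mut self, test: T) -> Self {
        self.network_tests.push(Box::new(test));
        self
    }
}

fn main() {
    let suite = Config::default().add_network_test(PerformanceBenchmark);
    for test in &suite.network_tests {
        println!("registered: {}", test.name());
    }
}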
@@ -530,31 +531,39 @@ fn single_test_suite(test_name: &str) -> Result> { "different_node_speed_and_reliability_test" => different_node_speed_and_reliability_test(), "state_sync_slow_processing_catching_up" => state_sync_slow_processing_catching_up(), "state_sync_failures_catching_up" => state_sync_failures_catching_up(), - "twin_validator_test" => twin_validator_test(config), + "twin_validator_test" => twin_validator_test(), "large_db_simple_test" => large_db_simple_test(), - "consensus_only_perf_benchmark" => run_consensus_only_perf_test(config), - "consensus_only_three_region_simulation" => { - run_consensus_only_three_region_simulation(config) - }, - "quorum_store_reconfig_enable_test" => quorum_store_reconfig_enable_test(config), - "multi_region_multi_cloud_simulation_test" => { - multi_region_multi_cloud_simulation_test(config) - }, - "multiregion_benchmark_test" => multiregion_benchmark_test(config), + "consensus_only_perf_benchmark" => run_consensus_only_perf_test(), + "consensus_only_three_region_simulation" => run_consensus_only_three_region_simulation(), + "quorum_store_reconfig_enable_test" => quorum_store_reconfig_enable_test(), + "mainnet_like_simulation_test" => mainnet_like_simulation_test(), + "multiregion_benchmark_test" => multiregion_benchmark_test(), _ => return Err(format_err!("Invalid --suite given: {:?}", test_name)), }; Ok(single_test_suite) } -fn run_consensus_only_three_region_simulation(config: ForgeConfig) -> ForgeConfig { - config +fn wrap_with_realistic_env(test: T) -> CompositeNetworkTest { + CompositeNetworkTest::new_with_two_wrappers( + MultiRegionNetworkEmulationTest { + override_config: None, + }, + CpuChaosTest { + override_config: None, + }, + test, + ) +} + +fn run_consensus_only_three_region_simulation() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_emit_job( EmitJobRequest::default() .mode(EmitJobMode::ConstTps { tps: 30000 }) .txn_expiration_time_secs(5 * 60), ) - .with_network_tests(vec![&ThreeRegionSameCloudSimulationTest]) + .add_network_test(ThreeRegionSameCloudSimulationTest) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // no epoch change. helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); @@ -590,14 +599,16 @@ fn run_consensus_only_three_region_simulation(config: ForgeConfig) -> ForgeConfi ) } -fn run_consensus_only_perf_test(config: ForgeConfig) -> ForgeConfig { +fn run_consensus_only_perf_test() -> ForgeConfig { + let config = ForgeConfig::default(); let emit_job = config.get_emit_job().clone(); config .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) - .with_network_tests(vec![&LoadVsPerfBenchmark { - test: &PerformanceBenchmark, + .add_network_test(LoadVsPerfBenchmark { + test: Box::new(PerformanceBenchmark), workloads: Workloads::TPS(&[30000]), - }]) + criteria: vec![], + }) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // no epoch change. 
helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); @@ -635,15 +646,15 @@ fn run_consensus_only_perf_test(config: ForgeConfig) -> ForgeConfig { ) } -fn large_db_simple_test() -> ForgeConfig<'static> { +fn large_db_simple_test() -> ForgeConfig { large_db_test(10, 500, 300, "10-validators".to_string()) } -fn twin_validator_test(config: ForgeConfig) -> ForgeConfig { - config - .with_network_tests(vec![&TwinValidatorTest]) +fn twin_validator_test() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_initial_fullnode_count(5) + .add_network_test(TwinValidatorTest) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 300.into(); })) @@ -664,7 +675,7 @@ fn twin_validator_test(config: ForgeConfig) -> ForgeConfig { ) } -fn state_sync_failures_catching_up() -> ForgeConfig<'static> { +fn state_sync_failures_catching_up() -> ForgeConfig { changing_working_quorum_test_helper( 10, 300, @@ -672,7 +683,7 @@ fn state_sync_failures_catching_up() -> ForgeConfig<'static> { 2500, true, false, - &ChangingWorkingQuorumTest { + ChangingWorkingQuorumTest { min_tps: 1500, always_healthy_nodes: 2, max_down_nodes: 1, @@ -683,7 +694,7 @@ fn state_sync_failures_catching_up() -> ForgeConfig<'static> { ) } -fn state_sync_slow_processing_catching_up() -> ForgeConfig<'static> { +fn state_sync_slow_processing_catching_up() -> ForgeConfig { changing_working_quorum_test_helper( 10, 300, @@ -691,7 +702,7 @@ fn state_sync_slow_processing_catching_up() -> ForgeConfig<'static> { 2500, true, true, - &ChangingWorkingQuorumTest { + ChangingWorkingQuorumTest { min_tps: 750, always_healthy_nodes: 2, max_down_nodes: 0, @@ -702,8 +713,8 @@ fn state_sync_slow_processing_catching_up() -> ForgeConfig<'static> { ) } -fn different_node_speed_and_reliability_test() -> ForgeConfig<'static> { - changing_working_quorum_test_helper(20, 120, 70, 50, true, false, &ChangingWorkingQuorumTest { +fn different_node_speed_and_reliability_test() -> ForgeConfig { + changing_working_quorum_test_helper(20, 120, 70, 50, true, false, ChangingWorkingQuorumTest { min_tps: 30, always_healthy_nodes: 6, max_down_nodes: 5, @@ -713,27 +724,19 @@ fn different_node_speed_and_reliability_test() -> ForgeConfig<'static> { }) } -fn large_test_only_few_nodes_down() -> ForgeConfig<'static> { - changing_working_quorum_test_helper( - 60, - 120, - 100, - 70, - false, - false, - &ChangingWorkingQuorumTest { - min_tps: 50, - always_healthy_nodes: 40, - max_down_nodes: 10, - num_large_validators: 0, - add_execution_delay: false, - check_period_s: 27, - }, - ) +fn large_test_only_few_nodes_down() -> ForgeConfig { + changing_working_quorum_test_helper(60, 120, 100, 70, false, false, ChangingWorkingQuorumTest { + min_tps: 50, + always_healthy_nodes: 40, + max_down_nodes: 10, + num_large_validators: 0, + add_execution_delay: false, + check_period_s: 27, + }) } -fn changing_working_quorum_test_high_load() -> ForgeConfig<'static> { - changing_working_quorum_test_helper(20, 120, 500, 300, true, true, &ChangingWorkingQuorumTest { +fn changing_working_quorum_test_high_load() -> ForgeConfig { + changing_working_quorum_test_helper(20, 120, 500, 300, true, true, ChangingWorkingQuorumTest { min_tps: 50, always_healthy_nodes: 0, max_down_nodes: 20, @@ -745,8 +748,8 @@ fn changing_working_quorum_test_high_load() -> ForgeConfig<'static> { }) } -fn changing_working_quorum_test() -> ForgeConfig<'static> { - changing_working_quorum_test_helper(20, 120, 100, 70, true, true, 
&ChangingWorkingQuorumTest { +fn changing_working_quorum_test() -> ForgeConfig { + changing_working_quorum_test_helper(20, 120, 100, 70, true, true, ChangingWorkingQuorumTest { min_tps: 15, always_healthy_nodes: 0, max_down_nodes: 20, @@ -758,8 +761,8 @@ fn changing_working_quorum_test() -> ForgeConfig<'static> { }) } -fn consensus_stress_test() -> ForgeConfig<'static> { - changing_working_quorum_test_helper(10, 60, 100, 80, true, false, &ChangingWorkingQuorumTest { +fn consensus_stress_test() -> ForgeConfig { + changing_working_quorum_test_helper(10, 60, 100, 80, true, false, ChangingWorkingQuorumTest { min_tps: 50, always_healthy_nodes: 10, max_down_nodes: 0, @@ -769,16 +772,60 @@ fn consensus_stress_test() -> ForgeConfig<'static> { }) } -fn load_vs_perf_benchmark(config: ForgeConfig) -> ForgeConfig { - config +fn realistic_env_load_sweep_test() -> ForgeConfig { + ForgeConfig::default() + .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) + .with_initial_fullnode_count(10) + .add_network_test(wrap_with_realistic_env(LoadVsPerfBenchmark { + test: Box::new(PerformanceBenchmark), + workloads: Workloads::TPS(&[10, 100, 1000, 3000, 5000]), + criteria: [ + (9, 1.5, 3.), + (95, 1.5, 3.), + (950, 2., 3.), + (2750, 2.5, 4.), + (4600, 3., 5.), + ] + .into_iter() + .map(|(min_tps, max_lat_p50, max_lat_p99)| { + SuccessCriteria::new(min_tps) + .add_max_expired_tps(0) + .add_max_failed_submission_tps(0) + .add_latency_threshold(max_lat_p50, LatencyType::P50) + .add_latency_threshold(max_lat_p99, LatencyType::P99) + }) + .collect(), + })) + // Test inherits the main EmitJobRequest, so update here for more precise latency measurements + .with_emit_job( + EmitJobRequest::default().latency_polling_interval(Duration::from_millis(100)), + ) + .with_genesis_helm_config_fn(Arc::new(|helm_values| { + // no epoch change. + helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); + })) + .with_success_criteria( + SuccessCriteria::new(0) + .add_no_restarts() + .add_wait_for_catchup_s(60) + .add_chain_progress(StateProgressThreshold { + max_no_progress_secs: 30.0, + max_round_gap: 10, + }), + ) +} + +fn load_vs_perf_benchmark() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_initial_fullnode_count(10) - .with_network_tests(vec![&LoadVsPerfBenchmark { - test: &PerformanceBenchmark, + .add_network_test(LoadVsPerfBenchmark { + test: Box::new(PerformanceBenchmark), workloads: Workloads::TPS(&[ 200, 1000, 3000, 5000, 7000, 7500, 8000, 9000, 10000, 12000, 15000, ]), - }]) + criteria: Vec::new(), + }) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // no epoch change. 
helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); @@ -794,8 +841,8 @@ fn load_vs_perf_benchmark(config: ForgeConfig) -> ForgeConfig { ) } -fn workload_vs_perf_benchmark(config: ForgeConfig) -> ForgeConfig { - config +fn workload_vs_perf_benchmark() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(7).unwrap()) .with_initial_fullnode_count(7) .with_node_helm_config_fn(Arc::new(move |helm_values| { @@ -805,8 +852,8 @@ fn workload_vs_perf_benchmark(config: ForgeConfig) -> ForgeConfig { // .with_emit_job(EmitJobRequest::default().mode(EmitJobMode::MaxLoad { // mempool_backlog: 10000, // })) - .with_network_tests(vec![&LoadVsPerfBenchmark { - test: &PerformanceBenchmark, + .add_network_test(LoadVsPerfBenchmark { + test: Box::new(PerformanceBenchmark), workloads: Workloads::TRANSACTIONS(&[ TransactionWorkload { transaction_type: TransactionTypeArg::NoOp, @@ -849,7 +896,8 @@ fn workload_vs_perf_benchmark(config: ForgeConfig) -> ForgeConfig { unique_senders: true, }, ]), - }]) + criteria: Vec::new(), + }) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // no epoch change. helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); @@ -865,31 +913,27 @@ fn workload_vs_perf_benchmark(config: ForgeConfig) -> ForgeConfig { ) } -fn graceful_overload(config: ForgeConfig) -> ForgeConfig { - config +fn graceful_overload() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(10).unwrap()) // if we have full nodes for subset of validators, TPS drops. - // Validators without VFN are proposing almost empty blocks, + // Validators without VFN are not creating batches, // as no useful transaction reach their mempool. // something to potentially improve upon. // So having VFNs for all validators .with_initial_fullnode_count(10) - .with_network_tests(vec![&TwoTrafficsTest { - inner_tps: 15000, - inner_gas_price: aptos_global_constants::GAS_UNIT_PRICE, - inner_init_gas_price_multiplier: 20, - // because it is static, cannot use TransactionTypeArg::materialize method - inner_transaction_type: TransactionType::CoinTransfer { - invalid_transaction_ratio: 0, - sender_use_account_pool: false, - }, + .add_network_test(TwoTrafficsTest { + inner_traffic: EmitJobRequest::default() + .mode(EmitJobMode::ConstTps { tps: 10000 }) + .init_gas_price_multiplier(20), + // Additionally - we are not really gracefully handling overlaods, // setting limits based on current reality, to make sure they // don't regress, but something to investigate - avg_tps: 3400, - latency_thresholds: &[], - }]) - // First start higher gas-fee traffic, to not cause issues with TxnEmitter setup - account creation + inner_success_criteria: SuccessCriteria::new(3400), + }) + // First start non-overload (higher gas-fee) traffic, + // to not cause issues with TxnEmitter setup - account creation .with_emit_job( EmitJobRequest::default() .mode(EmitJobMode::ConstTps { tps: 1000 }) @@ -917,39 +961,34 @@ fn graceful_overload(config: ForgeConfig) -> ForgeConfig { ) } -fn three_region_sim_graceful_overload(config: ForgeConfig) -> ForgeConfig { - config +fn realistic_env_graceful_overload() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) // if we have full nodes for subset of validators, TPS drops. - // Validators without VFN are proposing almost empty blocks, + // Validators without VFN are not creating batches, // as no useful transaction reach their mempool. // something to potentially improve upon. 
// So having VFNs for all validators .with_initial_fullnode_count(20) - .with_network_tests(vec![&CompositeNetworkTest { - wrapper: &ThreeRegionSameCloudSimulationTest, - test: &TwoTrafficsTest { - inner_tps: 15000, - inner_gas_price: aptos_global_constants::GAS_UNIT_PRICE, - inner_init_gas_price_multiplier: 20, - // Cannot use TransactionTypeArg::materialize, as this needs to be static - inner_transaction_type: TransactionType::CoinTransfer { - invalid_transaction_ratio: 0, - sender_use_account_pool: false, - }, - // Additionally - we are not really gracefully handling overlaods, - // setting limits based on current reality, to make sure they - // don't regress, but something to investigate - avg_tps: 1200, - latency_thresholds: &[], - }, - }]) + .add_network_test(wrap_with_realistic_env(TwoTrafficsTest { + inner_traffic: EmitJobRequest::default() + .mode(EmitJobMode::ConstTps { tps: 15000 }) + .init_gas_price_multiplier(20), + // Additionally - we are not really gracefully handling overlaods, + // setting limits based on current reality, to make sure they + // don't regress, but something to investigate + inner_success_criteria: SuccessCriteria::new(3400), + })) // First start higher gas-fee traffic, to not cause issues with TxnEmitter setup - account creation .with_emit_job( EmitJobRequest::default() .mode(EmitJobMode::ConstTps { tps: 1000 }) .gas_price(5 * aptos_global_constants::GAS_UNIT_PRICE), ) + .with_node_helm_config_fn(Arc::new(move |helm_values| { + helm_values["validator"]["config"]["execution"] + ["processed_transactions_detailed_counters"] = true.into(); + })) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 300.into(); })) @@ -972,14 +1011,86 @@ fn three_region_sim_graceful_overload(config: ForgeConfig) -> ForgeConfig { ) } -fn individual_workload_tests(test_name: String, config: ForgeConfig) -> ForgeConfig { +fn workload_mix_test() -> ForgeConfig { + ForgeConfig::default() + .with_initial_validator_count(NonZeroUsize::new(5).unwrap()) + .with_initial_fullnode_count(3) + .add_network_test(PerformanceBenchmark) + .with_node_helm_config_fn(Arc::new(move |helm_values| { + helm_values["validator"]["config"]["execution"] + ["processed_transactions_detailed_counters"] = true.into(); + })) + .with_emit_job( + EmitJobRequest::default() + .mode(EmitJobMode::MaxLoad { + mempool_backlog: 10000, + }) + .transaction_mix(vec![ + ( + TransactionTypeArg::AccountGeneration.materialize_default(), + 5, + ), + (TransactionTypeArg::NoOp5Signers.materialize_default(), 1), + (TransactionTypeArg::CoinTransfer.materialize_default(), 1), + (TransactionTypeArg::PublishPackage.materialize_default(), 1), + ( + TransactionTypeArg::AccountResource32B.materialize(1, true), + 1, + ), + // ( + // TransactionTypeArg::AccountResource10KB.materialize(1, true), + // 1, + // ), + ( + TransactionTypeArg::ModifyGlobalResource.materialize(1, false), + 1, + ), + // ( + // TransactionTypeArg::ModifyGlobalResource.materialize(10, false), + // 1, + // ), + ( + TransactionTypeArg::Batch100Transfer.materialize_default(), + 1, + ), + // ( + // TransactionTypeArg::TokenV1NFTMintAndTransferSequential + // .materialize_default(), + // 1, + // ), + // ( + // TransactionTypeArg::TokenV1NFTMintAndTransferParallel.materialize_default(), + // 1, + // ), + // ( + // TransactionTypeArg::TokenV1FTMintAndTransfer.materialize_default(), + // 1, + // ), + ( + TransactionTypeArg::TokenV2AmbassadorMint.materialize_default(), + 1, + ), + ]), + ) + .with_success_criteria( + 
SuccessCriteria::new(100) + .add_no_restarts() + .add_wait_for_catchup_s(240) + .add_chain_progress(StateProgressThreshold { + max_no_progress_secs: 20.0, + max_round_gap: 6, + }), + ) +} + +fn individual_workload_tests(test_name: String) -> ForgeConfig { let job = EmitJobRequest::default().mode(EmitJobMode::MaxLoad { mempool_backlog: 30000, }); - config - .with_network_tests(vec![&PerformanceBenchmark]) + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(5).unwrap()) .with_initial_fullnode_count(3) + .add_network_test(PerformanceBenchmark) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); })) @@ -1037,24 +1148,24 @@ fn individual_workload_tests(test_name: String, config: ForgeConfig) -> ForgeCon ) } -fn fullnode_reboot_stress_test(config: ForgeConfig) -> ForgeConfig { - config +fn fullnode_reboot_stress_test() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(10).unwrap()) .with_initial_fullnode_count(10) - .with_network_tests(vec![&FullNodeRebootStressTest]) + .add_network_test(FullNodeRebootStressTest) .with_emit_job(EmitJobRequest::default().mode(EmitJobMode::ConstTps { tps: 5000 })) .with_success_criteria(SuccessCriteria::new(2000).add_wait_for_catchup_s(600)) } -fn validator_reboot_stress_test(config: ForgeConfig) -> ForgeConfig { - config +fn validator_reboot_stress_test() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(15).unwrap()) .with_initial_fullnode_count(1) - .with_network_tests(vec![&ValidatorRebootStressTest { + .add_network_test(ValidatorRebootStressTest { num_simultaneously: 3, down_time_secs: 5.0, pause_secs: 5.0, - }]) + }) .with_success_criteria(SuccessCriteria::new(2000).add_wait_for_catchup_s(600)) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 120.into(); @@ -1066,11 +1177,11 @@ fn apply_quorum_store_configs_for_single_node(helm_values: &mut serde_yaml::Valu ["dynamic_max_txn_per_s"] = 5500.into(); } -fn single_vfn_perf(config: ForgeConfig) -> ForgeConfig { - config +fn single_vfn_perf() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(1).unwrap()) .with_initial_fullnode_count(1) - .with_network_tests(vec![&PerformanceBenchmark]) + .add_network_test(PerformanceBenchmark) .with_success_criteria( SuccessCriteria::new(5000) .add_no_restarts() @@ -1081,33 +1192,34 @@ fn single_vfn_perf(config: ForgeConfig) -> ForgeConfig { })) } -fn setup_test(config: ForgeConfig) -> ForgeConfig { - config +fn setup_test() -> ForgeConfig { + ForgeConfig::default() + .with_initial_validator_count(NonZeroUsize::new(1).unwrap()) .with_initial_fullnode_count(1) - .with_network_tests(vec![&ForgeSetupTest]) + .add_network_test(ForgeSetupTest) } -fn network_bandwidth(config: ForgeConfig) -> ForgeConfig { - config +fn network_bandwidth() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(8).unwrap()) - .with_network_tests(vec![&NetworkBandwidthTest]) + .add_network_test(NetworkBandwidthTest) } -fn three_region_simulation_with_different_node_speed(config: ForgeConfig) -> ForgeConfig { - config +fn three_region_simulation_with_different_node_speed() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(30).unwrap()) .with_initial_fullnode_count(30) .with_emit_job(EmitJobRequest::default().mode(EmitJobMode::ConstTps { tps: 5000 })) - 
.with_network_tests(vec![&CompositeNetworkTest { - wrapper: &ExecutionDelayTest { + .add_network_test(CompositeNetworkTest::new( + ExecutionDelayTest { add_execution_delay: ExecutionDelayConfig { inject_delay_node_fraction: 0.5, inject_delay_max_transaction_percentage: 40, inject_delay_per_transaction_ms: 2, }, }, - test: &ThreeRegionSameCloudSimulationTest, - }]) + ThreeRegionSameCloudSimulationTest, + )) .with_node_helm_config_fn(Arc::new(move |helm_values| { helm_values["validator"]["config"]["api"]["failpoints_enabled"] = true.into(); // helm_values["validator"]["config"]["consensus"]["max_sending_block_txns"] = @@ -1130,12 +1242,12 @@ fn three_region_simulation_with_different_node_speed(config: ForgeConfig) -> For ) } -fn three_region_simulation(config: ForgeConfig) -> ForgeConfig { - config +fn three_region_simulation() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(12).unwrap()) .with_initial_fullnode_count(12) .with_emit_job(EmitJobRequest::default().mode(EmitJobMode::ConstTps { tps: 5000 })) - .with_network_tests(vec![&ThreeRegionSameCloudSimulationTest]) + .add_network_test(ThreeRegionSameCloudSimulationTest) // TODO(rustielin): tune these success criteria after we have a better idea of the test behavior .with_success_criteria( SuccessCriteria::new(3000) @@ -1148,10 +1260,10 @@ fn three_region_simulation(config: ForgeConfig) -> ForgeConfig { ) } -fn network_partition(config: ForgeConfig) -> ForgeConfig { - config +fn network_partition() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(10).unwrap()) - .with_network_tests(vec![&NetworkPartitionTest]) + .add_network_test(NetworkPartitionTest) .with_success_criteria( SuccessCriteria::new(2500) .add_no_restarts() @@ -1162,50 +1274,48 @@ fn network_partition(config: ForgeConfig) -> ForgeConfig { })) } -fn compat(config: ForgeConfig) -> ForgeConfig { - config +fn compat() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(5).unwrap()) - .with_network_tests(vec![&SimpleValidatorUpgrade]) + .add_network_test(SimpleValidatorUpgrade) .with_success_criteria(SuccessCriteria::new(5000).add_wait_for_catchup_s(240)) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 30.into(); })) } -fn upgrade(config: ForgeConfig) -> ForgeConfig { - config +fn upgrade() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(5).unwrap()) - .with_network_tests(vec![&FrameworkUpgrade]) + .add_network_test(FrameworkUpgrade) .with_success_criteria(SuccessCriteria::new(5000).add_wait_for_catchup_s(240)) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 30.into(); })) } -fn epoch_changer_performance(config: ForgeConfig) -> ForgeConfig { - config - .with_network_tests(vec![&PerformanceBenchmark]) +fn epoch_changer_performance() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(5).unwrap()) .with_initial_fullnode_count(2) + .add_network_test(PerformanceBenchmark) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 60.into(); })) } /// A default config for running various state sync performance tests -fn state_sync_perf_fullnodes_config(forge_config: ForgeConfig<'static>) -> ForgeConfig<'static> { - forge_config +fn state_sync_perf_fullnodes_config() -> ForgeConfig { + ForgeConfig::default() 
.with_initial_validator_count(NonZeroUsize::new(4).unwrap()) .with_initial_fullnode_count(4) } /// The config for running a state sync performance test when applying /// transaction outputs in fullnodes. -fn state_sync_perf_fullnodes_apply_outputs( - forge_config: ForgeConfig<'static>, -) -> ForgeConfig<'static> { - state_sync_perf_fullnodes_config(forge_config) - .with_network_tests(vec![&StateSyncFullnodePerformance]) +fn state_sync_perf_fullnodes_apply_outputs() -> ForgeConfig { + state_sync_perf_fullnodes_config() + .add_network_test(StateSyncFullnodePerformance) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); })) @@ -1220,11 +1330,9 @@ fn state_sync_perf_fullnodes_apply_outputs( /// The config for running a state sync performance test when executing /// transactions in fullnodes. -fn state_sync_perf_fullnodes_execute_transactions( - forge_config: ForgeConfig<'static>, -) -> ForgeConfig<'static> { - state_sync_perf_fullnodes_config(forge_config) - .with_network_tests(vec![&StateSyncFullnodePerformance]) +fn state_sync_perf_fullnodes_execute_transactions() -> ForgeConfig { + state_sync_perf_fullnodes_config() + .add_network_test(StateSyncFullnodePerformance) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); })) @@ -1239,9 +1347,9 @@ fn state_sync_perf_fullnodes_execute_transactions( /// The config for running a state sync performance test when fast syncing /// to the latest epoch. -fn state_sync_perf_fullnodes_fast_sync(forge_config: ForgeConfig<'static>) -> ForgeConfig<'static> { - state_sync_perf_fullnodes_config(forge_config) - .with_network_tests(vec![&StateSyncFullnodeFastSyncPerformance]) +fn state_sync_perf_fullnodes_fast_sync() -> ForgeConfig { + state_sync_perf_fullnodes_config() + .add_network_test(StateSyncFullnodeFastSyncPerformance) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 180.into(); // Frequent epochs })) @@ -1262,8 +1370,8 @@ fn state_sync_perf_fullnodes_fast_sync(forge_config: ForgeConfig<'static>) -> Fo /// The config for running a state sync performance test when applying /// transaction outputs in failed validators. -fn state_sync_perf_validators(forge_config: ForgeConfig<'static>) -> ForgeConfig<'static> { - forge_config +fn state_sync_perf_validators() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(7).unwrap()) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); @@ -1274,19 +1382,19 @@ fn state_sync_perf_validators(forge_config: ForgeConfig<'static>) -> ForgeConfig helm_values["validator"]["config"]["state_sync"]["state_sync_driver"] ["continuous_syncing_mode"] = "ApplyTransactionOutputs".into(); })) - .with_network_tests(vec![&StateSyncValidatorPerformance]) + .add_network_test(StateSyncValidatorPerformance) .with_success_criteria(SuccessCriteria::new(5000)) } /// The config for running a validator join and leave test. 
-fn validators_join_and_leave(forge_config: ForgeConfig<'static>) -> ForgeConfig<'static> { - forge_config +fn validators_join_and_leave() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 60.into(); helm_values["chain"]["allow_new_validators"] = true.into(); })) - .with_network_tests(vec![&ValidatorJoinLeaveTest]) + .add_network_test(ValidatorJoinLeaveTest) .with_success_criteria( SuccessCriteria::new(5000) .add_no_restarts() @@ -1304,11 +1412,11 @@ fn validators_join_and_leave(forge_config: ForgeConfig<'static>) -> ForgeConfig< ) } -fn land_blocking_test_suite(duration: Duration) -> ForgeConfig<'static> { +fn land_blocking_test_suite(duration: Duration) -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_initial_fullnode_count(10) - .with_network_tests(vec![&PerformanceBenchmark]) + .add_network_test(PerformanceBenchmark) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // Have single epoch change in land blocking helm_values["chain"]["epoch_duration_secs"] = 300.into(); @@ -1340,17 +1448,33 @@ fn land_blocking_test_suite(duration: Duration) -> ForgeConfig<'static> { } // TODO: Replace land_blocking when performance reaches on par with current land_blocking -fn land_blocking_three_region_test_suite(duration: Duration) -> ForgeConfig<'static> { +fn realistic_env_max_load_test(duration: Duration) -> ForgeConfig { + let duration_secs = duration.as_secs(); ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_initial_fullnode_count(10) - .with_network_tests(vec![&ThreeRegionSameCloudSimulationTest]) - .with_genesis_helm_config_fn(Arc::new(|helm_values| { - // Have single epoch change in land blocking - helm_values["chain"]["epoch_duration_secs"] = 300.into(); + .add_network_test(wrap_with_realistic_env(TwoTrafficsTest { + inner_traffic: EmitJobRequest::default() + .mode(EmitJobMode::MaxLoad { + mempool_backlog: 40000, + }) + .init_gas_price_multiplier(20), + inner_success_criteria: SuccessCriteria::new(5000), + })) + .with_genesis_helm_config_fn(Arc::new(move |helm_values| { + // Have single epoch change in land blocking, and a few on long-running + helm_values["chain"]["epoch_duration_secs"] = + (if duration_secs >= 1800 { 600 } else { 300 }).into(); })) + // First start higher gas-fee traffic, to not cause issues with TxnEmitter setup - account creation + .with_emit_job( + EmitJobRequest::default() + .mode(EmitJobMode::ConstTps { tps: 100 }) + .gas_price(5 * aptos_global_constants::GAS_UNIT_PRICE) + .latency_polling_interval(Duration::from_millis(100)), + ) .with_success_criteria( - SuccessCriteria::new(3500) + SuccessCriteria::new(95) .add_no_restarts() .add_wait_for_catchup_s( // Give at least 60s for catchup, give 10% of the run for longer durations. @@ -1362,6 +1486,67 @@ fn land_blocking_three_region_test_suite(duration: Duration) -> ForgeConfig<'sta // Check that we don't use more than 10 GB of memory for 30% of the time. 
MetricsThreshold::new(10 * 1024 * 1024 * 1024, 30), )) + .add_latency_threshold(3.0, LatencyType::P50) + .add_latency_threshold(5.0, LatencyType::P90) + .add_chain_progress(StateProgressThreshold { + max_no_progress_secs: 10.0, + max_round_gap: 4, + }), + ) +} + +fn realistic_network_tuned_for_throughput_test() -> ForgeConfig { + ForgeConfig::default() + .with_initial_validator_count(NonZeroUsize::new(12).unwrap()) + // if we have full nodes for subset of validators, TPS drops. + // Validators without VFN are not creating batches, + // as no useful transaction reach their mempool. + // something to potentially improve upon. + // So having VFNs for all validators + .with_initial_fullnode_count(12) + .add_network_test(MultiRegionNetworkEmulationTest { + override_config: None, + }) + .with_emit_job(EmitJobRequest::default().mode(EmitJobMode::MaxLoad { + mempool_backlog: 150000, + })) + .with_node_helm_config_fn(Arc::new(move |helm_values| { + helm_values["validator"]["config"]["consensus"] + ["max_sending_block_txns_quorum_store_override"] = 10000.into(); + helm_values["validator"]["config"]["consensus"]["pipeline_backpressure"] = + serde_yaml::to_value(Vec::::new()).unwrap(); + helm_values["validator"]["config"]["consensus"]["chain_health_backoff"] = + serde_yaml::to_value(Vec::::new()).unwrap(); + + helm_values["validator"]["config"]["consensus"] + ["wait_for_full_blocks_above_recent_fill_threshold"] = (0.8).into(); + helm_values["validator"]["config"]["consensus"] + ["wait_for_full_blocks_above_pending_blocks"] = 8.into(); + + helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] + ["backlog_txn_limit_count"] = 100000.into(); + helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] + ["backlog_per_validator_batch_limit_count"] = 10.into(); + + helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] + ["dynamic_max_txn_per_s"] = 6000.into(); + + // Experimental storage optimizations + helm_values["validator"]["config"]["storage"]["rocksdb_configs"]["split_ledger_db"] = + true.into(); + helm_values["validator"]["config"]["storage"]["rocksdb_configs"] + ["use_sharded_state_merkle_db"] = true.into(); + })) + .with_success_criteria( + SuccessCriteria::new(8000) + .add_no_restarts() + .add_wait_for_catchup_s(60) + .add_system_metrics_threshold(SystemMetricsThreshold::new( + // Check that we don't use more than 12 CPU cores for 30% of the time. + MetricsThreshold::new(12, 30), + // Check that we don't use more than 10 GB of memory for 30% of the time. 
+ MetricsThreshold::new(10 * 1024 * 1024 * 1024, 30), + )) .add_chain_progress(StateProgressThreshold { max_no_progress_secs: 10.0, max_round_gap: 4, @@ -1369,20 +1554,18 @@ fn land_blocking_three_region_test_suite(duration: Duration) -> ForgeConfig<'sta ) } -fn pre_release_suite() -> ForgeConfig<'static> { +fn pre_release_suite() -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(30).unwrap()) - .with_network_tests(vec![&NetworkBandwidthTest]) + .add_network_test(NetworkBandwidthTest) } -fn chaos_test_suite(duration: Duration) -> ForgeConfig<'static> { +fn chaos_test_suite(duration: Duration) -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(30).unwrap()) - .with_network_tests(vec![ - &NetworkBandwidthTest, - &ThreeRegionSameCloudSimulationTest, - &NetworkLossTest, - ]) + .add_network_test(NetworkBandwidthTest) + .add_network_test(ThreeRegionSameCloudSimulationTest) + .add_network_test(NetworkLossTest) .with_success_criteria( SuccessCriteria::new( if duration > Duration::from_secs(1200) { @@ -1408,20 +1591,21 @@ fn changing_working_quorum_test_helper( min_avg_tps: usize, apply_txn_outputs: bool, use_chain_backoff: bool, - test: &'static ChangingWorkingQuorumTest, -) -> ForgeConfig<'static> { + test: ChangingWorkingQuorumTest, +) -> ForgeConfig { let config = ForgeConfig::default(); let num_large_validators = test.num_large_validators; + let max_down_nodes = test.max_down_nodes; config .with_initial_validator_count(NonZeroUsize::new(num_validators).unwrap()) .with_initial_fullnode_count( - if test.max_down_nodes == 0 { + if max_down_nodes == 0 { 0 } else { std::cmp::max(2, target_tps / 1000) }, ) - .with_network_tests(vec![test]) + .add_network_test(test) .with_genesis_helm_config_fn(Arc::new(move |helm_values| { helm_values["chain"]["epoch_duration_secs"] = epoch_duration.into(); helm_values["genesis"]["validator"]["num_validators_with_larger_stake"] = @@ -1496,10 +1680,10 @@ fn changing_working_quorum_test_helper( .add_no_restarts() .add_wait_for_catchup_s(30) .add_chain_progress(StateProgressThreshold { - max_no_progress_secs: if test.max_down_nodes == 0 { + max_no_progress_secs: if max_down_nodes == 0 { // very aggressive if no nodes are expected to be down 3.0 - } else if test.max_down_nodes * 3 + 1 + 2 < num_validators { + } else if max_down_nodes * 3 + 1 + 2 < num_validators { // number of down nodes is at least 2 below the quorum limit, so // we can still be reasonably aggressive 15.0 @@ -1519,12 +1703,12 @@ fn large_db_test( target_tps: usize, min_avg_tps: usize, existing_db_tag: String, -) -> ForgeConfig<'static> { +) -> ForgeConfig { let config = ForgeConfig::default(); config .with_initial_validator_count(NonZeroUsize::new(num_validators).unwrap()) .with_initial_fullnode_count(std::cmp::max(2, target_tps / 1000)) - .with_network_tests(vec![&PerformanceBenchmark]) + .add_network_test(PerformanceBenchmark) .with_existing_db(existing_db_tag.clone()) .with_node_helm_config_fn(Arc::new(move |helm_values| { helm_values["validator"]["storage"]["labels"]["tag"] = existing_db_tag.clone().into(); @@ -1561,11 +1745,11 @@ fn large_db_test( ) } -fn quorum_store_reconfig_enable_test(forge_config: ForgeConfig<'static>) -> ForgeConfig<'static> { - forge_config +fn quorum_store_reconfig_enable_test() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_initial_fullnode_count(20) - .with_network_tests(vec![&QuorumStoreOnChainEnableTest {}]) + 
.add_network_test(QuorumStoreOnChainEnableTest {}) .with_success_criteria( SuccessCriteria::new(5000) .add_no_restarts() @@ -1583,9 +1767,9 @@ fn quorum_store_reconfig_enable_test(forge_config: ForgeConfig<'static>) -> Forg ) } -fn multi_region_multi_cloud_simulation_test(config: ForgeConfig<'static>) -> ForgeConfig<'static> { - config - .with_initial_validator_count(NonZeroUsize::new(100).unwrap()) +fn mainnet_like_simulation_test() -> ForgeConfig { + ForgeConfig::default() + .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_emit_job( EmitJobRequest::default() .mode(EmitJobMode::MaxLoad { @@ -1593,7 +1777,14 @@ fn multi_region_multi_cloud_simulation_test(config: ForgeConfig<'static>) -> For }) .txn_expiration_time_secs(5 * 60), ) - .with_network_tests(vec![&MultiRegionMultiCloudSimulationTest {}]) + .add_network_test(CompositeNetworkTest::new( + MultiRegionNetworkEmulationTest { + override_config: None, + }, + CpuChaosTest { + override_config: None, + }, + )) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // no epoch change. helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); @@ -1610,10 +1801,13 @@ fn multi_region_multi_cloud_simulation_test(config: ForgeConfig<'static>) -> For ) } -fn multiregion_benchmark_test(config: ForgeConfig<'static>) -> ForgeConfig<'static> { - config +/// This test runs a network test in a real multi-region setup. It configures +/// genesis and node helm values to enable certain configurations needed to run in +/// the multiregion forge cluster. +fn multiregion_benchmark_test() -> ForgeConfig { + ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) - .with_network_tests(vec![&PerformanceBenchmark]) + .add_network_test(PerformanceBenchmark) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // Have single epoch change in land blocking helm_values["chain"]["epoch_duration_secs"] = 300.into(); @@ -1684,7 +1878,7 @@ impl Test for GetMetadata { } impl AdminTest for GetMetadata { - fn run<'t>(&self, ctx: &mut AdminContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut AdminContext<'_>) -> Result<()> { let client = ctx.rest_client(); let runtime = Runtime::new().unwrap(); runtime.block_on(client.get_aptos_version()).unwrap(); @@ -1773,7 +1967,7 @@ impl Test for RestartValidator { } impl NetworkTest for RestartValidator { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let runtime = Runtime::new()?; runtime.block_on(async { let node = ctx.swarm().validators_mut().next().unwrap(); @@ -1798,7 +1992,7 @@ impl Test for EmitTransaction { } impl NetworkTest for EmitTransaction { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let duration = Duration::from_secs(10); let all_validators = ctx .swarm() @@ -1806,8 +2000,7 @@ impl NetworkTest for EmitTransaction { .map(|v| v.peer_id()) .collect::>(); let stats = generate_traffic(ctx, &all_validators, duration).unwrap(); - ctx.report - .report_txn_stats(self.name().to_string(), &stats, duration); + ctx.report.report_txn_stats(self.name().to_string(), &stats); Ok(()) } diff --git a/testsuite/forge-test-runner-template.yaml b/testsuite/forge-test-runner-template.yaml index 4bbdaa2c682f9..1ea06e59661f2 100644 --- a/testsuite/forge-test-runner-template.yaml +++ b/testsuite/forge-test-runner-template.yaml @@ -4,64 +4,75 @@ metadata: name: {FORGE_POD_NAME} labels: app.kubernetes.io/name: forge + 
app.kubernetes.io/part-of: forge-test-runner forge-namespace: {FORGE_NAMESPACE} forge-image-tag: {FORGE_IMAGE_TAG} spec: restartPolicy: Never serviceAccountName: forge containers: - - name: main - image: {FORGE_IMAGE_REPO}:{FORGE_IMAGE_TAG} - imagePullPolicy: Always - command: - - /bin/bash - - -c - - | - ulimit -n 1048576 - {FORGE_ARGS} - resources: - limits: - cpu: 15.5 - memory: 26Gi - requests: - cpu: 15 - memory: 26Gi - env: - - name: FORGE_TRIGGERED_BY - value: {FORGE_TRIGGERED_BY} - - name: PROMETHEUS_URL - valueFrom: - secretKeyRef: - name: prometheus-read-only - key: url - optional: true - - name: PROMETHEUS_TOKEN - valueFrom: - secretKeyRef: - name: prometheus-read-only - key: token - optional: true - - name: RUST_BACKTRACE - value: "1" - # - name: RUST_LOG - # value: debug + - name: main + image: {FORGE_IMAGE} + imagePullPolicy: Always + command: + - /bin/bash + - -c + - | + ulimit -n 1048576 + {FORGE_ARGS} + resources: + limits: + cpu: 15.5 + memory: 26Gi + requests: + cpu: 15 + memory: 26Gi + env: + - name: FORGE_TRIGGERED_BY + value: {FORGE_TRIGGERED_BY} + - name: PROMETHEUS_URL + valueFrom: + secretKeyRef: + name: prometheus-read-only + key: url + optional: true + - name: PROMETHEUS_TOKEN + valueFrom: + secretKeyRef: + name: prometheus-read-only + key: token + optional: true + - name: RUST_BACKTRACE + value: "1" + - name: KUBECONFIG + value: {KUBECONFIG} + # - name: RUST_LOG + # value: debug + volumeMounts: + - name: multiregion-kubeconfig + readOnly: true + mountPath: {MULTIREGION_KUBECONFIG_DIR} affinity: # avoid scheduling with other forge or validator/fullnode pods podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app.kubernetes.io/name - operator: In - values: ["validator", "fullnode", "forge"] - - key: run - operator: Exists - topologyKey: "kubernetes.io/hostname" + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: ["validator", "fullnode", "forge"] + - key: run + operator: Exists + topologyKey: "kubernetes.io/hostname" # schedule on a k8s worker node in the "validators" nodegroup # to access more compute - nodeSelector: - {VALIDATOR_NODE_SELECTOR} + nodeSelector: {VALIDATOR_NODE_SELECTOR} tolerations: - - effect: NoExecute - key: aptos.org/nodepool - value: validators + - effect: NoExecute + key: aptos.org/nodepool + value: validators + volumes: + - name: multiregion-kubeconfig + secret: + secretName: multiregion-kubeconfig + optional: true diff --git a/testsuite/forge.py b/testsuite/forge.py index fccc4a101386a..17ddf81701c58 100644 --- a/testsuite/forge.py +++ b/testsuite/forge.py @@ -47,15 +47,20 @@ "release": "", # the default release profile has no tag prefix } -VALIDATOR_IMAGE_NAME = "aptos/validator" -VALIDATOR_TESTING_IMAGE_NAME = "aptos/validator-testing" -FORGE_IMAGE_NAME = "aptos/forge" +VALIDATOR_IMAGE_NAME = "validator" +VALIDATOR_TESTING_IMAGE_NAME = "validator-testing" +FORGE_IMAGE_NAME = "forge" +ECR_REPO_PREFIX = "aptos" DEFAULT_CONFIG = "forge-wrapper-config" DEFAULT_CONFIG_KEY = "forge-wrapper-config.json" FORGE_TEST_RUNNER_TEMPLATE_PATH = "forge-test-runner-template.yaml" +MULTIREGION_KUBECONFIG_DIR = "/etc/multiregion-kubeconfig" +MULTIREGION_KUBECONFIG_PATH = f"{MULTIREGION_KUBECONFIG_DIR}/kubeconfig" +GAR_REPO_NAME = "us-west1-docker.pkg.dev/aptos-global/aptos-internal" + @dataclass class RunResult: @@ -631,16 +636,15 @@ def run(self, context: ForgeContext) -> ForgeResult: class K8sForgeRunner(ForgeRunner): - def run(self, context: 
ForgeContext) -> ForgeResult: - forge_pod_name = sanitize_forge_resource_name( - f"{context.forge_namespace}-{context.time.epoch()}-{context.image_tag}" - ) + def delete_forge_runner_pod(self, context: ForgeContext): + log.info(f"Deleting forge pod for namespace {context.forge_namespace}") assert context.forge_cluster.kubeconf is not None, "kubeconf is required" context.shell.run( [ "kubectl", "--kubeconfig", context.forge_cluster.kubeconf, + *context.forge_cluster.kubectl_create_context_arg, "delete", "pod", "-n", @@ -664,6 +668,16 @@ def run(self, context: ForgeContext) -> ForgeResult: f"forge-namespace={context.forge_namespace}", ] ) + + def run(self, context: ForgeContext) -> ForgeResult: + forge_pod_name = sanitize_forge_resource_name( + f"{context.forge_namespace}-{context.time.epoch()}-{context.image_tag}", + max_length=52 if context.forge_cluster.is_multiregion else 63, + ) + assert context.forge_cluster.kubeconf is not None, "kubeconf is required" + + self.delete_forge_runner_pod(context) + if context.filesystem.exists(FORGE_TEST_RUNNER_TEMPLATE_PATH): template = context.filesystem.read(FORGE_TEST_RUNNER_TEMPLATE_PATH) else: @@ -676,14 +690,12 @@ def run(self, context: ForgeContext) -> ForgeResult: # determine the interal image repos based on the context of where the cluster is located if context.cloud == Cloud.AWS: - forge_image_repo = f"{context.aws_account_num}.dkr.ecr.{context.aws_region}.amazonaws.com/aptos/forge" + forge_image_full = f"{context.aws_account_num}.dkr.ecr.{context.aws_region}.amazonaws.com/{ECR_REPO_PREFIX}/forge:{context.forge_image_tag}" validator_node_selector = "eks.amazonaws.com/nodegroup: validators" elif ( context.cloud == Cloud.GCP ): # the GCP project for images is separate than the cluster - forge_image_repo = ( - f"us-west1-docker.pkg.dev/aptos-global/aptos-internal/forge" - ) + forge_image_full = f"{GAR_REPO_NAME}/forge:{context.forge_image_tag}" validator_node_selector = "" # no selector # TODO: also no NAP node selector yet # TODO: also registries need to be set up such that the default compute service account can access it: $PROJECT_ID-compute@developer.gserviceaccount.com @@ -695,11 +707,13 @@ def run(self, context: ForgeContext) -> ForgeResult: FORGE_IMAGE_TAG=context.forge_image_tag, IMAGE_TAG=context.image_tag, UPGRADE_IMAGE_TAG=context.upgrade_image_tag, - FORGE_IMAGE_REPO=forge_image_repo, + FORGE_IMAGE=forge_image_full, FORGE_NAMESPACE=context.forge_namespace, FORGE_ARGS=" ".join(context.forge_args), FORGE_TRIGGERED_BY=forge_triggered_by, VALIDATOR_NODE_SELECTOR=validator_node_selector, + KUBECONFIG=MULTIREGION_KUBECONFIG_PATH, + MULTIREGION_KUBECONFIG_DIR=MULTIREGION_KUBECONFIG_DIR, ) with ForgeResult.with_context(context) as forge_result: @@ -710,6 +724,7 @@ def run(self, context: ForgeContext) -> ForgeResult: "kubectl", "--kubeconfig", context.forge_cluster.kubeconf, + *context.forge_cluster.kubectl_create_context_arg, "apply", "-n", "default", @@ -796,6 +811,9 @@ def run(self, context: ForgeContext) -> ForgeResult: forge_result.set_state(state) + # cleanup the pod manually + self.delete_forge_runner_pod(context) + return forge_result @@ -854,6 +872,7 @@ def find_recent_images_by_profile_or_features( num_images: int, enable_failpoints: Optional[bool], enable_performance_profile: Optional[bool], + cloud: Cloud = Cloud.GCP, ) -> Sequence[str]: image_tag_prefix = "" if enable_failpoints and enable_performance_profile: @@ -872,6 +891,7 @@ def find_recent_images_by_profile_or_features( num_images, image_name=VALIDATOR_TESTING_IMAGE_NAME, 
image_tag_prefixes=[image_tag_prefix], + cloud=cloud, ) @@ -882,6 +902,7 @@ def find_recent_images( image_name: str, image_tag_prefixes: List[str] = [""], commit_threshold: int = 100, + cloud: Cloud = Cloud.GCP, ) -> Sequence[str]: """ Find the last `num_images` images built from the current git repo by searching the git commit history @@ -903,7 +924,7 @@ def find_recent_images( temp_ret = [] # count variants for this revision for prefix in image_tag_prefixes: image_tag = f"{prefix}{revision}" - exists = image_exists(shell, image_name, image_tag) + exists = image_exists(shell, image_name, image_tag, cloud=cloud) if exists: temp_ret.append(image_tag) if len(temp_ret) >= num_variants: @@ -918,26 +939,46 @@ def find_recent_images( return ret -def image_exists(shell: Shell, image_name: str, image_tag: str) -> bool: - result = shell.run( - [ - "aws", - "ecr", - "describe-images", - "--repository-name", - f"{image_name}", - "--image-ids", - f"imageTag={image_tag}", - ] - ) - return result.exit_code == 0 +def image_exists( + shell: Shell, + image_name: str, + image_tag: str, + cloud: Cloud = Cloud.GCP, +) -> bool: + """Check if an image exists in a given repository""" + if cloud == Cloud.GCP: + full_image = f"{GAR_REPO_NAME}/{image_name}:{image_tag}" + return shell.run( + [ + "crane", + "manifest", + full_image, + ], + stream_output=True, + ).succeeded() + elif cloud == Cloud.AWS: + full_image = f"{ECR_REPO_PREFIX}/{image_name}:{image_tag}" + log.info(f"Checking if image exists in GCP: {full_image}") + return shell.run( + [ + "aws", + "ecr", + "describe-images", + "--repository-name", + f"{ECR_REPO_PREFIX}/{image_name}", + "--image-ids", + f"imageTag={image_tag}", + ], + stream_output=True, + ).succeeded() + else: + raise Exception(f"Unknown cloud repo type: {cloud}") -def sanitize_forge_resource_name(forge_resource: str) -> str: +def sanitize_forge_resource_name(forge_resource: str, max_length: int = 63) -> str: """ Sanitize the intended forge resource name to be a valid k8s resource name """ - max_length = 63 sanitized_namespace = "" for i, c in enumerate(forge_resource): if i >= max_length: @@ -1299,13 +1340,14 @@ def test( else: cloud_enum = Cloud.GCP - if forge_cluster_name == "multiregion": + if forge_cluster_name == "forge-multiregion": log.info("Using multiregion cluster") forge_cluster = ForgeCluster( name=forge_cluster_name, cloud=Cloud.GCP, region="multiregion", kubeconf=context.filesystem.mkstemp(), + is_multiregion=True, ) else: log.info( @@ -1340,6 +1382,7 @@ def test( 2, enable_failpoints=enable_failpoints, enable_performance_profile=enable_performance_profile, + cloud=cloud_enum, ) ) # This might not work as intended because we dont know if that revision @@ -1356,6 +1399,7 @@ def test( 1, enable_failpoints=enable_failpoints, enable_performance_profile=enable_performance_profile, + cloud=cloud_enum, )[0] image_tag = image_tag or default_latest_image @@ -1380,13 +1424,13 @@ def test( # finally, whether we've derived the image tags or used the user-inputted ones, check if they exist assert image_exists( - shell, VALIDATOR_TESTING_IMAGE_NAME, image_tag + shell, VALIDATOR_TESTING_IMAGE_NAME, image_tag, cloud=cloud_enum ), f"swarm (validator) image does not exist: {image_tag}" assert image_exists( - shell, VALIDATOR_TESTING_IMAGE_NAME, upgrade_image_tag + shell, VALIDATOR_TESTING_IMAGE_NAME, upgrade_image_tag, cloud=cloud_enum ), f"swarm upgrade (validator) image does not exist: {upgrade_image_tag}" assert image_exists( - shell, FORGE_IMAGE_NAME, forge_image_tag + shell, 
FORGE_IMAGE_NAME, forge_image_tag, cloud=cloud_enum ), f"forge (test runner) image does not exist: {forge_image_tag}" forge_args = create_forge_command( diff --git a/testsuite/forge/Cargo.toml b/testsuite/forge/Cargo.toml index 1d25cafdcce8e..a19789c5b50de 100644 --- a/testsuite/forge/Cargo.toml +++ b/testsuite/forge/Cargo.toml @@ -29,6 +29,7 @@ aptos-rest-client = { workspace = true } aptos-retrier = { workspace = true } aptos-sdk = { workspace = true } aptos-secure-storage = { workspace = true } +aptos-short-hex-str = { workspace = true } aptos-state-sync-driver = { workspace = true } aptos-transaction-emitter-lib = { workspace = true } aptos-transaction-generator-lib = { workspace = true } @@ -41,8 +42,14 @@ hyper = { workspace = true } hyper-tls = { workspace = true } itertools = { workspace = true } json-patch = { workspace = true } -k8s-openapi = { version = "0.13.1", default-features = false, features = ["v1_22"] } -kube = { version = "0.65.0", default-features = false, features = ["jsonpatch", "client", "rustls-tls"] } +k8s-openapi = { version = "0.13.1", default-features = false, features = [ + "v1_22", +] } +kube = { version = "0.65.0", default-features = false, features = [ + "jsonpatch", + "client", + "rustls-tls", +] } num_cpus = { workspace = true } once_cell = { workspace = true } prometheus-http-query = { workspace = true } @@ -50,7 +57,7 @@ rand = { workspace = true } rayon = { workspace = true } regex = { workspace = true } reqwest = { workspace = true } -serde ={ workspace = true } +serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } structopt = { workspace = true } diff --git a/testsuite/forge/src/backend/k8s/chaos.rs b/testsuite/forge/src/backend/k8s/chaos.rs index b07b2a3a6a08d..96723e013402f 100644 --- a/testsuite/forge/src/backend/k8s/chaos.rs +++ b/testsuite/forge/src/backend/k8s/chaos.rs @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - dump_string_to_file, K8sSwarm, Result, Swarm, SwarmChaos, SwarmNetworkBandwidth, - SwarmNetworkDelay, SwarmNetworkLoss, SwarmNetworkPartition, KUBECTL_BIN, + dump_string_to_file, K8sSwarm, Result, Swarm, SwarmChaos, SwarmCpuStress, SwarmNetEm, + SwarmNetworkBandwidth, SwarmNetworkDelay, SwarmNetworkLoss, SwarmNetworkPartition, KUBECTL_BIN, }; use anyhow::bail; use aptos_logger::info; @@ -32,6 +32,18 @@ macro_rules! NETWORK_LOSS_CHAOS_TEMPLATE { }; } +macro_rules! NETEM_CHAOS_TEMPLATE { + () => { + "chaos/netem.yaml" + }; +} + +macro_rules! 
CPU_STRESS_CHAOS_TEMPLATE { + () => { + "chaos/cpu_stress.yaml" + }; +} + impl K8sSwarm { /// Injects the SwarmChaos into the specified namespace pub fn inject_swarm_chaos(&self, chaos: &SwarmChaos) -> Result<()> { @@ -166,12 +178,96 @@ impl K8sSwarm { )) } + fn create_netem_template(&self, swarm_netem: &SwarmNetEm) -> Result { + let mut network_chaos_specs = vec![]; + + for group_netem in &swarm_netem.group_netems { + let source_instance_labels = group_netem + .source_nodes + .iter() + .map(|node| { + if let Some(v) = self.validator(*node) { + v.name() + } else { + "invalid-node" + } + }) + .collect::>() + .join(","); + + let target_instance_labels = group_netem + .target_nodes + .iter() + .map(|node| { + if let Some(v) = self.validator(*node) { + v.name() + } else { + "invalid-node" + } + }) + .collect::>() + .join(","); + + network_chaos_specs.push(format!( + include_str!(NETEM_CHAOS_TEMPLATE!()), + name = &group_netem.name, + namespace = self.kube_namespace, + delay_latency_ms = group_netem.delay_latency_ms, + delay_jitter_ms = group_netem.delay_jitter_ms, + delay_correlation_percentage = group_netem.delay_correlation_percentage, + loss_percentage = group_netem.loss_percentage, + loss_correlation_percentage = group_netem.loss_correlation_percentage, + instance_labels = &source_instance_labels, + target_instance_labels = &target_instance_labels, + rate = group_netem.rate_in_mbps, + )); + } + + Ok(network_chaos_specs.join("\n---\n")) + } + + /// Creates the CPU stress template, which can be used to inject CPU stress into a pod. + /// This can be used to simulate nodes with different available CPU resource even though the + /// nodes have identical hardware. For example, a node with 4 cores can be simulated as a node + /// with 2 cores by setting num_workers to 2. 
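To make the shape of the input concrete, a minimal sketch of injecting such a stress into a running swarm could look like the following; the GroupCpuStress struct name, its field types, and the k8s_swarm/target_nodes bindings are assumptions inferred from how create_cpu_stress_template reads the data below, not definitions taken from this change.

    // Illustrative sketch only: emulate roughly 2-core machines on larger hardware.
    // Assumes `k8s_swarm: K8sSwarm` and `target_nodes: Vec<PeerId>` already exist;
    // `GroupCpuStress` and its field types are inferred from the template usage below.
    let cpu_stress = SwarmCpuStress {
        group_cpu_stresses: vec![GroupCpuStress {
            name: "simulate-two-cores".to_string(),
            target_nodes,            // PeerIds of the validators to throttle
            num_workers: 2,          // behave roughly like a 2-core node
            load_per_worker: 100,    // assumed: each worker fully occupies one core
        }],
    };
    k8s_swarm.inject_swarm_chaos(&SwarmChaos::CpuStress(cpu_stress))?;
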
+ fn create_cpu_stress_template(&self, swarm_cpu_stress: &SwarmCpuStress) -> Result { + let mut cpu_stress_specs = vec![]; + + for group_cpu_stress in &swarm_cpu_stress.group_cpu_stresses { + let instance_labels = group_cpu_stress + .target_nodes + .iter() + .map(|node| { + if let Some(v) = self.validator(*node) { + v.name() + } else { + "invalid-node" + } + }) + .collect::>() + .join(","); + + cpu_stress_specs.push(format!( + include_str!(CPU_STRESS_CHAOS_TEMPLATE!()), + name = &group_cpu_stress.name, + namespace = self.kube_namespace, + num_workers = group_cpu_stress.num_workers, + load_per_worker = group_cpu_stress.load_per_worker, + instance_labels = &instance_labels, + )); + } + + Ok(cpu_stress_specs.join("\n---\n")) + } + fn create_chaos_template(&self, chaos: &SwarmChaos) -> Result { match chaos { SwarmChaos::Delay(c) => self.create_network_delay_template(c), SwarmChaos::Partition(c) => self.create_network_partition_template(c), SwarmChaos::Bandwidth(c) => self.create_network_bandwidth_template(c), SwarmChaos::Loss(c) => self.create_network_loss_template(c), + SwarmChaos::NetEm(c) => self.create_netem_template(c), + SwarmChaos::CpuStress(c) => self.create_cpu_stress_template(c), } } diff --git a/testsuite/forge/src/backend/k8s/chaos/cpu_stress.yaml b/testsuite/forge/src/backend/k8s/chaos/cpu_stress.yaml new file mode 100644 index 0000000000000..64d771ce5a0ee --- /dev/null +++ b/testsuite/forge/src/backend/k8s/chaos/cpu_stress.yaml @@ -0,0 +1,16 @@ +apiVersion: chaos-mesh.org/v1alpha1 +kind: StressChaos +metadata: + namespace: {namespace} + name: {name} +spec: + mode: all + selector: + namespaces: + - {namespace} + expressionSelectors: + - {{ key: app.kubernetes.io/instance, operator: In, values: [{instance_labels}] }} + stressors: + cpu: + workers: {num_workers} + load: {load_per_worker} \ No newline at end of file diff --git a/testsuite/forge/src/backend/k8s/chaos/netem.yaml b/testsuite/forge/src/backend/k8s/chaos/netem.yaml new file mode 100644 index 0000000000000..1957df33f898e --- /dev/null +++ b/testsuite/forge/src/backend/k8s/chaos/netem.yaml @@ -0,0 +1,32 @@ +apiVersion: chaos-mesh.org/v1alpha1 +kind: NetworkChaos +metadata: + namespace: {namespace} + name: {name} +spec: + action: netem + mode: all + selector: + namespaces: + - {namespace} + expressionSelectors: + - {{ key: app.kubernetes.io/instance, operator: In, values: [{instance_labels}] }} + delay: + latency: "{delay_latency_ms}ms" + correlation: "{delay_correlation_percentage}" + jitter: "{delay_jitter_ms}ms" + loss: + loss: "{loss_percentage}" + correlation: "{loss_correlation_percentage}" + bandwidth: + rate: "{rate}mbps" + limit: 20971520 # placeholder value. not supported by tc netem + buffer: 10000 # placeholder value. 
not supported by tc netem + direction: both + target: + selector: + namespaces: + - {namespace} + expressionSelectors: + - {{ key: app.kubernetes.io/instance, operator: In, values: [{target_instance_labels}] }} + mode: all diff --git a/testsuite/forge/src/backend/k8s/cluster_helper.rs b/testsuite/forge/src/backend/k8s/cluster_helper.rs index 0ade62e0b393d..662aa5e115f70 100644 --- a/testsuite/forge/src/backend/k8s/cluster_helper.rs +++ b/testsuite/forge/src/backend/k8s/cluster_helper.rs @@ -4,8 +4,8 @@ use crate::{ get_fullnodes, get_validators, k8s_wait_genesis_strategy, k8s_wait_nodes_strategy, - nodes_healthcheck, wait_stateful_set, Create, ForgeRunnerMode, GenesisConfigFn, K8sApi, - K8sNode, NodeConfigFn, Result, APTOS_NODE_HELM_CHART_PATH, APTOS_NODE_HELM_RELEASE_NAME, + nodes_healthcheck, wait_stateful_set, ForgeRunnerMode, GenesisConfigFn, K8sApi, K8sNode, + NodeConfigFn, ReadWrite, Result, APTOS_NODE_HELM_CHART_PATH, APTOS_NODE_HELM_RELEASE_NAME, DEFAULT_ROOT_KEY, FORGE_KEY_SEED, FULLNODE_HAPROXY_SERVICE_SUFFIX, FULLNODE_SERVICE_SUFFIX, GENESIS_HELM_CHART_PATH, GENESIS_HELM_RELEASE_NAME, HELM_BIN, KUBECTL_BIN, MANAGEMENT_CONFIGMAP_PREFIX, NAMESPACE_CLEANUP_THRESHOLD_SECS, POD_CLEANUP_THRESHOLD_SECS, @@ -23,7 +23,7 @@ use k8s_openapi::api::{ use kube::{ api::{Api, DeleteParams, ListParams, ObjectMeta, Patch, PatchParams, PostParams}, client::Client as K8sClient, - config::Kubeconfig, + config::{KubeConfigOptions, Kubeconfig}, Config, Error as KubeError, ResourceExt, }; use rand::Rng; @@ -145,10 +145,8 @@ async fn wait_nodes_stateful_set( ) -> Result<()> { // wait for all nodes healthy for node in nodes.values() { - // retry exponentially until 1 min, then every 1 min until ~22 min - let retry_policy = RetryPolicy::exponential(Duration::from_secs(5)) - .with_max_retries(25) - .with_max_delay(Duration::from_secs(60)); + // retry every 10 seconds for 20 minutes + let retry_policy = RetryPolicy::fixed(Duration::from_secs(10)).with_max_retries(120); wait_stateful_set( kube_client, kube_namespace, @@ -197,6 +195,9 @@ pub(crate) async fn delete_k8s_resources(client: K8sClient, kube_namespace: &str let testnet_addons_helm_selector = "app.kubernetes.io/part-of=testnet-addons"; let genesis_helm_selector = "app.kubernetes.io/part-of=aptos-genesis"; + // selector for manually created resources from Forge + let forge_pfn_selector = "app.kubernetes.io/part-of=forge-pfn"; + // delete all deployments and statefulsets // cross this with all the compute resources created by aptos-node helm chart let deployments: Api = Api::namespaced(client.clone(), kube_namespace); @@ -210,6 +211,7 @@ pub(crate) async fn delete_k8s_resources(client: K8sClient, kube_namespace: &str aptos_node_helm_selector, testnet_addons_helm_selector, genesis_helm_selector, + forge_pfn_selector, ] { info!("Deleting k8s resources with selector: {}", selector); delete_k8s_collection(deployments.clone(), "Deployments", selector).await?; @@ -497,6 +499,8 @@ pub async fn check_persistent_volumes( Ok(()) } +/// Installs a testnet in a k8s namespace by first running genesis, and the installing the aptos-nodes via helm +/// Returns the current era, as well as a mapping of validators and fullnodes pub async fn install_testnet_resources( kube_namespace: String, num_validators: usize, @@ -508,7 +512,7 @@ pub async fn install_testnet_resources( enable_haproxy: bool, genesis_helm_config_fn: Option, node_helm_config_fn: Option, -) -> Result<(HashMap, HashMap)> { +) -> Result<(String, HashMap, HashMap)> { let kube_client = 
create_k8s_client().await?; // get deployment-specific helm values and cache it @@ -597,7 +601,7 @@ pub async fn install_testnet_resources( ) .await?; - Ok((validators, fullnodes)) + Ok((new_era.clone(), validators, fullnodes)) } pub fn construct_node_helm_values( @@ -713,10 +717,27 @@ pub async fn collect_running_nodes( Ok((validators, fullnodes)) } +/// Returns a [Config] object reading the KUBECONFIG environment variable or infering from the +/// environment. Differently from [`Config::infer()`], this will look at the +/// `KUBECONFIG` env var first, and only then infer from the environment. +async fn make_kube_client_config() -> Result { + match Config::from_kubeconfig(&KubeConfigOptions::default()).await { + Ok(config) => Ok(config), + Err(kubeconfig_err) => { + Config::infer() + .await + .map_err(|infer_err| + anyhow::anyhow!("Unable to construct Config. Failed to infer config {:?}. Failed to read KUBECONFIG {:?}", infer_err, kubeconfig_err) + ) + } + } +} + pub async fn create_k8s_client() -> Result { - let mut config = Config::infer().await?; + let mut config = make_kube_client_config().await?; + let cluster_name = Kubeconfig::read() - .map(|k| k.current_context.unwrap()) + .map(|k| k.current_context.unwrap_or_default()) .unwrap_or_else(|_| config.cluster_url.to_string()); config.accept_invalid_certs = true; @@ -797,7 +818,7 @@ enum ApiError { } async fn create_namespace( - namespace_api: Arc>, + namespace_api: Arc>, kube_namespace: String, ) -> Result<(), ApiError> { let kube_namespace_name = kube_namespace.clone(); @@ -1032,36 +1053,7 @@ pub fn make_k8s_label(value: String) -> String { #[cfg(test)] mod tests { use super::*; - use async_trait::async_trait; - use hyper::http::StatusCode; - use kube::error::ErrorResponse; - - struct FailedNamespacesApi { - status_code: u16, - } - - impl FailedNamespacesApi { - fn from_status_code(status_code: u16) -> Self { - FailedNamespacesApi { status_code } - } - } - - #[async_trait] - impl Create for FailedNamespacesApi { - async fn create( - &self, - _pp: &PostParams, - _namespace: &Namespace, - ) -> Result { - let status = StatusCode::from_u16(self.status_code).unwrap(); - Err(KubeError::Api(ErrorResponse { - status: status.to_string(), - code: status.as_u16(), - message: "Failed to create namespace".to_string(), - reason: "Failed to parse error data".into(), - })) - } - } + use crate::FailedNamespacesApi; #[tokio::test] async fn test_create_namespace_final_error() { diff --git a/testsuite/forge/src/backend/k8s/constants.rs b/testsuite/forge/src/backend/k8s/constants.rs index 8781d928bf5da..086ad561d622f 100644 --- a/testsuite/forge/src/backend/k8s/constants.rs +++ b/testsuite/forge/src/backend/k8s/constants.rs @@ -43,3 +43,8 @@ pub const FULLNODE_SERVICE_SUFFIX: &str = "fullnode"; pub const VALIDATOR_HAPROXY_SERVICE_SUFFIX: &str = "validator-lb"; pub const FULLNODE_HAPROXY_SERVICE_SUFFIX: &str = "fullnode-lb"; pub const HAPROXY_SERVICE_SUFFIX: &str = "lb"; + +// kubernetes resource names for validator 0, which may be used for templating +pub const VALIDATOR_0_STATEFUL_SET_NAME: &str = "aptos-node-0-validator"; +pub const VALIDATOR_0_GENESIS_SECRET_PREFIX: &str = "aptos-node-0-genesis"; +pub const VALIDATOR_0_DATA_PERSISTENT_VOLUME_CLAIM_PREFIX: &str = "aptos-node-0-validator"; diff --git a/testsuite/forge/src/backend/k8s/fullnode.rs b/testsuite/forge/src/backend/k8s/fullnode.rs new file mode 100644 index 0000000000000..371cb60807b57 --- /dev/null +++ b/testsuite/forge/src/backend/k8s/fullnode.rs @@ -0,0 +1,759 @@ +// Copyright © Aptos 
Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + get_stateful_set_image, K8sNode, ReadWrite, Result, Version, REST_API_SERVICE_PORT, + VALIDATOR_0_DATA_PERSISTENT_VOLUME_CLAIM_PREFIX, VALIDATOR_0_GENESIS_SECRET_PREFIX, + VALIDATOR_0_STATEFUL_SET_NAME, +}; +use anyhow::Context; +use aptos_config::{ + config::{ + merge_node_config, ApiConfig, BaseConfig, DiscoveryMethod, ExecutionConfig, NetworkConfig, + NodeConfig, RoleType, WaypointConfig, + }, + network_id::NetworkId, +}; +use aptos_logger::info; +use aptos_sdk::types::PeerId; +use aptos_short_hex_str::AsShortHexStr; +use k8s_openapi::{ + api::{ + apps::v1::{StatefulSet, StatefulSetSpec}, + core::v1::{ + ConfigMap, ConfigMapVolumeSource, Container, PersistentVolumeClaim, + PersistentVolumeClaimSpec, PodSpec, PodTemplateSpec, ResourceRequirements, + SecretVolumeSource, Service, ServicePort, ServiceSpec, Volume, VolumeMount, + }, + }, + apimachinery::pkg::apis::meta::v1::LabelSelector, +}; +use kube::api::{ObjectMeta, PostParams}; +use std::{ + collections::BTreeMap, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + path::PathBuf, + sync::Arc, +}; +use tempfile::TempDir; + +// these are constants given by the aptos-node helm chart +// see terraform/helm/aptos-node/templates/validator.yaml + +// the name of the NodeConfig for the PFN, as well as the key in the k8s ConfigMap +// where the NodeConfig is stored +const FULLNODE_CONFIG_MAP_KEY: &str = "fullnode.yaml"; + +// the path where the genesis is mounted in the validator +const GENESIS_CONFIG_VOLUME_NAME: &str = "genesis-config"; +const GENESIS_CONFIG_VOLUME_PATH: &str = "/opt/aptos/genesis"; + +// the path where the config file is mounted in the fullnode +const APTOS_CONFIG_VOLUME_NAME: &str = "aptos-config"; +const APTOS_CONFIG_VOLUME_PATH: &str = "/opt/aptos/etc"; + +// the path where the data volume is mounted in the fullnode +const APTOS_DATA_VOLUME_NAME: &str = "aptos-data"; +const APTOS_DATA_VOLUME_PATH: &str = "/opt/aptos/data"; + +/// Derive the fullnode image from the validator image. 
They will share the same image repo (validator), but not necessarily the version (image tag) +fn get_fullnode_image_from_validator_image( + validator_stateful_set: &StatefulSet, + version: &Version, +) -> Result { + let fullnode_kube_image = get_stateful_set_image(validator_stateful_set)?; + let fullnode_image_repo = fullnode_kube_image.name; + + // fullnode uses the validator image, with a different image tag + Ok(format!("{}:{}", fullnode_image_repo, version)) +} + +/// Create a ConfigMap with the given NodeConfig, with a constant key +async fn create_node_config_configmap( + node_config_config_map_name: String, + node_config: &NodeConfig, +) -> Result { + let mut data: BTreeMap = BTreeMap::new(); + data.insert( + FULLNODE_CONFIG_MAP_KEY.to_string(), + serde_yaml::to_string(&node_config)?, + ); + let node_config_config_map = ConfigMap { + binary_data: None, + data: Some(data.clone()), + metadata: ObjectMeta { + name: Some(node_config_config_map_name), + ..ObjectMeta::default() + }, + immutable: None, + }; + Ok(node_config_config_map) +} + +/// Create a PFN data volume by using the validator data volume as a template +fn create_fullnode_persistent_volume_claim( + validator_data_volume: PersistentVolumeClaim, +) -> Result { + let volume_requests = validator_data_volume + .spec + .as_ref() + .expect("Could not get volume spec from validator data volume") + .resources + .as_ref() + .expect("Could not get volume resources from validator data volume") + .requests + .clone(); + + Ok(PersistentVolumeClaim { + metadata: ObjectMeta { + name: Some(APTOS_DATA_VOLUME_NAME.to_string()), + ..ObjectMeta::default() + }, + spec: Some(PersistentVolumeClaimSpec { + access_modes: Some(vec!["ReadWriteOnce".to_string()]), + resources: Some(ResourceRequirements { + requests: volume_requests, + ..ResourceRequirements::default() + }), + ..PersistentVolumeClaimSpec::default() + }), + ..PersistentVolumeClaim::default() + }) +} + +fn create_fullnode_labels(fullnode_name: String) -> BTreeMap { + [ + ("app.kubernetes.io/name".to_string(), "fullnode".to_string()), + ("app.kubernetes.io/instance".to_string(), fullnode_name), + ( + "app.kubernetes.io/part-of".to_string(), + "forge-pfn".to_string(), + ), + ] + .iter() + .cloned() + .collect() +} + +fn create_fullnode_service(fullnode_name: String) -> Result { + Ok(Service { + metadata: ObjectMeta { + name: Some(fullnode_name.clone()), + ..ObjectMeta::default() + }, + spec: Some(ServiceSpec { + selector: Some(create_fullnode_labels(fullnode_name)), + // for now, only expose the REST API + ports: Some(vec![ServicePort { + port: REST_API_SERVICE_PORT as i32, + ..ServicePort::default() + }]), + ..ServiceSpec::default() + }), + ..Service::default() + }) +} + +fn create_fullnode_container( + fullnode_image: String, + validator_container: &Container, +) -> Result { + Ok(Container { + image: Some(fullnode_image), + command: Some(vec![ + "/usr/local/bin/aptos-node".to_string(), + "-f".to_string(), + format!("/opt/aptos/etc/{}", FULLNODE_CONFIG_MAP_KEY), + ]), + volume_mounts: Some(vec![ + VolumeMount { + mount_path: APTOS_CONFIG_VOLUME_PATH.to_string(), + name: APTOS_CONFIG_VOLUME_NAME.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + mount_path: APTOS_DATA_VOLUME_PATH.to_string(), + name: APTOS_DATA_VOLUME_NAME.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + mount_path: GENESIS_CONFIG_VOLUME_PATH.to_string(), + name: GENESIS_CONFIG_VOLUME_NAME.to_string(), + ..VolumeMount::default() + }, + ]), + // specifically, inherit resources, env,ports, 
securityContext from the validator's container + ..validator_container.clone() + }) +} + +fn create_fullnode_volumes( + fullnode_genesis_secret_name: String, + fullnode_node_config_config_map_name: String, +) -> Vec { + vec![ + Volume { + name: GENESIS_CONFIG_VOLUME_NAME.to_string(), + secret: Some(SecretVolumeSource { + secret_name: Some(fullnode_genesis_secret_name), + ..SecretVolumeSource::default() + }), + ..Volume::default() + }, + Volume { + name: APTOS_CONFIG_VOLUME_NAME.to_string(), + config_map: Some(ConfigMapVolumeSource { + name: Some(fullnode_node_config_config_map_name), + ..ConfigMapVolumeSource::default() + }), + ..Volume::default() + }, + ] +} + +/// Create a fullnode StatefulSet given some templates from the validator +fn create_fullnode_stateful_set( + fullnode_name: String, + fullnode_image: String, + fullnode_genesis_secret_name: String, + fullnode_node_config_config_map_name: String, + validator_stateful_set: StatefulSet, + validator_data_volume: PersistentVolumeClaim, +) -> Result { + // extract some useful structs from the validator + let validator_stateful_set_spec = validator_stateful_set + .spec + .as_ref() + .context("Validator StatefulSet does not have spec")? + .clone(); + let validator_stateful_set_pod_spec = validator_stateful_set_spec + .template + .spec + .as_ref() + .context("Validator StatefulSet does not have spec.template.spec")? + .clone(); + + let validator_container = validator_stateful_set_pod_spec + .containers + .first() + .context("Validator StatefulSet does not have any containers")?; + + // common labels + let labels_map: BTreeMap = create_fullnode_labels(fullnode_name.clone()); + + // create the fullnode data volume + let data_volume = create_fullnode_persistent_volume_claim(validator_data_volume)?; + + // create the fullnode container + let fullnode_container = create_fullnode_container(fullnode_image, validator_container)?; + + // create the fullnode volumes + let fullnode_volumes = create_fullnode_volumes( + fullnode_genesis_secret_name, + fullnode_node_config_config_map_name, + ); + + // build the fullnode stateful set + let mut fullnode_stateful_set = StatefulSet::default(); + fullnode_stateful_set.metadata.name = Some(fullnode_name.clone()); + fullnode_stateful_set.metadata.labels = Some(labels_map.clone()); + fullnode_stateful_set.spec = Some(StatefulSetSpec { + service_name: fullnode_name, // the name of the service is the same as that of the fullnode + selector: LabelSelector { + match_labels: Some(labels_map.clone()), + ..LabelSelector::default() + }, + volume_claim_templates: Some(vec![data_volume]), // a PVC that is created directly by the StatefulSet, and owned by it + template: PodTemplateSpec { + metadata: Some(ObjectMeta { + labels: Some(labels_map), + ..ObjectMeta::default() + }), + spec: Some(PodSpec { + containers: vec![fullnode_container], + volumes: Some(fullnode_volumes), + // specifically, inherit nodeSelector, affinity, tolerations, securityContext, serviceAccountName from the validator's PodSpec + ..validator_stateful_set_pod_spec.clone() + }), + }, + ..validator_stateful_set_spec + }); + Ok(fullnode_stateful_set) +} + +/// Create a default PFN NodeConfig that uses the genesis, waypoint, and data paths expected in k8s +pub fn get_default_pfn_node_config() -> NodeConfig { + let mut waypoint_path = PathBuf::from(GENESIS_CONFIG_VOLUME_PATH); + waypoint_path.push("waypoint.txt"); + + let mut genesis_path = PathBuf::from(GENESIS_CONFIG_VOLUME_PATH); + genesis_path.push("genesis.blob"); + + NodeConfig { + base: BaseConfig { 
+ role: RoleType::FullNode, + data_dir: PathBuf::from(APTOS_DATA_VOLUME_PATH), + waypoint: WaypointConfig::FromFile(waypoint_path), + ..BaseConfig::default() + }, + execution: ExecutionConfig { + genesis_file_location: genesis_path, + ..ExecutionConfig::default() + }, + full_node_networks: vec![NetworkConfig { + network_id: NetworkId::Public, + discovery_method: DiscoveryMethod::Onchain, + // defaults to listening on "/ip4/0.0.0.0/tcp/6180" + ..NetworkConfig::default() + }], + api: ApiConfig { + // API defaults to listening on "127.0.0.1:8080". Override with 0.0.0.0:8080 + address: SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(0, 0, 0, 0), + REST_API_SERVICE_PORT as u16, + )), + ..ApiConfig::default() + }, + ..NodeConfig::default() + } +} + +/// Create a PFN stateful set workload +/// This function assumes that the swarm has already been set up (e.g. there are already validators running) as it borrows +/// some artifacts such as genesis from the 0th validator +/// The given NodeConfig will be merged with the default PFN NodeConfig for Forge +pub async fn install_public_fullnode<'a>( + stateful_set_api: Arc>, + configmap_api: Arc>, + persistent_volume_claim_api: Arc>, + service_api: Arc>, + version: &'a Version, + node_config: &'a NodeConfig, + era: String, + namespace: String, + use_port_forward: bool, +) -> Result<(PeerId, K8sNode)> { + let default_node_config = get_default_pfn_node_config(); + + let merged_node_config = + merge_node_config(default_node_config, serde_yaml::to_value(node_config)?)?; + + let node_peer_id = node_config.get_peer_id().unwrap_or_else(PeerId::random); + let fullnode_name = format!("fullnode-{}", node_peer_id.short_str()); + + // create the NodeConfig configmap + let fullnode_node_config_config_map_name = format!("{}-config", fullnode_name.clone()); + let fullnode_node_config_config_map = create_node_config_configmap( + fullnode_node_config_config_map_name.clone(), + &merged_node_config, + ) + .await?; + configmap_api + .create(&PostParams::default(), &fullnode_node_config_config_map) + .await?; + + // assume that the validator workload (val0) has already been created (not necessarily running yet) + // get its spec so we can inherit some of its properties + let validator_stateful_set = stateful_set_api.get(VALIDATOR_0_STATEFUL_SET_NAME).await?; + + // get the fullnode image + let fullnode_image_full = + get_fullnode_image_from_validator_image(&validator_stateful_set, version)?; + + // borrow genesis secret from the first validator. 
it follows this naming convention + let fullnode_genesis_secret_name = format!("{}-e{}", VALIDATOR_0_GENESIS_SECRET_PREFIX, era); + let validator_data_persistent_volume_claim_name = format!( + "{}-e{}", + VALIDATOR_0_DATA_PERSISTENT_VOLUME_CLAIM_PREFIX, era + ); + + // create the data volume + let validator_data_volume = persistent_volume_claim_api + .get(validator_data_persistent_volume_claim_name.as_str()) + .await + .map_err(|e| { + anyhow::anyhow!( + "Could not get validator data volume to inherit from {:?}: {:?}", + validator_data_persistent_volume_claim_name, + e + ) + })?; + + let fullnode_stateful_set = create_fullnode_stateful_set( + fullnode_name.clone(), + fullnode_image_full, + fullnode_genesis_secret_name, + fullnode_node_config_config_map_name, + validator_stateful_set, + validator_data_volume, + )?; + + // check that all the labels are the same + let fullnode_metadata_labels = fullnode_stateful_set + .metadata + .labels + .as_ref() + .context("Validator StatefulSet does not have metadata.labels")?; + let fullnode_spec_selector_match_labels = fullnode_stateful_set + .spec + .as_ref() + .context("Validator StatefulSet does not have spec")? + .selector + .match_labels + .as_ref() + .context("Validator StatefulSet does not have spec.selector.match_labels")?; + let fullnode_spec_template_metadata_labels = fullnode_stateful_set + .spec + .as_ref() + .context("Validator StatefulSet does not have spec")? + .template + .metadata + .as_ref() + .context("Validator StatefulSet does not have spec.template.metadata")? + .labels + .as_ref() + .context("Validator StatefulSet does not have spec.template.metadata.labels")?; + + let labels = [ + fullnode_metadata_labels, + fullnode_spec_selector_match_labels, + fullnode_spec_template_metadata_labels, + ]; + for label1 in labels.into_iter() { + for label2 in labels.into_iter() { + assert_eq!(label1, label2); + } + } + + let fullnode_service = create_fullnode_service(fullnode_name.clone())?; + + // write the spec to file + let tmp_dir = TempDir::new().expect("Could not create temp dir"); + let fullnode_config_path = tmp_dir.path().join("fullnode.yaml"); + let fullnode_config_file = std::fs::File::create(&fullnode_config_path) + .with_context(|| format!("Could not create file {:?}", fullnode_config_path))?; + serde_yaml::to_writer(fullnode_config_file, &fullnode_stateful_set)?; + + let fullnode_service_path = tmp_dir.path().join("service.yaml"); + let fullnode_service_file = std::fs::File::create(&fullnode_service_path) + .with_context(|| format!("Could not create file {:?}", fullnode_service_path))?; + serde_yaml::to_writer(fullnode_service_file, &fullnode_service)?; + info!("Wrote fullnode k8s specs to path: {:?}", &tmp_dir); + + // create the StatefulSet + let sts = stateful_set_api + .create(&PostParams::default(), &fullnode_stateful_set) + .await?; + let fullnode_stateful_set_str = serde_yaml::to_string(&fullnode_stateful_set)?; + info!( + "Created fullnode StatefulSet:\n---{}\n---", + &fullnode_stateful_set_str + ); + // and its service + service_api + .create(&PostParams::default(), &fullnode_service) + .await?; + let fullnode_service_str = serde_yaml::to_string(&fullnode_service)?; + info!( + "Created fullnode Service:\n---{}\n---", + fullnode_service_str + ); + + let service_name = &fullnode_service + .metadata + .name + .context("Fullnode Service does not have metadata.name")?; + + let full_service_name = format!("{}.{}.svc", service_name, &namespace); // this is the full name that includes the namespace + + // Append the cluster name 
if its a multi-cluster deployment + let full_service_name = if let Some(target_cluster_name) = sts + .metadata + .labels + .as_ref() + .and_then(|labels| labels.get("multicluster/targetcluster")) + { + format!("{}.{}", &full_service_name, &target_cluster_name) + } else { + full_service_name + }; + + let ret_node = K8sNode { + name: fullnode_name.clone(), + stateful_set_name: fullnode_stateful_set + .metadata + .name + .context("Fullnode StatefulSet does not have metadata.name")?, + peer_id: node_peer_id, + index: 0, + service_name: full_service_name, + version: version.clone(), + namespace, + haproxy_enabled: false, + + port_forward_enabled: use_port_forward, + rest_api_port: REST_API_SERVICE_PORT, // in the case of port-forward, this port will be changed at runtime + }; + + Ok((node_peer_id, ret_node)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + MockConfigMapApi, MockPersistentVolumeClaimApi, MockServiceApi, MockStatefulSetApi, + }; + use aptos_config::config::Identity; + use aptos_sdk::crypto::{x25519::PrivateKey, Uniform}; + use k8s_openapi::apimachinery::pkg::api::resource::Quantity; + + /// Get a dummy validator persistent volume claim that looks like one created by terraform/helm/aptos-node/templates/validator.yaml + fn get_dummy_validator_persistent_volume_claim() -> PersistentVolumeClaim { + PersistentVolumeClaim { + metadata: ObjectMeta { + name: Some("aptos-node-0-validator-e42069".to_string()), + ..ObjectMeta::default() + }, + spec: Some(PersistentVolumeClaimSpec { + access_modes: Some(vec!["ReadWriteOnce".to_string()]), + resources: Some(ResourceRequirements { + requests: Some( + [ + ("storage".to_string(), Quantity("1Gi".to_string())), + ("storage2".to_string(), Quantity("2Gi".to_string())), + ] + .iter() + .cloned() + .collect(), + ), + ..ResourceRequirements::default() + }), + ..PersistentVolumeClaimSpec::default() + }), + ..PersistentVolumeClaim::default() + } + } + + /// Get a dummy validator stateful set that looks like one created by terraform/helm/aptos-node/templates/validator.yaml + fn get_dummy_validator_stateful_set() -> StatefulSet { + let labels: BTreeMap = [ + ( + "app.kubernetes.io/name".to_string(), + "validator".to_string(), + ), + ( + "app.kubernetes.io/instance".to_string(), + "aptos-node-0-validator-0".to_string(), + ), + ( + "app.kubernetes.io/part-of".to_string(), + "forge-pfn".to_string(), + ), + ] + .iter() + .cloned() + .collect(); + StatefulSet { + metadata: ObjectMeta { + name: Some("aptos-node-0-validator".to_string()), + labels: Some(labels.clone()), + ..ObjectMeta::default() + }, + spec: Some(StatefulSetSpec { + replicas: Some(1), + template: PodTemplateSpec { + metadata: Some(ObjectMeta { + labels: Some(labels), + ..ObjectMeta::default() + }), + spec: Some(PodSpec { + containers: vec![Container { + name: "validator".to_string(), + image: Some( + "banana.fruit.aptos/potato/validator:banana_image_tag".to_string(), + ), + command: Some(vec![ + "/usr/local/bin/aptos-node".to_string(), + "-f".to_string(), + "/opt/aptos/etc/validator.yaml".to_string(), + ]), + volume_mounts: Some(vec![ + VolumeMount { + mount_path: APTOS_CONFIG_VOLUME_PATH.to_string(), + name: APTOS_CONFIG_VOLUME_NAME.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + mount_path: APTOS_DATA_VOLUME_PATH.to_string(), + name: APTOS_DATA_VOLUME_NAME.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + mount_path: GENESIS_CONFIG_VOLUME_PATH.to_string(), + name: GENESIS_CONFIG_VOLUME_NAME.to_string(), + ..VolumeMount::default() + }, + ]), + 
..Container::default() + }], + ..PodSpec::default() + }), + }, + ..StatefulSetSpec::default() + }), + ..StatefulSet::default() + } + } + + #[tokio::test] + /// Test that we can create a node config configmap and that it contains the node config at a known data key + async fn test_create_node_config_map() { + let config_map_name = "aptos-node-0-validator-0-config".to_string(); + let node_config = NodeConfig::default(); + + // expect that the one we get is the same as the one we created + let created_config_map = + create_node_config_configmap(config_map_name.clone(), &node_config) + .await + .unwrap(); + + let regenerated_node_config = serde_yaml::from_str::( + created_config_map + .data + .unwrap() + .get(FULLNODE_CONFIG_MAP_KEY) + .unwrap(), + ) + .unwrap(); + assert_eq!(regenerated_node_config, node_config); + } + + #[test] + /// Test that we can create a data volume from an existing validator data volume, and that we inherit the resource requests + fn test_create_persistent_volume_claim() { + let requests = Some( + [ + ("storage".to_string(), Quantity("1Gi".to_string())), + ("storage2".to_string(), Quantity("2Gi".to_string())), + ] + .iter() + .cloned() + .collect(), + ); + let pvc = PersistentVolumeClaim { + metadata: ObjectMeta { + name: Some(APTOS_DATA_VOLUME_NAME.to_string()), + ..ObjectMeta::default() + }, + spec: Some(PersistentVolumeClaimSpec { + access_modes: Some(vec!["ReadWriteOnce".to_string()]), + resources: Some(ResourceRequirements { + requests, + ..ResourceRequirements::default() + }), + ..PersistentVolumeClaimSpec::default() + }), + ..PersistentVolumeClaim::default() + }; + let created_pvc = create_fullnode_persistent_volume_claim(pvc.clone()); + + // assert that the resources are the same + assert_eq!( + created_pvc.unwrap().spec.unwrap().resources, + pvc.spec.unwrap().resources + ); + } + + #[test] + /// Test that the created StatefulSet and Service are connected + fn test_create_fullnode_stateful_set_and_service_connected() { + // top level args + let era = 42069; + let peer_id = PeerId::random(); + let fullnode_name = "fullnode-".to_string() + &peer_id.to_string(); // everything should be keyed on this + let fullnode_image = "fruit.com/banana:latest".to_string(); + let fullnode_genesis_secret_name = format!("aptos-node-0-genesis-e{}", era); + let fullnode_node_config_config_map_name = format!("{}-config", fullnode_name); + + let fullnode_stateful_set = create_fullnode_stateful_set( + fullnode_name.clone(), + fullnode_image, + fullnode_genesis_secret_name, + fullnode_node_config_config_map_name, + get_dummy_validator_stateful_set(), + get_dummy_validator_persistent_volume_claim(), + ) + .unwrap(); + + let fullnode_service = create_fullnode_service(fullnode_name.clone()).unwrap(); + + // assert that the StatefulSet has the correct name + assert_eq!( + fullnode_stateful_set.metadata.name, + Some(fullnode_name.clone()) + ); + // assert that the Service has the correct name + assert_eq!(fullnode_service.metadata.name, Some(fullnode_name.clone())); + // assert that the StatefulSet has a serviceName that matches the Service + assert_eq!( + fullnode_stateful_set.spec.unwrap().service_name, + fullnode_name + ); + // assert that the labels in the Service match the StatefulSet + assert_eq!( + fullnode_service.spec.unwrap().selector, + fullnode_stateful_set.metadata.labels + ); + } + + #[tokio::test] + /// Full PFN installation test, checking that the resulting resources created are as expected + async fn test_install_public_fullnode() { + // top level args + let peer_id = 
PeerId::random(); + let version = Version::new(0, "banana".to_string()); + let _fullnode_name = "fullnode-".to_string() + &peer_id.to_string(); + + // create APIs + let stateful_set_api = Arc::new(MockStatefulSetApi::from_stateful_set( + get_dummy_validator_stateful_set(), + )); + let configmap_api = Arc::new(MockConfigMapApi::from_config_map(ConfigMap::default())); + let persistent_volume_claim_api = + Arc::new(MockPersistentVolumeClaimApi::from_persistent_volume_claim( + get_dummy_validator_persistent_volume_claim(), + )); + let service_api = Arc::new(MockServiceApi::from_service(Service::default())); + + // get the base config and mutate it + let mut node_config = get_default_pfn_node_config(); + node_config.full_node_networks[0].identity = + Identity::from_config(PrivateKey::generate_for_testing(), peer_id); + + let era = "42069".to_string(); + let namespace = "forge42069".to_string(); + + let (created_peer_id, created_node) = install_public_fullnode( + stateful_set_api, + configmap_api, + persistent_volume_claim_api, + service_api, + &version, + &node_config, + era, + namespace, + false, + ) + .await + .unwrap(); + + // assert the created resources match some patterns + assert_eq!(created_peer_id, peer_id); + assert_eq!( + created_node.name, + format!("fullnode-{}", &peer_id.short_str()) + ); + assert!(created_node.name.len() < 64); // This is a k8s limit + } +} diff --git a/testsuite/forge/src/backend/k8s/kube_api.rs b/testsuite/forge/src/backend/k8s/kube_api.rs index 3576fb5ea44cd..3ff3103fdff28 100644 --- a/testsuite/forge/src/backend/k8s/kube_api.rs +++ b/testsuite/forge/src/backend/k8s/kube_api.rs @@ -38,31 +38,324 @@ where } #[async_trait] -pub trait Get: Send + Sync { +pub trait ReadWrite: Send + Sync { async fn get(&self, name: &str) -> Result; -} - -#[async_trait] -pub trait Create: Send + Sync { async fn create(&self, pp: &PostParams, k: &K) -> Result; } +// Implement the traits for K8sApi + #[async_trait] -impl Get for K8sApi +impl ReadWrite for K8sApi where K: k8s_openapi::Resource + Send + Sync + Clone + DeserializeOwned + Serialize + Debug, { async fn get(&self, name: &str) -> Result { self.api.get(name).await } -} -#[async_trait] -impl Create for K8sApi -where - K: k8s_openapi::Resource + Send + Sync + Clone + DeserializeOwned + Serialize + Debug, -{ async fn create(&self, pp: &PostParams, k: &K) -> Result { self.api.create(pp, k).await } } + +#[cfg(test)] +pub mod mocks { + use super::*; + use crate::Result; + use async_trait::async_trait; + use hyper::StatusCode; + use k8s_openapi::api::{ + apps::v1::StatefulSet, + core::v1::{ConfigMap, Namespace, PersistentVolumeClaim, Pod, Secret, Service}, + }; + use kube::{api::PostParams, error::ErrorResponse, Error as KubeError}; + + // Mock StatefulSet API + + pub struct MockStatefulSetApi { + stateful_set: StatefulSet, + } + + impl MockStatefulSetApi { + pub fn from_stateful_set(stateful_set: StatefulSet) -> Self { + MockStatefulSetApi { stateful_set } + } + } + + #[async_trait] + impl ReadWrite for MockStatefulSetApi { + async fn get(&self, name: &str) -> Result { + if self.stateful_set.metadata.name == Some(name.to_string()) { + return Ok(self.stateful_set.clone()); + } + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "StatefulSet with name {} could not be found in {:?}", + name, self.stateful_set + ), + reason: "not_found".to_string(), + code: 404, + })); + } + + async fn create( + &self, + _pp: &PostParams, + stateful_set: &StatefulSet, + ) -> Result { + if 
self.stateful_set.metadata.name == stateful_set.metadata.name { + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "StatefulSet with same name already exists in {:?}", + self.stateful_set + ), + reason: "already_exists".to_string(), + code: 409, + })); + } + Ok(self.stateful_set.clone()) + } + } + + // Mock Pod API + + pub struct MockPodApi { + pod: Pod, + } + + impl MockPodApi { + pub fn from_pod(pod: Pod) -> Self { + MockPodApi { pod } + } + } + + #[async_trait] + impl ReadWrite for MockPodApi { + async fn get(&self, _name: &str) -> Result { + Ok(self.pod.clone()) + } + + async fn create(&self, _pp: &PostParams, _pod: &Pod) -> Result { + Ok(self.pod.clone()) + } + } + + // Mock ConfigMap API + + pub struct MockConfigMapApi { + config_map: ConfigMap, + } + + impl MockConfigMapApi { + pub fn from_config_map(config_map: ConfigMap) -> Self { + MockConfigMapApi { config_map } + } + } + + #[async_trait] + impl ReadWrite for MockConfigMapApi { + async fn get(&self, name: &str) -> Result { + if self.config_map.metadata.name == Some(name.to_string()) { + return Ok(self.config_map.clone()); + } + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "ConfigMap with name {} could not be found in {:?}", + name, self.config_map + ), + reason: "not_found".to_string(), + code: 404, + })); + } + + async fn create( + &self, + _pp: &PostParams, + config_map: &ConfigMap, + ) -> Result { + if self.config_map.metadata.name == config_map.metadata.name { + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "ConfigMap with same name already exists in {:?}", + self.config_map + ), + reason: "already_exists".to_string(), + code: 409, + })); + } + Ok(self.config_map.clone()) + } + } + + // Mock PersistentVolumeClaim API + + pub struct MockPersistentVolumeClaimApi { + persistent_volume_claim: PersistentVolumeClaim, + } + + impl MockPersistentVolumeClaimApi { + pub fn from_persistent_volume_claim( + persistent_volume_claim: PersistentVolumeClaim, + ) -> Self { + MockPersistentVolumeClaimApi { + persistent_volume_claim, + } + } + } + + #[async_trait] + impl ReadWrite for MockPersistentVolumeClaimApi { + async fn get(&self, name: &str) -> Result { + if self.persistent_volume_claim.metadata.name == Some(name.to_string()) { + return Ok(self.persistent_volume_claim.clone()); + } + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "PersistentVolumeClaim with name {} could not be found in {:?}", + name, self.persistent_volume_claim + ), + reason: "not_found".to_string(), + code: 404, + })); + } + + async fn create( + &self, + _pp: &PostParams, + persistent_volume_claim: &PersistentVolumeClaim, + ) -> Result { + if self.persistent_volume_claim.metadata.name == persistent_volume_claim.metadata.name { + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "PersistentVolumeClaim with same name already exists in {:?}", + self.persistent_volume_claim + ), + reason: "already_exists".to_string(), + code: 409, + })); + } + Ok(self.persistent_volume_claim.clone()) + } + } + + // Mock Service API + + pub struct MockServiceApi { + service: Service, + } + + impl MockServiceApi { + pub fn from_service(service: Service) -> Self { + MockServiceApi { service } + } + } + + #[async_trait] + impl ReadWrite for MockServiceApi { + async fn get(&self, name: &str) -> Result { + if self.service.metadata.name == 
Some(name.to_string()) { + return Ok(self.service.clone()); + } + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "Service with name {} could not be found in {:?}", + name, self.service + ), + reason: "not_found".to_string(), + code: 404, + })); + } + + async fn create(&self, _pp: &PostParams, service: &Service) -> Result { + if self.service.metadata.name == service.metadata.name { + return Err(KubeError::Api(ErrorResponse { + status: "failed".to_string(), + message: format!( + "Service with same name already exists in {:?}", + self.service + ), + reason: "already_exists".to_string(), + code: 409, + })); + } + Ok(self.service.clone()) + } + } + + // Mock Service API + pub struct MockSecretApi { + secret: Option, + } + + impl MockSecretApi { + pub fn from_secret(secret: Option) -> Self { + MockSecretApi { secret } + } + } + + #[async_trait] + impl ReadWrite for MockSecretApi { + async fn get(&self, _name: &str) -> Result { + match self.secret { + Some(ref s) => Ok(s.clone()), + None => Err(KubeError::Api(ErrorResponse { + status: "status".to_string(), + message: "message".to_string(), + reason: "reason".to_string(), + code: 404, + })), + } + } + + async fn create(&self, _pp: &PostParams, secret: &Secret) -> Result { + return Ok(secret.clone()); + } + } + + // Mock API that always fails to create a new Namespace + + pub struct FailedNamespacesApi { + status_code: u16, + } + + impl FailedNamespacesApi { + pub fn from_status_code(status_code: u16) -> Self { + FailedNamespacesApi { status_code } + } + } + + #[async_trait] + impl ReadWrite for FailedNamespacesApi { + async fn get(&self, _name: &str) -> Result { + let status = StatusCode::from_u16(self.status_code).unwrap(); + Err(KubeError::Api(ErrorResponse { + status: status.to_string(), + code: status.as_u16(), + message: "Failed to get namespace".to_string(), + reason: "Failed to parse error data".into(), + })) + } + + async fn create( + &self, + _pp: &PostParams, + _namespace: &Namespace, + ) -> Result { + let status = StatusCode::from_u16(self.status_code).unwrap(); + Err(KubeError::Api(ErrorResponse { + status: status.to_string(), + code: status.as_u16(), + message: "Failed to create namespace".to_string(), + reason: "Failed to parse error data".into(), + })) + } + } +} diff --git a/testsuite/forge/src/backend/k8s/mod.rs b/testsuite/forge/src/backend/k8s/mod.rs index 929fb00b9ad2f..1f0f0057fd734 100644 --- a/testsuite/forge/src/backend/k8s/mod.rs +++ b/testsuite/forge/src/backend/k8s/mod.rs @@ -11,6 +11,7 @@ use std::{convert::TryInto, num::NonZeroUsize, time::Duration}; pub mod chaos; mod cluster_helper; pub mod constants; +mod fullnode; pub mod kube_api; pub mod node; pub mod prometheus; @@ -20,6 +21,9 @@ mod swarm; use aptos_sdk::crypto::ed25519::ED25519_PRIVATE_KEY_LENGTH; pub use cluster_helper::*; pub use constants::*; +pub use fullnode::*; +#[cfg(test)] +pub use kube_api::mocks::*; pub use kube_api::*; pub use node::K8sNode; pub use stateful_set::*; @@ -111,8 +115,8 @@ impl Factory for K8sFactory { }; let kube_client = create_k8s_client().await?; - let (validators, fullnodes) = if self.reuse { - match collect_running_nodes( + let (new_era, validators, fullnodes) = if self.reuse { + let (validators, fullnodes) = match collect_running_nodes( &kube_client, self.kube_namespace.clone(), self.use_port_forward, @@ -124,7 +128,9 @@ impl Factory for K8sFactory { Err(e) => { bail!(e); }, - } + }; + let new_era = None; // TODO: get the actual era + (new_era, validators, fullnodes) } else { // 
clear the cluster of resources delete_k8s_resources(kube_client.clone(), &self.kube_namespace).await?; @@ -162,7 +168,7 @@ impl Factory for K8sFactory { ) .await { - Ok(res) => res, + Ok(res) => (Some(res.0), res.1, res.2), Err(e) => { uninstall_testnet_resources(self.kube_namespace.clone()).await?; bail!(e); @@ -178,6 +184,8 @@ impl Factory for K8sFactory { validators, fullnodes, self.keep, + new_era, + self.use_port_forward, ) .await .unwrap(); diff --git a/testsuite/forge/src/backend/k8s/prometheus.rs b/testsuite/forge/src/backend/k8s/prometheus.rs index fd8b67400ae0c..b4ac6e4cdfb75 100644 --- a/testsuite/forge/src/backend/k8s/prometheus.rs +++ b/testsuite/forge/src/backend/k8s/prometheus.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{create_k8s_client, Get, K8sApi, Result}; +use crate::{create_k8s_client, K8sApi, ReadWrite, Result}; use anyhow::bail; use aptos_logger::info; use k8s_openapi::api::core::v1::Secret; @@ -20,7 +20,7 @@ pub async fn get_prometheus_client() -> Result { } async fn create_prometheus_client_from_environment( - secrets_api: Arc>, + secrets_api: Arc>, ) -> Result { let prom_url_env = std::env::var("PROMETHEUS_URL"); let prom_token_env = std::env::var("PROMETHEUS_TOKEN"); @@ -135,40 +135,15 @@ pub async fn query_with_metadata( #[cfg(test)] mod tests { use super::*; - use async_trait::async_trait; + use crate::MockSecretApi; use k8s_openapi::ByteString; - use kube::{api::ObjectMeta, error::ErrorResponse, Error as KubeError}; + use kube::api::ObjectMeta; use prometheus_http_query::Error as PrometheusError; use std::{ env, time::{SystemTime, UNIX_EPOCH}, }; - struct MockSecretApi { - secret: Option, - } - - impl MockSecretApi { - fn from_secret(secret: Option) -> Self { - MockSecretApi { secret } - } - } - - #[async_trait] - impl Get for MockSecretApi { - async fn get(&self, _name: &str) -> Result { - match self.secret { - Some(ref s) => Ok(s.clone()), - None => Err(KubeError::Api(ErrorResponse { - status: "status".to_string(), - message: "message".to_string(), - reason: "reason".to_string(), - code: 404, - })), - } - } - } - #[tokio::test] async fn test_create_client_secret() { let secret_api = Arc::new(MockSecretApi::from_secret(Some(Secret { diff --git a/testsuite/forge/src/backend/k8s/stateful_set.rs b/testsuite/forge/src/backend/k8s/stateful_set.rs index 74cb9d0e770df..46079e451162d 100644 --- a/testsuite/forge/src/backend/k8s/stateful_set.rs +++ b/testsuite/forge/src/backend/k8s/stateful_set.rs @@ -1,11 +1,10 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{create_k8s_client, Get, K8sApi, Result, KUBECTL_BIN}; +use crate::{create_k8s_client, k8s_wait_nodes_strategy, K8sApi, ReadWrite, Result, KUBECTL_BIN}; use again::RetryPolicy; use anyhow::bail; use aptos_logger::info; -use aptos_retrier::ExponentWithLimitDelay; use json_patch::{Patch as JsonPatch, PatchOperation, ReplaceOperation}; use k8s_openapi::api::{apps::v1::StatefulSet, core::v1::Pod}; use kube::{ @@ -86,8 +85,8 @@ pub async fn wait_stateful_set( /// Checks the status of a single K8s StatefulSet. Also inspects the pods to make sure they are all ready. 
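check_stateful_set_status below now takes its StatefulSet and Pod clients as Arc<dyn ReadWrite<...>> trait objects, so the same code path can be driven by K8sApi in production and by the mocks above in unit tests. A minimal sketch of that injection pattern, assuming the trait is generic over the resource type (the generic parameters are elided in this rendering of the diff) and that MockStatefulSetApi and get_dummy_validator_stateful_set are visible to the test:

use k8s_openapi::api::apps::v1::StatefulSet;
use std::sync::Arc;

// Accepts any ReadWrite implementor: K8sApi<StatefulSet> in production,
// MockStatefulSetApi in unit tests.
async fn stateful_set_exists(api: Arc<dyn ReadWrite<StatefulSet>>, name: &str) -> bool {
    api.get(name).await.is_ok()
}

#[tokio::test]
async fn test_stateful_set_exists_with_mock() {
    let api = Arc::new(MockStatefulSetApi::from_stateful_set(
        get_dummy_validator_stateful_set(),
    ));
    // The mock only answers for the name recorded in its metadata, so an
    // unrelated name is reported as missing.
    assert!(!stateful_set_exists(api, "unrelated-sts").await);
}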
async fn check_stateful_set_status( - stateful_set_api: Arc>, - pod_api: Arc>, + stateful_set_api: Arc>, + pod_api: Arc>, sts_name: &str, desired_replicas: u64, ) -> Result<(), WorkloadScalingError> { @@ -283,81 +282,46 @@ pub async fn check_for_container_restart( kube_namespace: &str, sts_name: &str, ) -> Result<()> { - aptos_retrier::retry_async( - ExponentWithLimitDelay::new(1000, 10 * 1000, 60 * 1000), - || { - let pod_api: Api = Api::namespaced(kube_client.clone(), kube_namespace); - Box::pin(async move { - // Get the StatefulSet's Pod status - let pod_name = format!("{}-0", sts_name); - if let Some(status) = pod_api.get_status(&pod_name).await?.status { - if let Some(container_statuses) = status.container_statuses { - for container_status in container_statuses { - if container_status.restart_count > 0 { - bail!( - "Container {} in pod {} restarted {} times ", - container_status.name, - &pod_name, - container_status.restart_count - ); - } + aptos_retrier::retry_async(k8s_wait_nodes_strategy(), || { + let pod_api: Api = Api::namespaced(kube_client.clone(), kube_namespace); + Box::pin(async move { + // Get the StatefulSet's Pod status + let pod_name = format!("{}-0", sts_name); + if let Some(status) = pod_api.get_status(&pod_name).await?.status { + if let Some(container_statuses) = status.container_statuses { + for container_status in container_statuses { + if container_status.restart_count > 0 { + bail!( + "Container {} in pod {} restarted {} times ", + container_status.name, + &pod_name, + container_status.restart_count + ); } - return Ok(()); } - // In case of no restarts, k8 apis returns no container statuses - Ok(()) - } else { - bail!("Can't query the pod status for {}", sts_name) + return Ok(()); } - }) - }, - ) + // In case of no restarts, k8 apis returns no container statuses + Ok(()) + } else { + bail!("Can't query the pod status for {}", sts_name) + } + }) + }) .await } #[cfg(test)] mod tests { use super::*; - use async_trait::async_trait; - use k8s_openapi::api::{ - apps::v1::{StatefulSet, StatefulSetSpec, StatefulSetStatus}, - core::v1::{ContainerState, ContainerStateWaiting, ContainerStatus, PodStatus}, + use crate::{MockPodApi, MockStatefulSetApi}; + use k8s_openapi::{ + api::{ + apps::v1::{StatefulSet, StatefulSetSpec, StatefulSetStatus}, + core::v1::{ContainerState, ContainerStateWaiting, ContainerStatus, PodStatus}, + }, + apimachinery::pkg::apis::meta::v1::ObjectMeta, }; - use kube::{api::ObjectMeta, Error as KubeError}; - - struct MockStatefulSetApi { - stateful_set: StatefulSet, - } - - impl MockStatefulSetApi { - fn from_stateful_set(stateful_set: StatefulSet) -> Self { - MockStatefulSetApi { stateful_set } - } - } - - #[async_trait] - impl Get for MockStatefulSetApi { - async fn get(&self, _name: &str) -> Result { - Ok(self.stateful_set.clone()) - } - } - - struct MockPodApi { - pod: Pod, - } - - impl MockPodApi { - fn from_pod(pod: Pod) -> Self { - MockPodApi { pod } - } - } - - #[async_trait] - impl Get for MockPodApi { - async fn get(&self, _name: &str) -> Result { - Ok(self.pod.clone()) - } - } #[tokio::test] async fn test_check_stateful_set_status() { diff --git a/testsuite/forge/src/backend/k8s/swarm.rs b/testsuite/forge/src/backend/k8s/swarm.rs index c2b3d2b92d252..1d0dde75f10a5 100644 --- a/testsuite/forge/src/backend/k8s/swarm.rs +++ b/testsuite/forge/src/backend/k8s/swarm.rs @@ -3,25 +3,28 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - check_for_container_restart, create_k8s_client, delete_all_chaos, get_free_port, - get_stateful_set_image, + 
check_for_container_restart, create_k8s_client, delete_all_chaos, get_default_pfn_node_config, + get_free_port, get_stateful_set_image, install_public_fullnode, interface::system_metrics::{query_prometheus_system_metrics, SystemMetricsThreshold}, node::K8sNode, prometheus::{self, query_with_metadata}, query_sequence_number, set_stateful_set_image_tag, uninstall_testnet_resources, ChainInfo, - FullNode, Node, Result, Swarm, SwarmChaos, Validator, Version, HAPROXY_SERVICE_SUFFIX, + FullNode, K8sApi, Node, Result, Swarm, SwarmChaos, Validator, Version, HAPROXY_SERVICE_SUFFIX, REST_API_HAPROXY_SERVICE_PORT, REST_API_SERVICE_PORT, }; use ::aptos_logger::*; use anyhow::{anyhow, bail, format_err}; use aptos_config::config::NodeConfig; -use aptos_retrier::ExponentWithLimitDelay; +use aptos_retrier::fixed_retry_strategy; use aptos_sdk::{ crypto::ed25519::Ed25519PrivateKey, move_types::account_address::AccountAddress, types::{chain_id::ChainId, AccountKey, LocalAccount, PeerId}, }; -use k8s_openapi::api::apps::v1::StatefulSet; +use k8s_openapi::api::{ + apps::v1::StatefulSet, + core::v1::{ConfigMap, PersistentVolumeClaim, Service}, +}; use kube::{ api::{Api, ListParams}, client::Client as K8sClient, @@ -47,6 +50,8 @@ pub struct K8sSwarm { keep: bool, chaoses: HashSet, prom_client: Option, + era: Option, + use_port_forward: bool, } impl K8sSwarm { @@ -58,6 +63,8 @@ impl K8sSwarm { validators: HashMap, fullnodes: HashMap, keep: bool, + era: Option, + use_port_forward: bool, ) -> Result { let kube_client = create_k8s_client().await?; @@ -99,6 +106,8 @@ impl K8sSwarm { keep, chaoses: HashSet::new(), prom_client, + era, + use_port_forward, }; // test hitting the configured prometheus endpoint @@ -134,6 +143,49 @@ impl K8sSwarm { fn get_kube_client(&self) -> K8sClient { self.kube_client.clone() } + + /// Installs a PFN with the given version and node config + async fn install_public_fullnode_resources<'a>( + &mut self, + version: &'a Version, + node_config: &'a NodeConfig, + ) -> Result<(PeerId, K8sNode)> { + // create APIs + let stateful_set_api: Arc> = Arc::new(K8sApi::::from_client( + self.get_kube_client(), + Some(self.kube_namespace.clone()), + )); + let configmap_api: Arc> = Arc::new(K8sApi::::from_client( + self.get_kube_client(), + Some(self.kube_namespace.clone()), + )); + let persistent_volume_claim_api: Arc> = + Arc::new(K8sApi::::from_client( + self.get_kube_client(), + Some(self.kube_namespace.clone()), + )); + let service_api: Arc> = Arc::new(K8sApi::::from_client( + self.get_kube_client(), + Some(self.kube_namespace.clone()), + )); + let (peer_id, mut k8snode) = install_public_fullnode( + stateful_set_api, + configmap_api, + persistent_volume_claim_api, + service_api, + version, + node_config, + self.era + .as_ref() + .expect("Installing PFN requires acquiring the current chain era") + .clone(), + self.kube_namespace.clone(), + self.use_port_forward, + ) + .await?; + k8snode.start().await?; // actually start the node. 
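This helper backs the async Swarm::add_full_node implementation a few hunks below; it needs a known chain era, which the reuse path above currently records as None. A minimal call-site sketch (hypothetical helper, assuming crate::Result is the usual anyhow-style alias):

// Install one extra PFN using the swarm's own default public-fullnode template.
async fn add_one_pfn(swarm: &mut dyn Swarm, version: &Version) -> Result<PeerId> {
    let template = swarm.get_default_pfn_node_config();
    // Creates the StatefulSet, ConfigMap, PVC and Service, then registers the node.
    let peer_id = swarm.add_full_node(version, template).await?;
    Ok(peer_id)
}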
if port-forward is enabled, this is when it gets its ephemeral port + Ok((peer_id, k8snode)) + } } #[async_trait::async_trait] @@ -245,8 +297,13 @@ impl Swarm for K8sSwarm { todo!() } - fn add_full_node(&mut self, _version: &Version, _template: NodeConfig) -> Result { - todo!() + async fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result { + self.install_public_fullnode_resources(version, &template) + .await + .map(|(peer_id, node)| { + self.fullnodes.insert(peer_id, node); + peer_id + }) } fn remove_full_node(&mut self, _id: PeerId) -> Result<()> { @@ -374,17 +431,22 @@ impl Swarm for K8sSwarm { self.chain_id, ) } + + fn get_default_pfn_node_config(&self) -> NodeConfig { + get_default_pfn_node_config() + } } /// Amount of time to wait for genesis to complete pub fn k8s_wait_genesis_strategy() -> impl Iterator { - // FIXME: figure out why Genesis doesn't finish in 10 minutes, increasing timeout to 20. - ExponentWithLimitDelay::new(1000, 10 * 1000, 20 * 60 * 1000) + // retry every 10 seconds for 10 minutes + fixed_retry_strategy(10 * 1000, 60) } -/// Amount of time to wait for nodes to respond on the REST API +/// Amount of time to wait for nodes to spin up, from provisioning to API ready pub fn k8s_wait_nodes_strategy() -> impl Iterator { - ExponentWithLimitDelay::new(1000, 10 * 1000, 15 * 60 * 1000) + // retry every 10 seconds for 20 minutes + fixed_retry_strategy(10 * 1000, 120) } async fn list_stateful_sets(client: K8sClient, kube_namespace: &str) -> Result> { @@ -435,6 +497,18 @@ fn get_k8s_node_from_stateful_set( service_name = format!("{}.{}.svc", &service_name, &namespace); } + // Append the cluster name if its a multi-cluster deployment + let service_name = if let Some(target_cluster_name) = sts + .metadata + .labels + .as_ref() + .and_then(|labels| labels.get("multicluster/targetcluster")) + { + format!("{}.{}", &service_name, &target_cluster_name) + } else { + service_name + }; + // If HAProxy is enabled, use the port on its Service. Otherwise use the port on the validator Service let mut rest_api_port = if enable_haproxy { REST_API_HAPROXY_SERVICE_PORT diff --git a/testsuite/forge/src/backend/local/swarm.rs b/testsuite/forge/src/backend/local/swarm.rs index a0a6128f3fba5..96da051759711 100644 --- a/testsuite/forge/src/backend/local/swarm.rs +++ b/testsuite/forge/src/backend/local/swarm.rs @@ -456,7 +456,7 @@ impl LocalSwarm { impl Drop for LocalSwarm { fn drop(&mut self) { // If panicking, persist logs - if std::thread::panicking() { + if std::env::var("LOCAL_SWARM_SAVE_LOGS").is_ok() || std::thread::panicking() { eprintln!("Logs located at {}", self.logs_location()); } } @@ -548,7 +548,7 @@ impl Swarm for LocalSwarm { self.add_validator_fullnode(version, template, id) } - fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result { + async fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result { self.add_fullnode(version, template) } @@ -649,6 +649,10 @@ impl Swarm for LocalSwarm { self.chain_id, ) } + + fn get_default_pfn_node_config(&self) -> NodeConfig { + todo!() + } } #[derive(Debug)] diff --git a/testsuite/forge/src/interface/admin.rs b/testsuite/forge/src/interface/admin.rs index 1ee409ea2b53d..50d726ced5a6a 100644 --- a/testsuite/forge/src/interface/admin.rs +++ b/testsuite/forge/src/interface/admin.rs @@ -13,7 +13,7 @@ use reqwest::Url; /// of the validators or full nodes running on the network. pub trait AdminTest: Test { /// Executes the test against the given context. 
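The multicluster/targetcluster handling a few hunks above boils down to a small naming rule; a standalone sketch of its shape (hypothetical helper, not part of this change):

// In a multi-cluster deployment the fully qualified service name gains the
// target cluster as one more suffix; single-cluster names are left untouched.
fn qualified_service_name(service: &str, namespace: &str, target_cluster: Option<&str>) -> String {
    let base = format!("{}.{}.svc", service, namespace);
    match target_cluster {
        Some(cluster) => format!("{}.{}", base, cluster),
        None => base,
    }
}

// e.g. qualified_service_name("aptos-node-0-fullnode", "forge-pfn", Some("forge-cluster-1"))
// yields "aptos-node-0-fullnode.forge-pfn.svc.forge-cluster-1"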
- fn run<'t>(&self, ctx: &mut AdminContext<'t>) -> Result<()>; + fn run(&self, ctx: &mut AdminContext<'_>) -> Result<()>; } #[derive(Debug)] diff --git a/testsuite/forge/src/interface/chaos.rs b/testsuite/forge/src/interface/chaos.rs index 11e3f26160092..cb2375f643182 100644 --- a/testsuite/forge/src/interface/chaos.rs +++ b/testsuite/forge/src/interface/chaos.rs @@ -10,6 +10,8 @@ pub enum SwarmChaos { Partition(SwarmNetworkPartition), Bandwidth(SwarmNetworkBandwidth), Loss(SwarmNetworkLoss), + NetEm(SwarmNetEm), + CpuStress(SwarmCpuStress), } #[derive(Eq, Hash, PartialEq, Debug, Clone)] @@ -79,3 +81,46 @@ impl Display for SwarmNetworkLoss { ) } } + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct SwarmNetEm { + pub group_netems: Vec, +} + +impl Display for SwarmNetEm { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "NetEm nodes {:?}", self.group_netems) + } +} + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct GroupNetEm { + pub name: String, + pub source_nodes: Vec, + pub target_nodes: Vec, + pub delay_latency_ms: u64, + pub delay_jitter_ms: u64, + pub delay_correlation_percentage: u64, + pub loss_percentage: u64, + pub loss_correlation_percentage: u64, + pub rate_in_mbps: u64, +} + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct SwarmCpuStress { + pub group_cpu_stresses: Vec, +} + +impl Display for SwarmCpuStress { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "CpuStress nodes {:?}", self.group_cpu_stresses) + } +} + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct GroupCpuStress { + pub name: String, + pub target_nodes: Vec, + pub num_workers: u64, + pub load_per_worker: u64, +} diff --git a/testsuite/forge/src/interface/network.rs b/testsuite/forge/src/interface/network.rs index a6a6dca6b043c..243a34995e981 100644 --- a/testsuite/forge/src/interface/network.rs +++ b/testsuite/forge/src/interface/network.rs @@ -16,12 +16,12 @@ use tokio::runtime::Runtime; /// nodes which comprise the network. pub trait NetworkTest: Test { /// Executes the test against the given context. 
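GroupNetEm and GroupCpuStress carry everything the chaos controller needs per node group. A minimal sketch of constructing the new NetEm variant with illustrative numbers (real tests would fill the peer lists from the swarm):

use aptos_sdk::types::PeerId;

// Single-group NetEm spec: 100ms +/- 20ms delay, 1% loss, throughput capped
// at 100 Mbps. All values here are placeholders.
fn example_netem_chaos(source_nodes: Vec<PeerId>, target_nodes: Vec<PeerId>) -> SwarmChaos {
    SwarmChaos::NetEm(SwarmNetEm {
        group_netems: vec![GroupNetEm {
            name: "high-latency-group".to_string(),
            source_nodes,
            target_nodes,
            delay_latency_ms: 100,
            delay_jitter_ms: 20,
            delay_correlation_percentage: 50,
            loss_percentage: 1,
            loss_correlation_percentage: 50,
            rate_in_mbps: 100,
        }],
    })
}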
- fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()>; + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()>; } pub struct NetworkContext<'t> { core: CoreContext, - swarm: &'t mut dyn Swarm, + pub swarm: &'t mut dyn Swarm, pub report: &'t mut TestReport, pub global_duration: Duration, pub emit_job: EmitJobRequest, @@ -70,6 +70,7 @@ impl<'t> NetworkContext<'t> { .block_on(SuccessCriteriaChecker::check_for_success( &self.success_criteria, self.swarm, + self.report, stats, window, start_time, diff --git a/testsuite/forge/src/interface/node.rs b/testsuite/forge/src/interface/node.rs index 56c32396a4566..12abb066843bd 100644 --- a/testsuite/forge/src/interface/node.rs +++ b/testsuite/forge/src/interface/node.rs @@ -6,7 +6,7 @@ use crate::{Result, Version}; use anyhow::anyhow; use aptos_config::{config::NodeConfig, network_id::NetworkId}; use aptos_inspection_service::inspection_client::InspectionClient; -use aptos_rest_client::Client as RestClient; +use aptos_rest_client::{AptosBaseUrl, Client as RestClient}; use aptos_sdk::types::PeerId; use std::{ collections::HashMap, @@ -144,7 +144,9 @@ pub trait NodeExt: Node { /// Return REST API client of this Node fn rest_client_with_timeout(&self, timeout: Duration) -> RestClient { - RestClient::new_with_timeout(self.rest_api_endpoint(), timeout) + RestClient::builder(AptosBaseUrl::Custom(self.rest_api_endpoint())) + .timeout(timeout) + .build() } /// Return an InspectionClient for this Node @@ -215,8 +217,10 @@ pub trait NodeExt: Node { } async fn wait_until_healthy(&mut self, deadline: Instant) -> Result<()> { + let mut healthcheck_error = + HealthCheckError::Unknown(anyhow::anyhow!("No healthcheck performed yet")); while Instant::now() < deadline { - match self.health_check().await { + healthcheck_error = match self.health_check().await { Ok(()) => return Ok(()), Err(HealthCheckError::NotRunning(error)) => { return Err(anyhow::anyhow!( @@ -226,16 +230,17 @@ pub trait NodeExt: Node { error, )) }, - Err(_) => {}, // For other errors we'll retry - } + Err(e) => e, // For other errors we'll retry + }; tokio::time::sleep(Duration::from_millis(500)).await; } Err(anyhow::anyhow!( - "Timed out waiting for Node {}:{} to be healthy", + "Timed out waiting for Node {}:{} to be healthy: Error: {:?}", self.name(), - self.peer_id() + self.peer_id(), + healthcheck_error )) } } diff --git a/testsuite/forge/src/interface/swarm.rs b/testsuite/forge/src/interface/swarm.rs index 9fb1f653b1f53..ed9d2258c2e5d 100644 --- a/testsuite/forge/src/interface/swarm.rs +++ b/testsuite/forge/src/interface/swarm.rs @@ -64,7 +64,7 @@ pub trait Swarm: Sync { ) -> Result; /// Adds a FullNode to the swarm and returns the PeerId - fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result; + async fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result; /// Removes the FullNode with the provided PeerId fn remove_full_node(&mut self, id: PeerId) -> Result<()>; @@ -109,6 +109,8 @@ pub trait Swarm: Sync { fn aptos_public_info_for_node(&mut self, idx: usize) -> AptosPublicInfo<'_> { self.chain_info_for_node(idx).into_aptos_public_info() } + + fn get_default_pfn_node_config(&self) -> NodeConfig; } impl SwarmExt for T where T: Swarm {} diff --git a/testsuite/forge/src/report.rs b/testsuite/forge/src/report.rs index 4e082f5d8d1c9..aabaa849a09ec 100644 --- a/testsuite/forge/src/report.rs +++ b/testsuite/forge/src/report.rs @@ -2,9 +2,10 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
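wait_until_healthy above now remembers the most recent health-check failure so the timeout error says why the node never became healthy. The same poll-until-deadline shape in isolation (a hypothetical generic helper, not part of this change):

use std::time::{Duration, Instant};

// Retry an async check every 500ms until it succeeds or the deadline passes,
// surfacing the last observed error on timeout.
async fn wait_until<F, Fut>(mut check: F, deadline: Instant) -> anyhow::Result<()>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<()>>,
{
    let mut last_err = anyhow::anyhow!("no attempt made yet");
    while Instant::now() < deadline {
        match check().await {
            Ok(()) => return Ok(()),
            Err(e) => last_err = e,
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    Err(anyhow::anyhow!("timed out after deadline: {:#}", last_err))
}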
// SPDX-License-Identifier: Apache-2.0 +use aptos_logger::info; use aptos_transaction_emitter_lib::emitter::stats::TxnStats; use serde::Serialize; -use std::{fmt, time::Duration}; +use std::fmt; #[derive(Default, Debug, Serialize)] pub struct TestReport { @@ -37,32 +38,19 @@ impl TestReport { self.text.push('\n'); } self.text.push_str(&text); + info!("{}", text); } - pub fn report_txn_stats(&mut self, test_name: String, stats: &TxnStats, window: Duration) { - let submitted_txn = stats.submitted; - let expired_txn = stats.expired; - let avg_tps = stats.committed / window.as_secs(); - let avg_latency_client = if stats.committed == 0 { - 0u64 - } else { - stats.latency / stats.committed - }; - let p99_latency = stats.latency_buckets.percentile(99, 100); - self.report_metric(test_name.clone(), "submitted_txn", submitted_txn as f64); - self.report_metric(test_name.clone(), "expired_txn", expired_txn as f64); - self.report_metric(test_name.clone(), "avg_tps", avg_tps as f64); - self.report_metric(test_name.clone(), "avg_latency", avg_latency_client as f64); - self.report_metric(test_name.clone(), "p99_latency", p99_latency as f64); - let expired_text = if expired_txn == 0 { - "no expired txns".to_string() - } else { - format!("(!) expired {} out of {} txns", expired_txn, submitted_txn) - }; - self.report_text(format!( - "{} : {:.0} TPS, {:.1} ms latency, {:.1} ms p99 latency,{}", - test_name, avg_tps, avg_latency_client, p99_latency, expired_text - )); + pub fn report_txn_stats(&mut self, test_name: String, stats: &TxnStats) { + let rate = stats.rate(); + self.report_metric(test_name.clone(), "submitted_txn", stats.submitted as f64); + self.report_metric(test_name.clone(), "expired_txn", stats.expired as f64); + self.report_metric(test_name.clone(), "avg_tps", rate.committed as f64); + self.report_metric(test_name.clone(), "avg_latency", rate.latency as f64); + self.report_metric(test_name.clone(), "p50_latency", rate.p50_latency as f64); + self.report_metric(test_name.clone(), "p90_latency", rate.p90_latency as f64); + self.report_metric(test_name.clone(), "p99_latency", rate.p99_latency as f64); + self.report_text(format!("{} : {}", test_name, rate)); } pub fn print_report(&self) { diff --git a/testsuite/forge/src/runner.rs b/testsuite/forge/src/runner.rs index ab0f91234fefd..6117a2f296d6f 100644 --- a/testsuite/forge/src/runner.rs +++ b/testsuite/forge/src/runner.rs @@ -92,13 +92,14 @@ arg_enum! { } } +#[allow(clippy::derivable_impls)] // Required to overcome the limitations of arg_enum! 
impl Default for Format { fn default() -> Self { Format::Pretty } } -pub fn forge_main(tests: ForgeConfig<'_>, factory: F, options: &Options) -> Result<()> { +pub fn forge_main(tests: ForgeConfig, factory: F, options: &Options) -> Result<()> { let forge = Forge::new(options, tests, Duration::from_secs(30), factory); if options.list { @@ -125,10 +126,10 @@ pub enum InitialVersion { pub type NodeConfigFn = Arc; pub type GenesisConfigFn = Arc; -pub struct ForgeConfig<'cfg> { - aptos_tests: Vec<&'cfg dyn AptosTest>, - admin_tests: Vec<&'cfg dyn AdminTest>, - network_tests: Vec<&'cfg dyn NetworkTest>, +pub struct ForgeConfig { + aptos_tests: Vec>, + admin_tests: Vec>, + network_tests: Vec>, /// The initial number of validators to spawn when the test harness creates a swarm initial_validator_count: NonZeroUsize, @@ -158,22 +159,37 @@ pub struct ForgeConfig<'cfg> { existing_db_tag: Option, } -impl<'cfg> ForgeConfig<'cfg> { +impl ForgeConfig { pub fn new() -> Self { Self::default() } - pub fn with_aptos_tests(mut self, aptos_tests: Vec<&'cfg dyn AptosTest>) -> Self { + pub fn add_aptos_test(mut self, aptos_test: T) -> Self { + self.aptos_tests.push(Box::new(aptos_test)); + self + } + + pub fn with_aptos_tests(mut self, aptos_tests: Vec>) -> Self { self.aptos_tests = aptos_tests; self } - pub fn with_admin_tests(mut self, admin_tests: Vec<&'cfg dyn AdminTest>) -> Self { + pub fn add_admin_test(mut self, admin_test: T) -> Self { + self.admin_tests.push(Box::new(admin_test)); + self + } + + pub fn with_admin_tests(mut self, admin_tests: Vec>) -> Self { self.admin_tests = admin_tests; self } - pub fn with_network_tests(mut self, network_tests: Vec<&'cfg dyn NetworkTest>) -> Self { + pub fn add_network_test(mut self, network_test: T) -> Self { + self.network_tests.push(Box::new(network_test)); + self + } + + pub fn with_network_tests(mut self, network_tests: Vec>) -> Self { self.network_tests = network_tests; self } @@ -240,12 +256,55 @@ impl<'cfg> ForgeConfig<'cfg> { self.admin_tests.len() + self.network_tests.len() + self.aptos_tests.len() } - pub fn all_tests(&self) -> impl Iterator { + pub fn all_tests(&self) -> Vec>> { self.admin_tests .iter() - .map(|t| t as &dyn Test) - .chain(self.network_tests.iter().map(|t| t as &dyn Test)) - .chain(self.aptos_tests.iter().map(|t| t as &dyn Test)) + .map(|t| Box::new(AnyTestRef::Admin(t.as_ref()))) + .chain( + self.network_tests + .iter() + .map(|t| Box::new(AnyTestRef::Network(t.as_ref()))), + ) + .chain( + self.aptos_tests + .iter() + .map(|t| Box::new(AnyTestRef::Aptos(t.as_ref()))), + ) + .collect() + } +} + +// Workaround way to implement all_tests, for: +// error[E0658]: cannot cast `dyn interface::admin::AdminTest` to `dyn interface::test::Test`, trait upcasting coercion is experimental +pub enum AnyTestRef<'a> { + Aptos(&'a dyn AptosTest), + Admin(&'a dyn AdminTest), + Network(&'a dyn NetworkTest), +} + +impl<'a> Test for AnyTestRef<'a> { + fn name(&self) -> &'static str { + match self { + AnyTestRef::Aptos(t) => t.name(), + AnyTestRef::Admin(t) => t.name(), + AnyTestRef::Network(t) => t.name(), + } + } + + fn ignored(&self) -> bool { + match self { + AnyTestRef::Aptos(t) => t.ignored(), + AnyTestRef::Admin(t) => t.ignored(), + AnyTestRef::Network(t) => t.ignored(), + } + } + + fn should_fail(&self) -> ShouldFail { + match self { + AnyTestRef::Aptos(t) => t.should_fail(), + AnyTestRef::Admin(t) => t.should_fail(), + AnyTestRef::Network(t) => t.should_fail(), + } } } @@ -279,7 +338,7 @@ impl ForgeRunnerMode { } } -impl<'cfg> Default for 
ForgeConfig<'cfg> { +impl Default for ForgeConfig { fn default() -> Self { let forge_run_mode = ForgeRunnerMode::try_from_env().unwrap_or(ForgeRunnerMode::K8s); let success_criteria = if forge_run_mode == ForgeRunnerMode::Local { @@ -315,7 +374,7 @@ impl<'cfg> Default for ForgeConfig<'cfg> { pub struct Forge<'cfg, F> { options: &'cfg Options, - tests: ForgeConfig<'cfg>, + tests: ForgeConfig, global_duration: Duration, factory: F, } @@ -323,7 +382,7 @@ pub struct Forge<'cfg, F> { impl<'cfg, F: Factory> Forge<'cfg, F> { pub fn new( options: &'cfg Options, - tests: ForgeConfig<'cfg>, + tests: ForgeConfig, global_duration: Duration, factory: F, ) -> Self { @@ -336,7 +395,7 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { } pub fn list(&self) -> Result<()> { - for test in self.filter_tests(self.tests.all_tests()) { + for test in self.filter_tests(&self.tests.all_tests()) { println!("{}: test", test.name()); } @@ -344,7 +403,7 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { println!(); println!( "{} tests", - self.filter_tests(self.tests.all_tests()).count() + self.filter_tests(&self.tests.all_tests()).count() ); } @@ -362,8 +421,8 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { } pub fn run(&self) -> Result { - let test_count = self.filter_tests(self.tests.all_tests()).count(); - let filtered_out = test_count.saturating_sub(self.tests.all_tests().count()); + let test_count = self.filter_tests(&self.tests.all_tests()).count(); + let filtered_out = test_count.saturating_sub(self.tests.all_tests().len()); let mut report = TestReport::new(); let mut summary = TestSummary::new(test_count, filtered_out); @@ -396,7 +455,7 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { ))?; // Run AptosTests - for test in self.filter_tests(self.tests.aptos_tests.iter()) { + for test in self.filter_tests(&self.tests.aptos_tests) { let mut aptos_ctx = AptosContext::new( CoreContext::from_rng(&mut rng), swarm.chain_info().into_aptos_public_info(), @@ -408,7 +467,7 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { } // Run AdminTests - for test in self.filter_tests(self.tests.admin_tests.iter()) { + for test in self.filter_tests(&self.tests.admin_tests) { let mut admin_ctx = AdminContext::new( CoreContext::from_rng(&mut rng), swarm.chain_info(), @@ -419,7 +478,7 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { summary.handle_result(test.name().to_owned(), result)?; } - for test in self.filter_tests(self.tests.network_tests.iter()) { + for test in self.filter_tests(&self.tests.network_tests) { let mut network_ctx = NetworkContext::new( CoreContext::from_rng(&mut rng), &mut *swarm, @@ -452,11 +511,12 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { } } - fn filter_tests<'a, T: Test, I: Iterator + 'a>( + fn filter_tests<'a, T: Test + ?Sized>( &'a self, - tests: I, - ) -> impl Iterator + 'a { + tests: &'a [Box], + ) -> impl Iterator> { tests + .iter() // Filter by ignored .filter( move |test| match (self.options.include_ignored, self.options.ignored) { diff --git a/testsuite/forge/src/success_criteria.rs b/testsuite/forge/src/success_criteria.rs index 31516103c364a..51ce299e180d4 100644 --- a/testsuite/forge/src/success_criteria.rs +++ b/testsuite/forge/src/success_criteria.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{system_metrics::SystemMetricsThreshold, Swarm, SwarmExt}; +use crate::{system_metrics::SystemMetricsThreshold, Swarm, SwarmExt, TestReport}; use anyhow::{bail, Context}; use aptos::node::analyze::fetch_metadata::FetchMetadata; use aptos_sdk::types::PeerId; @@ -24,9 +24,11 @@ pub enum 
LatencyType { #[derive(Default, Clone, Debug)] pub struct SuccessCriteria { - pub avg_tps: usize, + pub min_avg_tps: usize, latency_thresholds: Vec<(Duration, LatencyType)>, check_no_restarts: bool, + max_expired_tps: Option, + max_failed_submission_tps: Option, wait_for_all_nodes_to_catchup: Option, // Maximum amount of CPU cores and memory bytes used by the nodes. system_metrics_threshold: Option, @@ -34,11 +36,13 @@ pub struct SuccessCriteria { } impl SuccessCriteria { - pub fn new(tps: usize) -> Self { + pub fn new(min_avg_tps: usize) -> Self { Self { - avg_tps: tps, + min_avg_tps, latency_thresholds: Vec::new(), check_no_restarts: false, + max_expired_tps: None, + max_failed_submission_tps: None, wait_for_all_nodes_to_catchup: None, system_metrics_threshold: None, chain_progress_check: None, @@ -50,6 +54,16 @@ impl SuccessCriteria { self } + pub fn add_max_expired_tps(mut self, max_expired_tps: usize) -> Self { + self.max_expired_tps = Some(max_expired_tps); + self + } + + pub fn add_max_failed_submission_tps(mut self, max_failed_submission_tps: usize) -> Self { + self.max_failed_submission_tps = Some(max_failed_submission_tps); + self + } + pub fn add_wait_for_catchup_s(mut self, duration_secs: u64) -> Self { self.wait_for_all_nodes_to_catchup = Some(Duration::from_secs(duration_secs)); self @@ -75,9 +89,34 @@ impl SuccessCriteria { pub struct SuccessCriteriaChecker {} impl SuccessCriteriaChecker { + pub fn check_core_for_success( + success_criteria: &SuccessCriteria, + _report: &mut TestReport, + stats_rate: &TxnStatsRate, + traffic_name: Option, + ) -> anyhow::Result<()> { + let traffic_name_addition = traffic_name + .map(|n| format!(" for {}", n)) + .unwrap_or_else(|| "".to_string()); + Self::check_throughput( + success_criteria.min_avg_tps, + success_criteria.max_expired_tps, + success_criteria.max_failed_submission_tps, + stats_rate, + &traffic_name_addition, + )?; + Self::check_latency( + &success_criteria.latency_thresholds, + stats_rate, + &traffic_name_addition, + )?; + Ok(()) + } + pub async fn check_for_success( success_criteria: &SuccessCriteria, swarm: &mut dyn Swarm, + report: &mut TestReport, stats: &TxnStats, window: Duration, start_time: i64, @@ -86,22 +125,19 @@ impl SuccessCriteriaChecker { end_version: u64, ) -> anyhow::Result<()> { println!( - "End to end duration: {}s, while txn emitter lasted: {}s", + "End to end duration: {}s, performance measured for: {}s", window.as_secs(), stats.lasted.as_secs() ); let stats_rate = stats.rate(); - // TODO: Add more success criteria like expired transactions, CPU, memory usage etc - let avg_tps = stats_rate.committed; - if avg_tps < success_criteria.avg_tps as u64 { - bail!( - "TPS requirement failed. 
Average TPS {}, minimum TPS requirement {}", - avg_tps, - success_criteria.avg_tps, - ) - } - Self::check_latency(&success_criteria.latency_thresholds, &stats_rate)?; + Self::check_throughput( + success_criteria.min_avg_tps, + success_criteria.max_expired_tps, + success_criteria.max_failed_submission_tps, + &stats_rate, + &"".to_string(), + )?; if let Some(timeout) = success_criteria.wait_for_all_nodes_to_catchup { swarm @@ -131,9 +167,15 @@ impl SuccessCriteriaChecker { } if let Some(chain_progress_threshold) = &success_criteria.chain_progress_check { - Self::check_chain_progress(swarm, chain_progress_threshold, start_version, end_version) - .await - .context("Failed check chain progress")?; + Self::check_chain_progress( + swarm, + report, + chain_progress_threshold, + start_version, + end_version, + ) + .await + .context("Failed check chain progress")?; } Ok(()) @@ -141,6 +183,7 @@ impl SuccessCriteriaChecker { async fn check_chain_progress( swarm: &mut dyn Swarm, + report: &mut TestReport, chain_progress_threshold: &StateProgressThreshold, start_version: u64, end_version: u64, @@ -212,36 +255,111 @@ impl SuccessCriteriaChecker { } let max_time_gap_secs = Duration::from_micros(max_time_gap).as_secs_f32(); + + let gap_text = format!( + "Max round gap was {} [limit {}] at version {}. Max no progress secs was {} [limit {}] at version {}.", + max_round_gap, + chain_progress_threshold.max_round_gap, + max_round_gap_version, + max_time_gap_secs, + chain_progress_threshold.max_no_progress_secs, + max_time_gap_version, + ); + if max_round_gap > chain_progress_threshold.max_round_gap || max_time_gap_secs > chain_progress_threshold.max_no_progress_secs { + bail!("Failed chain progress check. {}", gap_text); + } else { + println!("Passed progress check. {}", gap_text); + report.report_text(gap_text); + } + + Ok(()) + } + + pub fn check_tps( + min_avg_tps: usize, + stats_rate: &TxnStatsRate, + traffic_name_addition: &String, + ) -> anyhow::Result<()> { + let avg_tps = stats_rate.committed; + if avg_tps < min_avg_tps as u64 { bail!( - "Failed chain progress check. Max round gap was {} [limit {}] at version {}. Max no progress secs was {} [limit {}] at version {}.", - max_round_gap, - chain_progress_threshold.max_round_gap, - max_round_gap_version, - max_time_gap_secs, - chain_progress_threshold.max_no_progress_secs, - max_time_gap_version, + "TPS requirement{} failed. Average TPS {}, minimum TPS requirement {}. Full stats: {}", + traffic_name_addition, + avg_tps, + min_avg_tps, + stats_rate, ) } else { println!( - "Passed progress check. Max round gap was {} [limit {}] at version {}. Max no progress secs was {} [limit {}] at version {}.", - max_round_gap, - chain_progress_threshold.max_round_gap, - max_round_gap_version, - max_time_gap_secs, - chain_progress_threshold.max_no_progress_secs, - max_time_gap_version, - ) + "TPS is {} and is within limit of {}", + stats_rate.committed, min_avg_tps + ); + Ok(()) } + } + fn check_max_value( + max_config: Option, + stats_rate: &TxnStatsRate, + value: u64, + value_desc: &str, + traffic_name_addition: &String, + ) -> anyhow::Result<()> { + if let Some(max) = max_config { + if value > max as u64 { + bail!( + "{} requirement{} failed. {} TPS: average {}, maximum requirement {}. 
Full stats: {}", + value_desc, + traffic_name_addition, + value_desc, + value, + max, + stats_rate, + ) + } else { + println!( + "{} TPS is {} and is below max limit of {}", + value_desc, value, max + ); + Ok(()) + } + } else { + Ok(()) + } + } + + pub fn check_throughput( + min_avg_tps: usize, + max_expired_config: Option, + max_failed_submission_config: Option, + stats_rate: &TxnStatsRate, + traffic_name_addition: &String, + ) -> anyhow::Result<()> { + Self::check_tps(min_avg_tps, stats_rate, traffic_name_addition)?; + Self::check_max_value( + max_expired_config, + stats_rate, + stats_rate.expired, + "expired", + traffic_name_addition, + )?; + Self::check_max_value( + max_failed_submission_config, + stats_rate, + stats_rate.failed_submission, + "submission", + traffic_name_addition, + )?; Ok(()) } pub fn check_latency( latency_thresholds: &[(Duration, LatencyType)], stats_rate: &TxnStatsRate, + traffic_name_addition: &String, ) -> anyhow::Result<()> { let mut failures = Vec::new(); for (latency_threshold, latency_type) in latency_thresholds { @@ -255,13 +373,22 @@ impl SuccessCriteriaChecker { if latency > *latency_threshold { failures.push( format!( - "{:?} latency is {}s and exceeds limit of {}s", + "{:?} latency{} is {}s and exceeds limit of {}s", latency_type, + traffic_name_addition, latency.as_secs_f32(), latency_threshold.as_secs_f32() ) .to_string(), ); + } else { + println!( + "{:?} latency{} is {}s and is within limit of {}s", + latency_type, + traffic_name_addition, + latency.as_secs_f32(), + latency_threshold.as_secs_f32() + ); } } if !failures.is_empty() { diff --git a/testsuite/forge_test.py b/testsuite/forge_test.py index b5b3510b15de2..6932149ebd36b 100644 --- a/testsuite/forge_test.py +++ b/testsuite/forge_test.py @@ -39,6 +39,7 @@ main, sanitize_forge_resource_name, validate_forge_config, + GAR_REPO_NAME, ) from click.testing import CliRunner, Result @@ -56,6 +57,7 @@ from test_framework.shell import SpyShell, FakeShell, FakeCommand, RunResult from test_framework.time import FakeTime +from test_framework.cluster import Cloud # Show the entire diff when unittest fails assertion unittest.util._MAX_LENGTH = 2000 # type: ignore @@ -117,6 +119,7 @@ def fake_context( processes=None, time=None, mode=None, + multiregion=False, ) -> ForgeContext: return ForgeContext( shell=shell if shell else FakeShell(), @@ -145,7 +148,9 @@ def fake_context( image_tag="asdf", upgrade_image_tag="upgrade_asdf", forge_namespace="forge-potato", - forge_cluster=ForgeCluster(name="tomato", kubeconf="kubeconf"), + forge_cluster=ForgeCluster( + name="tomato", kubeconf="kubeconf", is_multiregion=multiregion + ), forge_test_suite="banana", forge_blocking=True, github_actions="false", @@ -239,6 +244,14 @@ def testK8sRunner(self) -> None: "kubectl --kubeconfig kubeconf get pods -n forge-potato", RunResult(0, b"Pods"), ), + FakeCommand( + "kubectl --kubeconfig kubeconf delete pod -n default -l forge-namespace=forge-potato --force", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf wait -n default --for=delete pod -l forge-namespace=forge-potato", + RunResult(0, b""), + ), ] ) forge_yaml = get_cwd() / "forge-test-runner-template.yaml" @@ -260,6 +273,67 @@ def testK8sRunner(self) -> None: filesystem.assert_reads(self) self.assertEqual(result.state, ForgeState.PASS, result.output) + def testK8sRunnerWithMultiregionCluster(self) -> None: + self.maxDiff = None + shell = SpyShell( + [ + FakeCommand( + "kubectl --kubeconfig kubeconf --context=karmada-apiserver delete pod -n default -l 
forge-namespace=forge-potato --force", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf wait -n default --for=delete pod -l forge-namespace=forge-potato", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf --context=karmada-apiserver apply -n default -f temp1", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf wait -n default --timeout=5m --for=condition=Ready pod/forge-potato-1659078000-asdf", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf logs -n default -f forge-potato-1659078000-asdf", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf get pod -n default forge-potato-1659078000-asdf -o jsonpath='{.status.phase}'", + RunResult(0, b"Succeeded"), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf get pods -n forge-potato", + RunResult(0, b"Pods"), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf --context=karmada-apiserver delete pod -n default -l forge-namespace=forge-potato --force", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig kubeconf wait -n default --for=delete pod -l forge-namespace=forge-potato", + RunResult(0, b""), + ), + ] + ) + forge_yaml = get_cwd() / "forge-test-runner-template.yaml" + template_fixture = get_fixture_path("forge-test-runner-template.fixture") + filesystem = SpyFilesystem( + { + "temp1": template_fixture.read_bytes(), + }, + { + "forge-test-runner-template.yaml": FILE_NOT_FOUND, + "testsuite/forge-test-runner-template.yaml": forge_yaml.read_bytes(), + }, + ) + context = fake_context(shell, filesystem, mode="k8s", multiregion=True) + runner = K8sForgeRunner() + result = runner.run(context) + shell.assert_commands(self) + filesystem.assert_writes(self) + filesystem.assert_reads(self) + self.assertEqual(result.state, ForgeState.PASS, result.output) + class TestFindRecentImage(unittest.TestCase): def testFindRecentImage(self) -> None: @@ -278,7 +352,31 @@ def testFindRecentImage(self) -> None: ] ) git = Git(shell) - image_tags = find_recent_images(shell, git, 1, "aptos/validator-testing") + image_tags = find_recent_images( + shell, git, 1, "validator-testing", cloud=Cloud.AWS + ) + self.assertEqual(list(image_tags), ["lychee"]) + shell.assert_commands(self) + + def testFindRecentImageGcp(self) -> None: + shell = SpyShell( + [ + FakeCommand("git rev-parse HEAD~0", RunResult(0, b"potato\n")), + FakeCommand( + f"crane manifest {GAR_REPO_NAME}/validator-testing:potato", + RunResult(1, b""), + ), + FakeCommand("git rev-parse HEAD~1", RunResult(0, b"lychee\n")), + FakeCommand( + f"crane manifest {GAR_REPO_NAME}/validator-testing:lychee", + RunResult(0, b""), + ), + ] + ) + git = Git(shell) + image_tags = find_recent_images( + shell, git, 1, "validator-testing", cloud=Cloud.GCP + ) self.assertEqual(list(image_tags), ["lychee"]) shell.assert_commands(self) @@ -294,7 +392,12 @@ def testFindRecentFailpointsImage(self) -> None: ) git = Git(shell) image_tags = find_recent_images_by_profile_or_features( - shell, git, 1, enable_performance_profile=False, enable_failpoints=True + shell, + git, + 1, + enable_performance_profile=False, + enable_failpoints=True, + cloud=Cloud.AWS, ) self.assertEqual(list(image_tags), ["failpoints_tomato"]) shell.assert_commands(self) @@ -316,6 +419,7 @@ def testFindRecentPerformanceImage(self) -> None: 1, enable_performance_profile=True, enable_failpoints=False, + cloud=Cloud.AWS, ) self.assertEqual(list(image_tags), ["performance_potato"]) shell.assert_commands(self) @@ -368,7 +472,7 @@ def 
testFindRecentFewImages( ] ) git = Git(shell) - images = find_recent_images(shell, git, 2, "aptos/validator") + images = find_recent_images(shell, git, 2, "validator", cloud=Cloud.AWS) self.assertEqual(list(images), ["crab", "shrimp"]) def testFailpointsProvidedImageTag(self) -> None: @@ -597,6 +701,16 @@ def testMain(self) -> None: "kubectl --kubeconfig temp1 get pods -n forge-perry-1659078000", RunResult(0, b""), ), + FakeCommand( + "kubectl --kubeconfig temp1 delete pod -n default -l forge-namespace=forge-perry-1659078000 " + "--force", + RunResult(0, b""), + ), + FakeCommand( + "kubectl --kubeconfig temp1 wait -n default --for=delete pod -l " + "forge-namespace=forge-perry-1659078000", + RunResult(0, b""), + ), ] ) filesystem = SpyFilesystem( diff --git a/testsuite/fuzzer/Cargo.toml b/testsuite/fuzzer/Cargo.toml new file mode 100644 index 0000000000000..5f07a38412d24 --- /dev/null +++ b/testsuite/fuzzer/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "fuzzer" +version = "0.1.0" +edition = "2021" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/testsuite/fuzzer/fuzz/.gitignore b/testsuite/fuzzer/fuzz/.gitignore new file mode 100644 index 0000000000000..1a45eee7760d2 --- /dev/null +++ b/testsuite/fuzzer/fuzz/.gitignore @@ -0,0 +1,4 @@ +target +corpus +artifacts +coverage diff --git a/testsuite/fuzzer/fuzz/Cargo.lock b/testsuite/fuzzer/fuzz/Cargo.lock new file mode 100644 index 0000000000000..ee1a7d22086d4 --- /dev/null +++ b/testsuite/fuzzer/fuzz/Cargo.lock @@ -0,0 +1,62 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "arbitrary" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" + +[[package]] +name = "cc" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +dependencies = [ + "jobserver", +] + +[[package]] +name = "fuzzer" +version = "0.1.0" + +[[package]] +name = "fuzzer-fuzz" +version = "0.0.0" +dependencies = [ + "fuzzer", + "libfuzzer-sys", +] + +[[package]] +name = "jobserver" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +dependencies = [ + "libc", +] + +[[package]] +name = "libc" +version = "0.2.144" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" + +[[package]] +name = "libfuzzer-sys" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "beb09950ae85a0a94b27676cccf37da5ff13f27076aa1adbc6545dd0d0e1bd4e" +dependencies = [ + "arbitrary", + "cc", + "once_cell", +] + +[[package]] +name = "once_cell" +version = "1.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" diff --git a/testsuite/fuzzer/fuzz/Cargo.toml b/testsuite/fuzzer/fuzz/Cargo.toml new file mode 100644 index 0000000000000..9c0da7a38815b --- /dev/null +++ b/testsuite/fuzzer/fuzz/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "fuzzer-fuzz" +version = "0.0.0" +publish = false +edition = "2021" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +aptos-consensus = { workspace = true, 
features = ["fuzzing"] } +aptos-consensus-types = { workspace = true, features = ["fuzzing"] } +aptos-types = { workspace = true } +arbitrary = "1.3.0" +bcs = { workspace = true } +libfuzzer-sys = "0.4" +move-binary-format = { workspace = true, features = ["fuzzing"] } +move-bytecode-verifier = { workspace = true } +move-core-types = { workspace = true, features = ["fuzzing"] } +move-vm-runtime = { workspace = true } +move-vm-test-utils = { workspace = true } +move-vm-types = { workspace = true, features = ["fuzzing"] } + +[[bin]] +name = "move_bytecode_verifier_code_unit" +path = "fuzz_targets/move/bytecode_verifier_code_unit.rs" +test = false +doc = false + +[[bin]] +name = "move_bytecode_verifier_mixed" +path = "fuzz_targets/move/bytecode_verifier_mixed.rs" +test = false +doc = false + +[[bin]] +name = "move_value_deserialize" +path = "fuzz_targets/move/value_deserialize.rs" +test = false +doc = false + +[[bin]] +name = "move_move_value_deserialize" +path = "fuzz_targets/move/move_value_deserialize.rs" +test = false +doc = false + +[[bin]] +name = "move_move_value_decorate" +path = "fuzz_targets/move/move_value_decorate.rs" +test = false +doc = false + +[[bin]] +name = "move_execute_entry_function" +path = "fuzz_targets/move/execute_entry_function.rs" +test = false +doc = false + +[[bin]] +name = "signed_transaction_deserialize" +path = "fuzz_targets/signed_transaction_deserialize.rs" +test = false +doc = false diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/bytecode_verifier_code_unit.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/bytecode_verifier_code_unit.rs new file mode 100644 index 0000000000000..50bdd44440963 --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/bytecode_verifier_code_unit.rs @@ -0,0 +1,82 @@ +// Copyright (c) The Move Contributors +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![no_main] +use libfuzzer_sys::fuzz_target; +use move_binary_format::file_format::{ + empty_module, AbilitySet, CodeUnit, Constant, FieldDefinition, FunctionDefinition, + FunctionHandle, FunctionHandleIndex, IdentifierIndex, ModuleHandleIndex, Signature, + SignatureIndex, + SignatureToken::{Address, Bool, U128, U64}, + StructDefinition, StructFieldInformation, StructHandle, StructHandleIndex, TypeSignature, + Visibility, +}; +use move_core_types::{account_address::AccountAddress, ident_str}; + +fuzz_target!(|code_unit: CodeUnit| { + let mut module = empty_module(); + module.version = 5; + + module.struct_handles.push(StructHandle { + module: ModuleHandleIndex(0), + name: IdentifierIndex(1), + abilities: AbilitySet::ALL, + type_parameters: vec![], + }); + + let fun_handle = FunctionHandle { + module: ModuleHandleIndex(0), + name: IdentifierIndex(2), + parameters: SignatureIndex(0), + return_: SignatureIndex(1), + type_parameters: vec![], + }; + + module.function_handles.push(fun_handle); + + module.signatures.pop(); + module.signatures.push(Signature(vec![ + Address, U64, Address, Address, U128, Address, U64, U64, U64, + ])); + module.signatures.push(Signature(vec![])); + module + .signatures + .push(Signature(vec![Address, Bool, Address])); + + module.identifiers.extend( + vec![ + ident_str!("zf_hello_world").into(), + ident_str!("awldFnU18mlDKQfh6qNfBGx8X").into(), + ident_str!("aQPwJNHyAHpvJ").into(), + ident_str!("aT7ZphKTrKcYCwCebJySrmrKlckmnL5").into(), + ident_str!("arYpsFa2fvrpPJ").into(), + ] + .into_iter(), + ); + module.address_identifiers.push(AccountAddress::random()); + + module.constant_pool.push(Constant { + type_: Address, + data: 
AccountAddress::ZERO.into_bytes().to_vec(), + }); + + module.struct_defs.push(StructDefinition { + struct_handle: StructHandleIndex(0), + field_information: StructFieldInformation::Declared(vec![FieldDefinition { + name: IdentifierIndex::new(3), + signature: TypeSignature(Address), + }]), + }); + + let fun_def = FunctionDefinition { + code: Some(code_unit), + function: FunctionHandleIndex(0), + visibility: Visibility::Public, + is_entry: false, + acquires_global_resources: vec![], + }; + + module.function_defs.push(fun_def); + let _ = move_bytecode_verifier::verify_module(&module); +}); diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/bytecode_verifier_mixed.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/bytecode_verifier_mixed.rs new file mode 100644 index 0000000000000..cacd65bfedefa --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/bytecode_verifier_mixed.rs @@ -0,0 +1,96 @@ +// Copyright (c) The Move Contributors +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![no_main] +use arbitrary::Arbitrary; +use libfuzzer_sys::fuzz_target; +use move_binary_format::file_format::{ + empty_module, AbilitySet, Bytecode, CodeUnit, Constant, FieldDefinition, FunctionDefinition, + FunctionHandle, FunctionHandleIndex, IdentifierIndex, ModuleHandleIndex, Signature, + SignatureIndex, SignatureToken, + SignatureToken::{Address, Bool}, + StructDefinition, StructFieldInformation, StructHandle, StructHandleIndex, TypeSignature, + Visibility, +}; +use move_core_types::{account_address::AccountAddress, ident_str}; + +#[derive(Arbitrary, Debug)] +struct Mixed { + code: Vec, + abilities: AbilitySet, + param_types: Vec, + return_type: Option, +} + +fuzz_target!(|mix: Mixed| { + let mut module = empty_module(); + module.version = 5; + + module.struct_handles.push(StructHandle { + module: ModuleHandleIndex(0), + name: IdentifierIndex(1), + abilities: mix.abilities, + type_parameters: vec![], + }); + + let fun_handle = FunctionHandle { + module: ModuleHandleIndex(0), + name: IdentifierIndex(2), + parameters: SignatureIndex(0), + return_: SignatureIndex(1), + type_parameters: vec![], + }; + + module.function_handles.push(fun_handle); + + module.signatures.pop(); + module.signatures.push(Signature(mix.param_types)); + module.signatures.push(Signature( + mix.return_type.map(|s| vec![s]).unwrap_or_default(), + )); + module + .signatures + .push(Signature(vec![Address, Bool, Address])); + + module.identifiers.extend( + vec![ + ident_str!("zf_hello_world").into(), + ident_str!("awldFnU18mlDKQfh6qNfBGx8X").into(), + ident_str!("aQPwJNHyAHpvJ").into(), + ident_str!("aT7ZphKTrKcYCwCebJySrmrKlckmnL5").into(), + ident_str!("arYpsFa2fvrpPJ").into(), + ] + .into_iter(), + ); + module.address_identifiers.push(AccountAddress::random()); + + module.constant_pool.push(Constant { + type_: Address, + data: AccountAddress::ZERO.into_bytes().to_vec(), + }); + + module.struct_defs.push(StructDefinition { + struct_handle: StructHandleIndex(0), + field_information: StructFieldInformation::Declared(vec![FieldDefinition { + name: IdentifierIndex::new(3), + signature: TypeSignature(Address), + }]), + }); + + let code_unit = CodeUnit { + code: mix.code, + locals: SignatureIndex(0), + }; + + let fun_def = FunctionDefinition { + code: Some(code_unit), + function: FunctionHandleIndex(0), + visibility: Visibility::Public, + is_entry: false, + acquires_global_resources: vec![], + }; + + module.function_defs.push(fun_def); + let _ = move_bytecode_verifier::verify_module(&module); +}); diff --git 
a/testsuite/fuzzer/fuzz/fuzz_targets/move/execute_entry_function.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/execute_entry_function.rs new file mode 100644 index 0000000000000..ab979771750c6 --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/execute_entry_function.rs @@ -0,0 +1,54 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![no_main] +use arbitrary::Arbitrary; +use libfuzzer_sys::fuzz_target; +use move_binary_format::file_format::CompiledModule; +use move_core_types::{ + account_address::AccountAddress, identifier::IdentStr, language_storage::TypeTag, +}; +use move_vm_runtime::move_vm::MoveVM; +use move_vm_test_utils::{gas_schedule::GasStatus, InMemoryStorage}; + +#[derive(Arbitrary, Debug)] +struct FuzzData { + cm: CompiledModule, + ident: String, + ty_arg: Vec, + args: Vec>, + account_address: AccountAddress, +} + +fuzz_target!(|fuzz_data: FuzzData| { + let mut cm_serialized = Vec::with_capacity(65536); + if fuzz_data.cm.serialize(&mut cm_serialized).is_err() { + return; + } + + if move_bytecode_verifier::verify_module(&fuzz_data.cm).is_err() { + return; + } + + let vm = MoveVM::new(vec![]).unwrap(); + let storage = InMemoryStorage::new(); + let mut session = vm.new_session(&storage); + let mut gas = GasStatus::new_unmetered(); + + if session + .publish_module(cm_serialized, fuzz_data.account_address, &mut gas) + .is_err() + { + return; + } + + let ident = + IdentStr::new(fuzz_data.ident.as_str()).unwrap_or_else(|_| IdentStr::new("f").unwrap()); + let _ = session.execute_entry_function( + &fuzz_data.cm.self_id(), + ident, + fuzz_data.ty_arg, + fuzz_data.args, + &mut gas, + ); +}); diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/move_value_decorate.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/move_value_decorate.rs new file mode 100644 index 0000000000000..b91192b28cf3d --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/move_value_decorate.rs @@ -0,0 +1,35 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![no_main] +use arbitrary::Arbitrary; +use libfuzzer_sys::fuzz_target; +use move_core_types::value::{MoveTypeLayout, MoveValue}; + +mod utils; + +#[derive(Arbitrary, Debug)] +struct FuzzData { + move_value: MoveValue, + layout: MoveTypeLayout, +} + +fuzz_target!(|fuzz_data: FuzzData| { + if !utils::is_valid_layout(&fuzz_data.layout) { + return; + } + + // Undecorate value + let move_value = fuzz_data.move_value.clone(); + let undecorated_move_value = move_value.undecorate(); + + // Decorate value + let move_value = fuzz_data.move_value.clone(); + let decorated_move_value = move_value.decorate(&fuzz_data.layout); + + // Undecorate decorated value + decorated_move_value.undecorate(); + + // Decorate undecorated value + undecorated_move_value.decorate(&fuzz_data.layout); +}); diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/move_value_deserialize.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/move_value_deserialize.rs new file mode 100644 index 0000000000000..3c130d708e3bd --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/move_value_deserialize.rs @@ -0,0 +1,22 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![no_main] +use arbitrary::Arbitrary; +use libfuzzer_sys::fuzz_target; +use move_core_types::value::{MoveTypeLayout, MoveValue}; + +mod utils; + +#[derive(Arbitrary, Debug)] +struct FuzzData { + data: Vec, + layout: MoveTypeLayout, +} + +fuzz_target!(|fuzz_data: FuzzData| { + if fuzz_data.data.is_empty() || !utils::is_valid_layout(&fuzz_data.layout) { + 
return; + } + let _ = MoveValue::simple_deserialize(&fuzz_data.data, &fuzz_data.layout); +}); diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/utils.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/utils.rs new file mode 100644 index 0000000000000..370261b0da715 --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/utils.rs @@ -0,0 +1,23 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use move_core_types::value::{MoveStructLayout, MoveTypeLayout}; + +pub(crate) fn is_valid_layout(layout: &MoveTypeLayout) -> bool { + use MoveTypeLayout as L; + + match layout { + L::Bool | L::U8 | L::U16 | L::U32 | L::U64 | L::U128 | L::U256 | L::Address | L::Signer => { + true + }, + L::Vector(layout) => is_valid_layout(layout), + L::Struct(struct_layout) => { + if !matches!(struct_layout, MoveStructLayout::Runtime(_)) + || struct_layout.fields().is_empty() + { + return false; + } + struct_layout.fields().iter().all(is_valid_layout) + }, + } +} diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/value_deserialize.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/value_deserialize.rs new file mode 100644 index 0000000000000..adf71d8435fe3 --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/value_deserialize.rs @@ -0,0 +1,23 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![no_main] +use arbitrary::Arbitrary; +use libfuzzer_sys::fuzz_target; +use move_core_types::value::MoveTypeLayout; +use move_vm_types::values::Value; + +mod utils; + +#[derive(Arbitrary, Debug)] +struct FuzzData { + data: Vec<u8>, + layout: MoveTypeLayout, +} + +fuzz_target!(|fuzz_data: FuzzData| { + if fuzz_data.data.is_empty() || !utils::is_valid_layout(&fuzz_data.layout) { + return; + } + let _ = Value::simple_deserialize(&fuzz_data.data, &fuzz_data.layout); +}); diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/signed_transaction_deserialize.rs b/testsuite/fuzzer/fuzz/fuzz_targets/signed_transaction_deserialize.rs new file mode 100644 index 0000000000000..fd2cfc34edbf6 --- /dev/null +++ b/testsuite/fuzzer/fuzz/fuzz_targets/signed_transaction_deserialize.rs @@ -0,0 +1,16 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![no_main] +use aptos_types::transaction::SignedTransaction; +use arbitrary::Arbitrary; +use libfuzzer_sys::fuzz_target; + +#[derive(Arbitrary, Debug)] +struct FuzzData { + data: Vec<u8>, +} + +fuzz_target!(|fuzz_data: FuzzData| { + let _ = bcs::from_bytes::<SignedTransaction>(&fuzz_data.data); +}); diff --git a/testsuite/fuzzer/google-oss-fuzz/Dockerfile b/testsuite/fuzzer/google-oss-fuzz/Dockerfile new file mode 100644 index 0000000000000..8d49e8a2663a7 --- /dev/null +++ b/testsuite/fuzzer/google-oss-fuzz/Dockerfile @@ -0,0 +1,5 @@ +FROM gcr.io/oss-fuzz-base/base-builder-rust +RUN apt-get update && apt-get install -y make autoconf automake libclang-dev libtool pkg-config +RUN git clone --depth=1 https://github.com/aptos-labs/aptos-core.git +WORKDIR aptos-core +COPY build.sh $SRC diff --git a/testsuite/fuzzer/google-oss-fuzz/build.sh b/testsuite/fuzzer/google-oss-fuzz/build.sh new file mode 100644 index 0000000000000..eb6962ffae4e0 --- /dev/null +++ b/testsuite/fuzzer/google-oss-fuzz/build.sh @@ -0,0 +1,11 @@ +#!/bin/bash -eu + +NIGHTLY_VERSION="nightly-2023-01-01" # bitvec does not compile with latest nightly + +rustup install $NIGHTLY_VERSION +cd testsuite/fuzzer + +RUSTFLAGS="$RUSTFLAGS --cfg tokio_unstable" cargo +$NIGHTLY_VERSION fuzz build -O -a +for fuzzer in $(cat fuzz/Cargo.toml | grep "name = " | grep -v "fuzzer-fuzz" | cut -d'"' -f2); do
+ cp ../../target/x86_64-unknown-linux-gnu/release/$fuzzer $OUT/ +done \ No newline at end of file diff --git a/testsuite/fuzzer/google-oss-fuzz/project.yaml b/testsuite/fuzzer/google-oss-fuzz/project.yaml new file mode 100644 index 0000000000000..660410573bbcf --- /dev/null +++ b/testsuite/fuzzer/google-oss-fuzz/project.yaml @@ -0,0 +1,12 @@ +homepage: "https://aptos.dev" +language: rust +primary_contact: "gerardo@aptoslabs.com" +main_repo: "https://github.com/aptos-labs/aptos-core" +auto_ccs: + - "davidiw@aptoslabs.com" + - "security@aptoslabs.com" + - "wg@aptoslabs.com" +sanitizers: + - address +fuzzing_engines: + - libfuzzer \ No newline at end of file diff --git a/testsuite/fuzzer/src/main.rs b/testsuite/fuzzer/src/main.rs new file mode 100644 index 0000000000000..8b4b7415a1a1b --- /dev/null +++ b/testsuite/fuzzer/src/main.rs @@ -0,0 +1,5 @@ +// Copyright © Aptos Foundation + +fn main() { + println!("Hello, world!"); +} diff --git a/testsuite/fuzzer/test-fuzzers.sh b/testsuite/fuzzer/test-fuzzers.sh new file mode 100755 index 0000000000000..b586988fccb75 --- /dev/null +++ b/testsuite/fuzzer/test-fuzzers.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +export RUSTFLAGS="${RUSTFLAGS} --cfg tokio_unstable" +export RUNS="1000" + +for fuzzer in $(cargo +nightly fuzz list); do + echo "[info] compiling and running ${fuzzer} ${RUNS} times" + cargo +nightly fuzz run -O -a $fuzzer -- -runs=$RUNS + if [ "$?" -ne "0" ]; then + echo "[error] failed to run ${fuzzer}" + return -1 + else + echo "[ok] ${fuzzer}" + fi +done \ No newline at end of file diff --git a/testsuite/indexer_grpc_local.py b/testsuite/indexer_grpc_local.py new file mode 100755 index 0000000000000..6dd30b04be309 --- /dev/null +++ b/testsuite/indexer_grpc_local.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python3 + +from enum import Enum +import json +import platform +import time +import os +import argparse +import logging +from dataclasses import dataclass +from typing import List +from test_framework.logging import init_logging, log +from test_framework.shell import Shell, LocalShell +from test_framework.reqwest import SimpleHttpClient, HttpClient + +GRPCURL_PATH = os.environ.get("GRPCURL_PATH", "grpcurl") + +INDEXER_GRPC_DOCKER_COMPOSE_FILE = "docker/compose/indexer-grpc/docker-compose.yaml" +VALIDATOR_TESTNET_DOCKER_COMPOSE_FILE = ( + "docker/compose/validator-testnet/docker-compose.yaml" +) + +INDEXER_FULLNODE_REST_API_URL = "http://localhost:8080" +INDEXER_DATA_SERVICE_READINESS_URL = "http://localhost:18084/readiness" +GRPC_INDEXER_FULLNODE_URL = "localhost:50051" +GRPC_DATA_SERVICE_URL = "localhost:50052" + +SHARED_DOCKER_VOLUME_NAMES = ["aptos-shared", "indexer-grpc-file-store"] + +WAIT_TESTNET_START_TIMEOUT_SECS = 60 +WAIT_INDEXER_GRPC_START_TIMEOUT_SECS = 60 +GRPC_PROGRESS_THRESHOLD_SECS = 10 + + +@dataclass +class SystemContext: + shell: Shell + http_client: HttpClient + + +class DockerComposeAction(Enum): + UP = "up" + DOWN = "down" + + +class Subcommand(Enum): + START = "start" + STOP = "stop" + WIPE = "wipe" + + +class StartSubcommand(Enum): + NO_INDEXER_GRPC = "no-indexer-grpc" + + +class DockerComposeError(Exception): + def __init__(self, message="Docker Compose Error"): + self.message = message + super().__init__(self.message) + + +def run_docker_compose( + shell: Shell, + compose_file_path: str, + compose_action: DockerComposeAction, + extra_args: List[str] = [], +) -> None: + log.info(f"Running docker-compose {compose_action.value} on {compose_file_path}") + try: + shell.run( + [ + "docker-compose", + "-f", + compose_file_path, + 
compose_action.value, + ] + + (["--detach"] if compose_action == DockerComposeAction.UP else []) + + extra_args, + stream_output=True, + ) + except Exception as e: + if "No such file or directory" in str(e): + raise DockerComposeError("Failed to find the compose file") from e + else: + raise e + + +def start_single_validator_testnet(shell: Shell) -> None: + run_docker_compose( + shell, VALIDATOR_TESTNET_DOCKER_COMPOSE_FILE, DockerComposeAction.UP + ) + + +def start_indexer_grpc(shell: Shell, redis_only: bool = False) -> None: + extra_indexer_grpc_docker_args = [] + if redis_only: + extra_indexer_grpc_docker_args = [ + "--scale", + "indexer-grpc-cache-worker=0", + "--scale", + "indexer-grpc-file-store=0", + "--scale", + "indexer-grpc-data-service=0", + ] + + run_docker_compose( + shell, + INDEXER_GRPC_DOCKER_COMPOSE_FILE, + DockerComposeAction.UP, + extra_args=extra_indexer_grpc_docker_args, + ) + + +def stop_single_validator_testnet(shell: Shell) -> None: + run_docker_compose( + shell, VALIDATOR_TESTNET_DOCKER_COMPOSE_FILE, DockerComposeAction.DOWN + ) + + +def stop_indexer_grpc(shell: Shell) -> None: + run_docker_compose( + shell, INDEXER_GRPC_DOCKER_COMPOSE_FILE, DockerComposeAction.DOWN + ) + + +def wait_for_testnet_progress(client: HttpClient) -> int: + """Wait for the testnet to start and return the latest version""" + r = None + ledger_version_key = "ledger_version" + for _ in range(WAIT_TESTNET_START_TIMEOUT_SECS): + try: + r = client.get(INDEXER_FULLNODE_REST_API_URL + "/v1") + if r.status_code == 200: + response = json.loads(r.text) + log.debug(f"LedgerInfo: {response}") + version = int(response[ledger_version_key]) + if version > 0: # we're making some progress + return version + except KeyError as e: + log.info(f"Key not found: {e}") + except Exception as e: + log.info(f"Exception: {e}") + time.sleep(5) + + raise Exception("Testnet failed to start within timeout period") + + +def wait_for_indexer_grpc_progress(shell: Shell, client: HttpClient) -> None: + """Wait for the indexer grpc to start and try streaming from it""" + log.info( + f"Waiting for indexer grpc to start for {WAIT_INDEXER_GRPC_START_TIMEOUT_SECS}s" + ) + indexer_grpc_healthcheck_up = False + retry_secs = 5 + for _ in range(WAIT_INDEXER_GRPC_START_TIMEOUT_SECS // retry_secs): + try: + r = client.get(INDEXER_DATA_SERVICE_READINESS_URL) + if r.status_code == 200: + log.info("Indexer grpc data service is up") + indexer_grpc_healthcheck_up = True + break + except Exception as e: + log.info(f"Exception: {e}") + time.sleep(retry_secs) + + if not indexer_grpc_healthcheck_up: + raise Exception("Indexer grpc failed to start within timeout period") + + indexer_grpc_data_service_up = False + log.info( + f"Attempting to stream from indexer grpc for {GRPC_PROGRESS_THRESHOLD_SECS}s" + ) + res = None + for _ in range(GRPC_PROGRESS_THRESHOLD_SECS // retry_secs): + res = shell.run( + [ + "timeout", + f"{GRPC_PROGRESS_THRESHOLD_SECS}s", + GRPCURL_PATH, + "-max-msg-sz", + "10000000", + "-d", + '{ "starting_version": 0 }', + "-H", + "x-aptos-data-authorization:dummy_token", + "-import-path", + "crates/aptos-protos/proto", + "-proto", + "aptos/indexer/v1/raw_data.proto", + "-plaintext", + GRPC_DATA_SERVICE_URL, + "aptos.indexer.v1.RawData/GetTransactions", + ], + ) + if ( + res.exit_code == 124 + ): # timeout exits with 124 if it reaches the end of the timeout + indexer_grpc_data_service_up = True + break + time.sleep(retry_secs) + + if not indexer_grpc_data_service_up: + if res: + log.info(f"Stream output: {res.unwrap().decode()}") + 
raise Exception( + "Stream interrupted before reaching the end of the timeout. There might be something wrong" + ) + log.info("Stream finished successfully") + + +def start(context: SystemContext, no_indexer_grpc: bool = False) -> None: + start_single_validator_testnet(context.shell) + + # wait for progress + latest_version = wait_for_testnet_progress(context.http_client) + log.info(f"TESTNET STARTED: latest version @ {latest_version}") + + start_indexer_grpc(context.shell, redis_only=no_indexer_grpc) + + if not no_indexer_grpc: + wait_for_indexer_grpc_progress(context.shell, context.http_client) + + +def stop(context: SystemContext) -> None: + stop_indexer_grpc(context.shell) + stop_single_validator_testnet(context.shell) + + +def wipe(context: SystemContext) -> None: + stop(context) # call stop() just for sanity + context.shell.run(["docker", "volume", "rm"] + SHARED_DOCKER_VOLUME_NAMES) + + +def main() -> None: + # set envs based on platform, if it's not already overriden + if not os.environ.get("REDIS_IMAGE_REPO"): + if platform.system() == "Darwin": + os.environ["REDIS_IMAGE_REPO"] = "arm64v8/redis" + + parser = argparse.ArgumentParser( + prog="Indexer GRPC Local", + description="Spins up an indexer GRPC locally using a single validator testnet", + ) + parser.add_argument("--verbose", "-v", action="store_true") + subparser = parser.add_subparsers(dest="subcommand", required=True) + start_parser = subparser.add_parser( + Subcommand.START.value, help="Start the indexer GRPC setup" + ) + start_parser.add_argument( + f"--{StartSubcommand.NO_INDEXER_GRPC.value}", + dest="no_indexer_grpc", + action="store_true", + ) + subparser.add_parser(Subcommand.STOP.value, help="Stop the indexer GRPC setup") + subparser.add_parser(Subcommand.WIPE.value, help="Completely wipe the storage") + args = parser.parse_args() + # init logging + init_logging(logger=log, print_metadata=True) + if args.verbose: + log.setLevel(logging.DEBUG) + + log.debug(f"args: {args}") + + context = SystemContext( + shell=LocalShell(), + http_client=SimpleHttpClient(), + ) + + subcommand = Subcommand(args.subcommand) + + if subcommand == Subcommand.START: + start( + context, + args.no_indexer_grpc, + ) + elif subcommand == Subcommand.STOP: + stop(context) + log.info("To wipe all data, run: $ ./testsuite/indexer_grpc_local.py wipe") + log.info("To start again, run: $ ./testsuite/indexer_grpc_local.py start") + elif subcommand == Subcommand.WIPE: + wipe(context) + + +if __name__ == "__main__": + main() diff --git a/testsuite/indexer_grpc_local_test.py b/testsuite/indexer_grpc_local_test.py new file mode 100755 index 0000000000000..ead22305b24df --- /dev/null +++ b/testsuite/indexer_grpc_local_test.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 + +import unittest +from indexer_grpc_local import * +from test_framework.shell import FakeCommand, SpyShell, RunResult + + +class TestIndexerGrpcLocal(unittest.TestCase): + def test_run_docker_compose(self): + docker_compose_file = "docker-compose.yaml" + extra_args = [ + "--scale", + "banana=0", + "--scale", + "apple=0", + "--scale", + "orange=0", + ] + extra_args_str = " ".join(extra_args) + shell = SpyShell( + [ + FakeCommand( + f"docker-compose -f {docker_compose_file} up --detach", + RunResult(0, b""), + ), + FakeCommand( + f"docker-compose -f {docker_compose_file} down", + RunResult(0, b""), + ), + FakeCommand( + f"docker-compose -f {docker_compose_file} up --detach {extra_args_str}", + RunResult(0, b""), + ), + ] + ) + run_docker_compose(shell, docker_compose_file, 
DockerComposeAction.UP) + run_docker_compose(shell, docker_compose_file, DockerComposeAction.DOWN) + run_docker_compose( + shell, + docker_compose_file, + DockerComposeAction.UP, + extra_args=extra_args, + ) + shell.assert_commands(self) diff --git a/testsuite/module-publish/src/main.rs b/testsuite/module-publish/src/main.rs index f23ae4fb9a92f..0503e8e0d192d 100644 --- a/testsuite/module-publish/src/main.rs +++ b/testsuite/module-publish/src/main.rs @@ -23,7 +23,7 @@ fn additional_packages() -> Vec<(&'static str, &'static str)> { // Pairs of (package_name, package_path) vec![( "ambassador_token", - "../../aptos-move/move-examples/token_objects/ambassador/move", + "../../aptos-move/move-examples/token_objects/ambassador", )] } diff --git a/testsuite/parallel_execution_performance.py b/testsuite/parallel_execution_performance.py index df627af4c808f..3db98347f5214 100755 --- a/testsuite/parallel_execution_performance.py +++ b/testsuite/parallel_execution_performance.py @@ -11,7 +11,7 @@ THRESHOLDS = { "1k_8": 11000, "1k_16": 13000, - "1k_32": 15000, + # "1k_32": 13000, "10k_8": 23000, "10k_16": 37000, "10k_32": 48000, @@ -22,14 +22,14 @@ SPEEDUPS = { "1k_8": 3, - "1k_16": 4, - "1k_32": 4, - "10k_8": 5, - "10k_16": 8, - "10k_32": 11, - "50k_8": 6, - "50k_16": 9, - "50k_32": 12, + "1k_16": 3, + # "1k_32": 4, + "10k_8": 4, + "10k_16": 6, + "10k_32": 9, + "50k_8": 3, + "50k_16": 5, + "50k_32": 8, } THRESHOLDS_NOISE = 0.20 @@ -58,6 +58,8 @@ # print(output) for i, block_size in enumerate(BLOCK_SIZES): + if threads == 32 and block_size == "1k": + continue tps_index = i * 2 speedup_index = i * 2 + 1 key = f"{block_size}_{threads}" @@ -96,6 +98,8 @@ for block_size in BLOCK_SIZES: for threads in THREADS: + if threads == 32 and block_size == "1k": + continue key = f"{block_size}_{threads}" print( f"Average Parallel TPS with {threads} threads for {block_size} block: TPS {tps_set[key]}, Threshold TPS: {THRESHOLDS[key]}, Speedup: {speedups_set[key]}x, Speedup Threshold: {SPEEDUPS[key]}x" diff --git a/testsuite/pyproject.toml b/testsuite/pyproject.toml index 627e132080fe2..038a495c60f93 100644 --- a/testsuite/pyproject.toml +++ b/testsuite/pyproject.toml @@ -23,3 +23,4 @@ build-backend = "poetry.core.masonry.api" [tool.pyright] reportUndefinedVariable = "none" +typeCheckingMode = "basic" # TODO(rustielin): eventually make this strict diff --git a/testsuite/sequential_execution_performance.py b/testsuite/sequential_execution_performance.py index 4f39d7e38786a..b1a039d0efed7 100755 --- a/testsuite/sequential_execution_performance.py +++ b/testsuite/sequential_execution_performance.py @@ -4,7 +4,7 @@ # Set the tps threshold for block size 1k, 10k and 50k BLOCK_SIZES = ["1k", "10k", "50k"] -THRESHOLDS = {"1k": 3000, "10k": 4000, "50k": 7000} +THRESHOLDS = {"1k": 3500, "10k": 5200, "50k": 12600} THRESHOLD_NOISE = 0.1 # Run the VM sequential execution with performance optimizations enabled diff --git a/testsuite/single_node_performance.py b/testsuite/single_node_performance.py index 301af0a791232..62a354044c424 100755 --- a/testsuite/single_node_performance.py +++ b/testsuite/single_node_performance.py @@ -21,15 +21,15 @@ ("coin-transfer", False, 1): (12600.0, True), ("coin-transfer", True, 1): (22100.0, True), ("account-generation", False, 1): (11000.0, True), - ("account-generation", True, 1): (17600.0, True), + ("account-generation", True, 1): (20000.0, True), # changed to not use account_pool. either recalibrate or add here to use account pool. 
- ("account-resource32-b", False, 1): (13000.0, False), + ("account-resource32-b", False, 1): (15000.0, False), ("modify-global-resource", False, 1): (3700.0, True), ("modify-global-resource", False, 10): (10800.0, True), # seems to have changed, disabling as land_blocking, until recalibrated ("publish-package", False, 1): (159.0, False), ("batch100-transfer", False, 1): (350, True), - ("batch100-transfer", True, 1): (553, True), + ("batch100-transfer", True, 1): (630, True), ("token-v1ft-mint-and-transfer", False, 1): (1650.0, True), ("token-v1ft-mint-and-transfer", False, 20): (7100.0, True), ("token-v1nft-mint-and-transfer-sequential", False, 1): (1100.0, True), @@ -222,7 +222,7 @@ def print_table( warnings = [] with tempfile.TemporaryDirectory() as tmpdirname: - create_db_command = f"cargo run {BUILD_FLAG} -- --block-size {BLOCK_SIZE} --concurrency-level {CONCURRENCY_LEVEL} --use-state-kv-db --use-sharded-state-merkle-db create-db --data-dir {tmpdirname}/db --num-accounts {NUM_ACCOUNTS}" + create_db_command = f"cargo run {BUILD_FLAG} -- --block-size {BLOCK_SIZE} --concurrency-level {CONCURRENCY_LEVEL} --split-ledger-db --use-sharded-state-merkle-db create-db --data-dir {tmpdirname}/db --num-accounts {NUM_ACCOUNTS}" output = execute_command(create_db_command) results = [] @@ -237,7 +237,7 @@ def print_table( executor_type = "native" if use_native_executor else "VM" use_native_executor_str = "--use-native-executor" if use_native_executor else "" - common_command_suffix = f"{use_native_executor_str} --generate-then-execute --transactions-per-sender 1 --block-size {cur_block_size} --use-state-kv-db --use-sharded-state-merkle-db run-executor --transaction-type {transaction_type} --module-working-set-size {module_working_set_size} --main-signer-accounts {MAIN_SIGNER_ACCOUNTS} --additional-dst-pool-accounts {ADDITIONAL_DST_POOL_ACCOUNTS} --data-dir {tmpdirname}/db --checkpoint-dir {tmpdirname}/cp" + common_command_suffix = f"{use_native_executor_str} --generate-then-execute --transactions-per-sender 1 --block-size {cur_block_size} --split-ledger-db --use-sharded-state-merkle-db run-executor --transaction-type {transaction_type} --module-working-set-size {module_working_set_size} --main-signer-accounts {MAIN_SIGNER_ACCOUNTS} --additional-dst-pool-accounts {ADDITIONAL_DST_POOL_ACCOUNTS} --data-dir {tmpdirname}/db --checkpoint-dir {tmpdirname}/cp" concurrency_level_results = {} diff --git a/testsuite/smoke-test/src/full_nodes.rs b/testsuite/smoke-test/src/full_nodes.rs index bf4daa01549b9..7abff7eafdf0f 100644 --- a/testsuite/smoke-test/src/full_nodes.rs +++ b/testsuite/smoke-test/src/full_nodes.rs @@ -29,6 +29,7 @@ async fn test_full_node_basic_flow() { let version = swarm.versions().max().unwrap(); let pfn_peer_id = swarm .add_full_node(&version, NodeConfig::get_default_pfn_config()) + .await .unwrap(); for fullnode in swarm.full_nodes_mut() { fullnode @@ -213,7 +214,7 @@ async fn test_private_full_node() { NetworkId::Public, PeerRole::PreferredUpstream, ); - let private = swarm.add_full_node(&version, private_config).unwrap(); + let private = swarm.add_full_node(&version, private_config).await.unwrap(); // And connect the user to the private swarm add_node_to_seeds( @@ -222,7 +223,7 @@ async fn test_private_full_node() { NetworkId::Public, PeerRole::PreferredUpstream, ); - let user = swarm.add_full_node(&version, user_config).unwrap(); + let user = swarm.add_full_node(&version, user_config).await.unwrap(); swarm .wait_for_connectivity(Instant::now() + 
Duration::from_secs(MAX_CONNECTIVITY_WAIT_SECS)) diff --git a/testsuite/smoke-test/src/fullnode.rs b/testsuite/smoke-test/src/fullnode.rs index 0895a6e24bcbb..fd7828b5db477 100644 --- a/testsuite/smoke-test/src/fullnode.rs +++ b/testsuite/smoke-test/src/fullnode.rs @@ -20,6 +20,7 @@ async fn test_indexer() { let version = swarm.versions().max().unwrap(); let fullnode_peer_id = swarm .add_full_node(&version, NodeConfig::get_default_pfn_config()) + .await .unwrap(); let validator_peer_id = swarm.validators().next().unwrap().peer_id(); let _vfn_peer_id = swarm diff --git a/testsuite/smoke-test/src/genesis.rs b/testsuite/smoke-test/src/genesis.rs index 0094584e89613..75ae619ace1bd 100644 --- a/testsuite/smoke-test/src/genesis.rs +++ b/testsuite/smoke-test/src/genesis.rs @@ -233,7 +233,7 @@ async fn test_genesis_transaction_flow() { (response.inner().epoch, response.inner().version) }; - let (backup_path, snapshot_ver) = db_backup( + let (backup_path, _) = db_backup( env.validators() .next() .unwrap() @@ -271,8 +271,8 @@ async fn test_genesis_transaction_flow() { backup_path.path(), db_dir.as_path(), &[waypoint], - node.config().storage.rocksdb_configs.use_state_kv_db, - Some(snapshot_ver), + node.config().storage.rocksdb_configs.split_ledger_db, + None, ); node.start().unwrap(); diff --git a/testsuite/smoke-test/src/network.rs b/testsuite/smoke-test/src/network.rs index 7b0fcf4941430..ed945499f95de 100644 --- a/testsuite/smoke-test/src/network.rs +++ b/testsuite/smoke-test/src/network.rs @@ -75,6 +75,7 @@ async fn test_connection_limiting() { peer_set, ), ) + .await .unwrap(); swarm .fullnode_mut(pfn_peer_id) @@ -115,6 +116,7 @@ async fn test_connection_limiting() { peer_set, ), ) + .await .unwrap(); // This node should fail to connect @@ -155,7 +157,10 @@ async fn test_rest_discovery() { // Start a new node that should connect to the previous node only via REST // The startup wait time should check if it connects successfully - swarm.add_full_node(&version, full_node_config).unwrap(); + swarm + .add_full_node(&version, full_node_config) + .await + .unwrap(); } // Currently this test seems flaky: https://github.com/aptos-labs/aptos-core/issues/670 diff --git a/testsuite/smoke-test/src/rosetta.rs b/testsuite/smoke-test/src/rosetta.rs index 8888b3ebfc4a8..9cf5cf127de0d 100644 --- a/testsuite/smoke-test/src/rosetta.rs +++ b/testsuite/smoke-test/src/rosetta.rs @@ -1614,6 +1614,7 @@ async fn parse_operations( { let actual_pool_address: AccountAddress = bcs::from_bytes(payload.args().first().unwrap()).unwrap(); + let pool_address = operation .metadata .as_ref() @@ -1646,6 +1647,60 @@ async fn parse_operations( panic!("Not a user transaction"); } }, + OperationType::WithdrawUndelegatedFunds => { + if actual_successful { + assert_eq!( + OperationStatusType::Success, + status, + "Successful transaction should have successful distribute operation" + ); + } else { + assert_eq!( + OperationStatusType::Failure, + status, + "Failed transaction should have failed distribute operation" + ); + } + if let aptos_types::transaction::Transaction::UserTransaction(ref txn) = + actual_txn.transaction + { + if let aptos_types::transaction::TransactionPayload::EntryFunction( + ref payload, + ) = txn.payload() + { + let actual_pool_address: AccountAddress = + bcs::from_bytes(payload.args().first().unwrap()).unwrap(); + + let pool_address = operation + .metadata + .as_ref() + .unwrap() + .pool_address + .as_ref() + .unwrap() + .account_address() + .unwrap(); + + assert_eq!(actual_pool_address, pool_address); + + let 
actual_amount: u64 = + bcs::from_bytes(payload.args().get(1).unwrap()).unwrap(); + + let amount = operation + .metadata + .as_ref() + .unwrap() + .amount + .as_ref() + .unwrap() + .0; + + assert_eq!(actual_amount, amount); + } else { + panic!("Not an entry function"); + } + } + }, } } @@ -2300,6 +2355,39 @@ async fn unlock_delegated_stake_and_wait( .map_err(ErrorWrapper::AfterSubmission) } +async fn withdraw_undelegated_stake_and_wait( + rosetta_client: &RosettaClient, + rest_client: &aptos_rest_client::Client, + network_identifier: &NetworkIdentifier, + sender_key: &Ed25519PrivateKey, + pool_address: AccountAddress, + amount: Option, + txn_expiry_duration: Duration, + sequence_number: Option, + max_gas: Option, + gas_unit_price: Option, +) -> Result, ErrorWrapper> { + let expiry_time = expiry_time(txn_expiry_duration); + let txn_hash = rosetta_client + .withdraw_undelegated_stake( + network_identifier, + sender_key, + pool_address, + amount, + expiry_time.as_secs(), + sequence_number, + max_gas, + gas_unit_price, + ) + .await + .map_err(ErrorWrapper::BeforeSubmission)? + .hash; + + wait_for_transaction(rest_client, expiry_time, txn_hash) + .await + .map_err(ErrorWrapper::AfterSubmission) +} + #[tokio::test] async fn test_delegation_pool_operations() { const NUM_TXNS_PER_PAGE: u16 = 2; @@ -2386,7 +2474,7 @@ async fn test_delegation_pool_operations() { .await .expect("Should successfully add delegated stake"); - let final_txn = unlock_delegated_stake_and_wait( + unlock_delegated_stake_and_wait( &rosetta_client, &rest_client, &network_identifier, @@ -2401,6 +2489,21 @@ async fn test_delegation_pool_operations() { .await .expect("Should successfully unlock delegated stake"); + let final_txn = withdraw_undelegated_stake_and_wait( + &rosetta_client, + &rest_client, + &network_identifier, + delegate_account_private_key, + pool_address, + Some(staked_amount), + Duration::from_secs(5), + None, + None, + None, + ) + .await + .expect("Should successfully withdraw undelegated"); + let final_block_to_check = rest_client .get_block_by_version(final_txn.info.version.0, false) .await diff --git a/testsuite/smoke-test/src/storage.rs b/testsuite/smoke-test/src/storage.rs index 8c1cfc99d684c..a40490037e639 100644 --- a/testsuite/smoke-test/src/storage.rs +++ b/testsuite/smoke-test/src/storage.rs @@ -131,7 +131,7 @@ async fn test_db_restore() { backup_path.path(), db_dir.as_path(), &[], - node0_config.storage.rocksdb_configs.use_state_kv_db, + node0_config.storage.rocksdb_configs.split_ledger_db, None, ); @@ -408,7 +408,7 @@ pub(crate) fn db_restore( backup_path: &Path, db_path: &Path, trusted_waypoints: &[Waypoint], - use_state_kv_db: bool, + split_ledger_db: bool, target_verion: Option, /* target version should be same as epoch ending version to start a node */ ) { let now = Instant::now(); @@ -424,8 +424,8 @@ pub(crate) fn db_restore( cmd.arg(&w.to_string()); }); - if use_state_kv_db { - cmd.arg("--use-state-kv-db"); + if split_ledger_db { + cmd.arg("--split-ledger-db"); } if let Some(version) = target_verion { cmd.arg("--target-version"); diff --git a/testsuite/test_framework/cluster.py b/testsuite/test_framework/cluster.py index 9cfa266f8e180..e7e8ac7bc7db8 100644 --- a/testsuite/test_framework/cluster.py +++ b/testsuite/test_framework/cluster.py @@ -52,6 +52,7 @@ class ForgeCluster: cloud: Cloud = Cloud.AWS region: Optional[str] = "us-west-2" kubeconf: Optional[str] = None + is_multiregion: bool = False def __repr__(self) -> str: return f"{self.cloud}/{self.region}/{self.name}" @@ -60,6 +61,10 @@ def 
set_kubeconf(self, kubeconf: str) -> ForgeCluster: self.kubeconf = kubeconf return self + @property + def kubectl_create_context_arg(self) -> List[str]: + return ["--context=karmada-apiserver"] if self.is_multiregion else [] + async def write(self, shell: Shell) -> None: assert self.kubeconf is not None, "kubeconf must be set" await self.write_cluster_config(shell, self.name, self.kubeconf) @@ -148,7 +153,7 @@ def assert_auth(self, shell: Shell) -> None: async def write_cluster_config( self, shell: Shell, cluster_name: str, temp: str ) -> None: - if cluster_name == "multiregion": + if self.is_multiregion: cmd = [ "gcloud", "secrets", diff --git a/testsuite/test_framework/reqwest.py b/testsuite/test_framework/reqwest.py new file mode 100644 index 0000000000000..91759b6009281 --- /dev/null +++ b/testsuite/test_framework/reqwest.py @@ -0,0 +1,19 @@ +# A wrapper around the requests library to make simple HTTP requests. +# Like reqwest in Rust. + +import requests +import sys +from requests import Response +import logging + + +class HttpClient: + def get(self, url: str, headers: dict = {}) -> requests.Response: + raise NotImplementedError() + + +class SimpleHttpClient(HttpClient): + logger: logging.Logger = logging.getLogger("") + + def get(self, url: str, headers: dict = {}) -> requests.Response: + return requests.get(url, headers=headers) diff --git a/testsuite/testcases/src/compatibility_test.rs b/testsuite/testcases/src/compatibility_test.rs index 3c2afcc5d116b..d27ff4392b0f8 100644 --- a/testsuite/testcases/src/compatibility_test.rs +++ b/testsuite/testcases/src/compatibility_test.rs @@ -17,7 +17,7 @@ impl Test for SimpleValidatorUpgrade { } impl NetworkTest for SimpleValidatorUpgrade { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let runtime = Runtime::new()?; // Get the different versions we're testing with @@ -61,11 +61,8 @@ impl NetworkTest for SimpleValidatorUpgrade { // Generate some traffic let txn_stat = generate_traffic(ctx, &all_validators, duration)?; - ctx.report.report_txn_stats( - format!("{}::liveness-check", self.name()), - &txn_stat, - duration, - ); + ctx.report + .report_txn_stats(format!("{}::liveness-check", self.name()), &txn_stat); // Update the first Validator let msg = format!( @@ -81,7 +78,6 @@ impl NetworkTest for SimpleValidatorUpgrade { ctx.report.report_txn_stats( format!("{}::single-validator-upgrade", self.name()), &txn_stat, - duration, ); // Update the rest of the first batch @@ -98,7 +94,6 @@ impl NetworkTest for SimpleValidatorUpgrade { ctx.report.report_txn_stats( format!("{}::half-validator-upgrade", self.name()), &txn_stat, - duration, ); ctx.swarm().fork_check()?; @@ -114,7 +109,6 @@ impl NetworkTest for SimpleValidatorUpgrade { ctx.report.report_txn_stats( format!("{}::rest-validator-upgrade", self.name()), &txn_stat, - duration, ); let msg = "5. 
check swarm health".to_string(); diff --git a/testsuite/testcases/src/consensus_reliability_tests.rs b/testsuite/testcases/src/consensus_reliability_tests.rs index 351dc0598304a..99a8d6f18ed41 100644 --- a/testsuite/testcases/src/consensus_reliability_tests.rs +++ b/testsuite/testcases/src/consensus_reliability_tests.rs @@ -7,7 +7,7 @@ use aptos_forge::{ test_utils::consensus_utils::{ test_consensus_fault_tolerance, FailPointFailureInjection, NodeState, }, - NetworkContext, NetworkTest, Result, Swarm, SwarmExt, Test, + NetworkContext, NetworkTest, Result, Swarm, SwarmExt, Test, TestReport, }; use aptos_logger::{info, warn}; use rand::Rng; @@ -51,7 +51,12 @@ impl NetworkLoadTest for ChangingWorkingQuorumTest { } } - fn test(&self, swarm: &mut dyn Swarm, duration: Duration) -> Result<()> { + fn test( + &self, + swarm: &mut dyn Swarm, + _report: &mut TestReport, + duration: Duration, + ) -> Result<()> { let runtime = Runtime::new().unwrap(); let validators = swarm.get_validator_clients_with_names(); @@ -291,7 +296,7 @@ impl NetworkLoadTest for ChangingWorkingQuorumTest { } impl NetworkTest for ChangingWorkingQuorumTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/forge_setup_test.rs b/testsuite/testcases/src/forge_setup_test.rs index f7b1613529d65..d88a91dde089b 100644 --- a/testsuite/testcases/src/forge_setup_test.rs +++ b/testsuite/testcases/src/forge_setup_test.rs @@ -1,6 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::generate_traffic; +use anyhow::Context; use aptos_forge::{NetworkContext, NetworkTest, Result, Test}; use aptos_logger::info; use rand::{ @@ -8,7 +10,7 @@ use rand::{ seq::IteratorRandom, Rng, SeedableRng, }; -use std::thread; +use std::{thread, time::Duration}; use tokio::runtime::Runtime; const STATE_SYNC_VERSION_COUNTER_NAME: &str = "aptos_state_sync_version"; @@ -22,7 +24,7 @@ impl Test for ForgeSetupTest { } impl NetworkTest for ForgeSetupTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let mut rng = StdRng::from_seed(OsRng.gen()); let runtime = Runtime::new().unwrap(); @@ -34,6 +36,7 @@ impl NetworkTest for ForgeSetupTest { info!("Pick one fullnode to stop and wipe"); let fullnode = swarm.full_node_mut(*fullnode_id).unwrap(); runtime.block_on(fullnode.clear_storage())?; + runtime.block_on(fullnode.start())?; let fullnode = swarm.full_node(*fullnode_id).unwrap(); let fullnode_name = fullnode.name(); @@ -56,6 +59,25 @@ impl NetworkTest for ForgeSetupTest { thread::sleep(std::time::Duration::from_secs(5)); } + // add some PFNs and send load to them + let mut pfns = Vec::new(); + let num_pfns = 5; + for _ in 0..num_pfns { + let pfn_version = swarm.versions().max().unwrap(); + let pfn_node_config = swarm.get_default_pfn_node_config(); + let pfn_peer_id = + runtime.block_on(swarm.add_full_node(&pfn_version, pfn_node_config))?; + + let _pfn = swarm.full_node(pfn_peer_id).context("pfn not found")?; + pfns.push(pfn_peer_id); + } + + let duration = Duration::from_secs(10 * num_pfns); + let txn_stat = generate_traffic(ctx, &pfns, duration)?; + + ctx.report + .report_txn_stats(self.name().to_string(), &txn_stat); + Ok(()) } } diff --git a/testsuite/testcases/src/framework_upgrade.rs b/testsuite/testcases/src/framework_upgrade.rs index 7e13a21b8ca93..cea22bd1476fc 100644 --- 
a/testsuite/testcases/src/framework_upgrade.rs +++ b/testsuite/testcases/src/framework_upgrade.rs @@ -22,7 +22,7 @@ impl Test for FrameworkUpgrade { } impl NetworkTest for FrameworkUpgrade { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let runtime = Runtime::new()?; // Get the different versions we're testing with @@ -122,7 +122,6 @@ impl NetworkTest for FrameworkUpgrade { ctx.report.report_txn_stats( format!("{}::full-framework-upgrade", self.name()), &txn_stat, - duration, ); ctx.swarm().fork_check()?; diff --git a/testsuite/testcases/src/fullnode_reboot_stress_test.rs b/testsuite/testcases/src/fullnode_reboot_stress_test.rs index dc8fd6d634bac..a2d8702e402a8 100644 --- a/testsuite/testcases/src/fullnode_reboot_stress_test.rs +++ b/testsuite/testcases/src/fullnode_reboot_stress_test.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{LoadDestination, NetworkLoadTest}; -use aptos_forge::{NetworkContext, NetworkTest, Result, Swarm, Test}; +use aptos_forge::{NetworkContext, NetworkTest, Result, Swarm, Test, TestReport}; use rand::{seq::SliceRandom, thread_rng}; use std::time::Duration; use tokio::{runtime::Runtime, time::Instant}; @@ -20,7 +20,12 @@ impl NetworkLoadTest for FullNodeRebootStressTest { Ok(LoadDestination::AllFullnodes) } - fn test(&self, swarm: &mut dyn Swarm, duration: Duration) -> Result<()> { + fn test( + &self, + swarm: &mut dyn Swarm, + _report: &mut TestReport, + duration: Duration, + ) -> Result<()> { let start = Instant::now(); let runtime = Runtime::new().unwrap(); @@ -42,7 +47,7 @@ impl NetworkLoadTest for FullNodeRebootStressTest { } impl NetworkTest for FullNodeRebootStressTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/lib.rs b/testsuite/testcases/src/lib.rs index 48cb18022b150..15070f00f070b 100644 --- a/testsuite/testcases/src/lib.rs +++ b/testsuite/testcases/src/lib.rs @@ -9,7 +9,7 @@ pub mod framework_upgrade; pub mod fullnode_reboot_stress_test; pub mod load_vs_perf_benchmark; pub mod modifiers; -pub mod multi_region_simulation_test; +pub mod multi_region_network_test; pub mod network_bandwidth_test; pub mod network_loss_test; pub mod network_partition_test; @@ -27,7 +27,7 @@ pub mod validator_reboot_stress_test; use anyhow::Context; use aptos_forge::{ EmitJobRequest, NetworkContext, NetworkTest, NodeExt, Result, Swarm, SwarmExt, Test, - TxnEmitter, TxnStats, Version, + TestReport, TxnEmitter, TxnStats, Version, }; use aptos_logger::info; use aptos_sdk::{transaction_builder::TransactionFactory, types::PeerId}; @@ -141,7 +141,12 @@ pub trait NetworkLoadTest: Test { // Load is started before this function is called, and stops after this function returns. // Expected duration is passed into this function, expecting this function to take that much // time to finish. How long this function takes will dictate how long the actual test lasts. 
- fn test(&self, _swarm: &mut dyn Swarm, duration: Duration) -> Result<()> { + fn test( + &self, + _swarm: &mut dyn Swarm, + _report: &mut TestReport, + duration: Duration, + ) -> Result<()> { std::thread::sleep(duration); Ok(()) } @@ -152,7 +157,7 @@ pub trait NetworkLoadTest: Test { } impl NetworkTest for dyn NetworkLoadTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let runtime = Runtime::new().unwrap(); let start_timestamp = SystemTime::now() .duration_since(UNIX_EPOCH) @@ -174,7 +179,7 @@ impl NetworkTest for dyn NetworkLoadTest { rng, )?; ctx.report - .report_txn_stats(self.name().to_string(), &txn_stat, actual_test_duration); + .report_txn_stats(self.name().to_string(), &txn_stat); let end_timestamp = SystemTime::now() .duration_since(UNIX_EPOCH) @@ -231,6 +236,7 @@ impl dyn NetworkLoadTest { stats_tracking_phases = 3; } + info!("Starting emitting txns for {}s", duration.as_secs()); let mut job = rt .block_on(emitter.start_job( ctx.swarm().chain_info().root_account, @@ -243,9 +249,8 @@ impl dyn NetworkLoadTest { let cooldown_duration = duration.mul_f32(cooldown_duration_fraction); let test_duration = duration - warmup_duration - cooldown_duration; let phase_duration = test_duration.div_f32((stats_tracking_phases - 2) as f32); - info!("Starting emitting txns for {}s", duration.as_secs()); - std::thread::sleep(warmup_duration); + job = rt.block_on(job.periodic_stat_forward(warmup_duration, 60)); info!("{}s warmup finished", warmup_duration.as_secs()); let max_start_ledger_transactions = rt @@ -272,8 +277,10 @@ impl dyn NetworkLoadTest { } let phase_start = Instant::now(); - self.test(ctx.swarm(), phase_duration) + let join_stats = rt.spawn(job.periodic_stat_forward(phase_duration, 60)); + self.test(ctx.swarm, ctx.report, phase_duration) .context("test NetworkLoadTest")?; + job = rt.block_on(join_stats).context("join stats")?; actual_phase_durations.push(phase_start.elapsed()); } let actual_test_duration = test_start.elapsed(); @@ -297,7 +304,7 @@ impl dyn NetworkLoadTest { let cooldown_used = cooldown_start.elapsed(); if cooldown_used < cooldown_duration { - std::thread::sleep(cooldown_duration - cooldown_used); + job = rt.block_on(job.periodic_stat_forward(cooldown_duration - cooldown_used, 60)); } info!("{}s cooldown finished", cooldown_duration.as_secs()); @@ -305,7 +312,7 @@ impl dyn NetworkLoadTest { "Emitting txns ran for {} secs, stopping job...", duration.as_secs() ); - let stats_by_phase = rt.block_on(emitter.stop_job(job)); + let stats_by_phase = rt.block_on(job.stop_job()); info!("Stopped job"); info!("Warmup stats: {}", stats_by_phase[0].rate()); @@ -346,17 +353,48 @@ impl dyn NetworkLoadTest { pub struct CompositeNetworkTest { // Wrapper tests - their setup and finish methods are called, before the test ones. // TODO don't know how to make this array, and have forge/main.rs work - pub wrapper: &'static dyn NetworkLoadTest, + pub wrappers: Vec>, // This is the main test, return values from this test are used in setup, and // only it's test function is called. 
- pub test: &'static dyn NetworkTest, + pub test: Box, +} + +impl CompositeNetworkTest { + pub fn new( + wrapper: W, + test: T, + ) -> CompositeNetworkTest { + CompositeNetworkTest { + wrappers: vec![Box::new(wrapper)], + test: Box::new(test), + } + } + + pub fn new_with_two_wrappers< + T1: NetworkLoadTest + 'static, + T2: NetworkLoadTest + 'static, + W: NetworkTest + 'static, + >( + wrapper1: T1, + wrapper2: T2, + test: W, + ) -> CompositeNetworkTest { + CompositeNetworkTest { + wrappers: vec![Box::new(wrapper1), Box::new(wrapper2)], + test: Box::new(test), + } + } } impl NetworkTest for CompositeNetworkTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { - self.wrapper.setup(ctx)?; + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { + for wrapper in &self.wrappers { + wrapper.setup(ctx)?; + } self.test.run(ctx)?; - self.wrapper.finish(ctx.swarm())?; + for wrapper in &self.wrappers { + wrapper.finish(ctx.swarm())?; + } Ok(()) } } diff --git a/testsuite/testcases/src/load_vs_perf_benchmark.rs b/testsuite/testcases/src/load_vs_perf_benchmark.rs index e716713dc69f4..4105d838a4809 100644 --- a/testsuite/testcases/src/load_vs_perf_benchmark.rs +++ b/testsuite/testcases/src/load_vs_perf_benchmark.rs @@ -3,8 +3,9 @@ use crate::NetworkLoadTest; use aptos_forge::{ - args::TransactionTypeArg, EmitJobMode, EmitJobRequest, NetworkContext, NetworkTest, Result, - Test, TxnStats, + args::TransactionTypeArg, + success_criteria::{SuccessCriteria, SuccessCriteriaChecker}, + EmitJobMode, EmitJobRequest, NetworkContext, NetworkTest, Result, Test, TxnStats, }; use aptos_logger::info; use rand::SeedableRng; @@ -84,8 +85,9 @@ impl Display for TransactionWorkload { } pub struct LoadVsPerfBenchmark { - pub test: &'static dyn NetworkLoadTest, + pub test: Box, pub workloads: Workloads, + pub criteria: Vec, } impl Test for LoadVsPerfBenchmark { @@ -139,7 +141,14 @@ impl LoadVsPerfBenchmark { } impl NetworkTest for LoadVsPerfBenchmark { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { + assert!( + self.criteria.is_empty() || self.criteria.len() == self.workloads.len(), + "Invalid config, {} criteria and {} workloads given", + self.criteria.len(), + self.workloads.len(), + ); + let _runtime = Runtime::new().unwrap(); let individual_with_buffer = ctx .global_duration @@ -167,38 +176,65 @@ impl NetworkTest for LoadVsPerfBenchmark { // let mut aptos_info = ctx.swarm().aptos_public_info(); // runtime.block_on(aptos_info.reconfig()); - println!( - "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}", - "workload", - "submitted/s", - "committed/s", - "expired/s", - "rejected/s", - "chain txn/s", - "latency", - "p50 lat", - "p90 lat", - "p99 lat", - "actual dur" - ); - for result in &results { - let rate = result.stats.rate(); - println!( - "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}", - result.name, - rate.submitted, - rate.committed, - rate.expired, - rate.failed_submission, - result.ledger_transactions / result.actual_duration.as_secs(), - rate.latency, - rate.p50_latency, - rate.p90_latency, - rate.p99_latency, - result.actual_duration.as_secs() - ) + let table = to_table(&results); + for line in table { + info!("{}", line); + } + } + + let table = to_table(&results); + for line in table { + ctx.report.report_text(line); + } + for (index, result) in results.iter().enumerate() { 
+ let rate = result.stats.rate(); + if let Some(criteria) = self.criteria.get(index) { + SuccessCriteriaChecker::check_core_for_success( + criteria, + ctx.report, + &rate, + Some(result.name.clone()), + )?; } } Ok(()) } } + +fn to_table(results: &[SingleRunStats]) -> Vec { + let mut table = Vec::new(); + table.push(format!( + "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}", + "workload", + "submitted/s", + "committed/s", + "expired/s", + "rejected/s", + "chain txn/s", + "latency", + "p50 lat", + "p90 lat", + "p99 lat", + "actual dur" + )); + + for result in results { + let rate = result.stats.rate(); + table.push(format!( + "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}", + result.name, + rate.submitted, + rate.committed, + rate.expired, + rate.failed_submission, + result.ledger_transactions / result.actual_duration.as_secs(), + rate.latency, + rate.p50_latency, + rate.p90_latency, + rate.p99_latency, + result.actual_duration.as_secs() + )); + } + + table +} diff --git a/testsuite/testcases/src/modifiers.rs b/testsuite/testcases/src/modifiers.rs index 9f979bed69348..4c02c3e90ba02 100644 --- a/testsuite/testcases/src/modifiers.rs +++ b/testsuite/testcases/src/modifiers.rs @@ -1,9 +1,12 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{LoadDestination, NetworkLoadTest}; -use aptos_forge::{NetworkContext, NetworkTest, Swarm, SwarmExt, Test}; +use crate::{multi_region_network_test::chunk_validators, LoadDestination, NetworkLoadTest}; +use aptos_forge::{ + GroupCpuStress, NetworkContext, NetworkTest, Swarm, SwarmChaos, SwarmCpuStress, SwarmExt, Test, +}; use aptos_logger::info; +use aptos_types::PeerId; use rand::Rng; use tokio::runtime::Runtime; @@ -99,7 +102,7 @@ impl NetworkLoadTest for ExecutionDelayTest { } impl NetworkTest for ExecutionDelayTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { ::run(self, ctx) } } @@ -184,7 +187,7 @@ impl NetworkLoadTest for NetworkUnreliabilityTest { } impl NetworkTest for NetworkUnreliabilityTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { ::run(self, ctx) } } @@ -194,3 +197,80 @@ impl Test for NetworkUnreliabilityTest { "NetworkUnreliabilityWrapper" } } + +#[derive(Clone)] +pub struct CpuChaosConfig { + pub num_groups: usize, + pub load_per_worker: u64, +} + +impl Default for CpuChaosConfig { + fn default() -> Self { + Self { + num_groups: 4, + load_per_worker: 100, + } + } +} + +pub struct CpuChaosTest { + pub override_config: Option, +} + +impl CpuChaosTest { + fn create_cpu_chaos(&self, swarm: &mut dyn Swarm) -> SwarmCpuStress { + let all_validators = swarm.validators().map(|v| v.peer_id()).collect::>(); + + let config = self.override_config.as_ref().cloned().unwrap_or_default(); + + create_cpu_stress_template(all_validators, &config) + } +} + +impl Test for CpuChaosTest { + fn name(&self) -> &'static str { + "CpuChaosWrapper" + } +} + +fn create_cpu_stress_template( + all_validators: Vec, + config: &CpuChaosConfig, +) -> SwarmCpuStress { + let validator_chunks = chunk_validators(all_validators, config.num_groups); + + let group_cpu_stresses = validator_chunks + .into_iter() + .enumerate() + .map(|(idx, chunk)| GroupCpuStress { + name: format!("group-{}-cpu-stress", idx), + target_nodes: chunk, + 
num_workers: (config.num_groups - idx) as u64, + load_per_worker: config.load_per_worker, + }) + .collect(); + SwarmCpuStress { group_cpu_stresses } +} + +impl NetworkLoadTest for CpuChaosTest { + fn setup(&self, ctx: &mut NetworkContext) -> anyhow::Result { + let swarm_cpu_stress = self.create_cpu_chaos(ctx.swarm()); + + ctx.swarm() + .inject_chaos(SwarmChaos::CpuStress(swarm_cpu_stress))?; + + Ok(LoadDestination::FullnodesOtherwiseValidators) + } + + fn finish(&self, swarm: &mut dyn Swarm) -> anyhow::Result<()> { + let swarm_cpu_stress = self.create_cpu_chaos(swarm); + + swarm.remove_chaos(SwarmChaos::CpuStress(swarm_cpu_stress)) + } +} + +impl NetworkTest for CpuChaosTest { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { + ::run(self, ctx) + } +} diff --git a/testsuite/testcases/src/multi_region_network_test.rs b/testsuite/testcases/src/multi_region_network_test.rs new file mode 100644 index 0000000000000..0178c4217a7e7 --- /dev/null +++ b/testsuite/testcases/src/multi_region_network_test.rs @@ -0,0 +1,316 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{LoadDestination, NetworkLoadTest}; +use aptos_forge::{GroupNetEm, NetworkContext, NetworkTest, Swarm, SwarmChaos, SwarmNetEm, Test}; +use aptos_logger::info; +use aptos_types::PeerId; +use itertools::{self, Itertools}; +use std::collections::BTreeMap; + +/// The link stats are obtained from https://github.com/doitintl/intercloud-throughput/blob/master/results_202202/results.csv +/// The four regions were hand-picked from the dataset to simulate a multi-region setup +/// with high latencies and low bandwidth. +macro_rules! FOUR_REGION_LINK_STATS_CSV { + () => { + "data/four_region_link_stats.csv" + }; +} + +fn get_link_stats_table() -> BTreeMap> { + let mut stats_table = BTreeMap::new(); + + let mut rdr = + csv::Reader::from_reader(include_bytes!(FOUR_REGION_LINK_STATS_CSV!()).as_slice()); + rdr.deserialize() + .for_each(|result: Result<(String, String, u64, f64), _>| { + if let Ok((from, to, bitrate, latency)) = result { + stats_table + .entry(from) + .or_insert_with(BTreeMap::new) + .insert(to, (bitrate, latency)); + } + }); + stats_table +} + +pub(crate) fn chunk_validators(validators: Vec, num_groups: usize) -> Vec> { + let approx_chunk_size = validators.len() / num_groups; + + let chunks = validators.chunks_exact(approx_chunk_size); + + let mut validator_chunks: Vec> = + chunks.clone().map(|chunk| chunk.to_vec()).collect(); + + // Get any remaining validators and add them to the first group + let remaining_validators: Vec = chunks + .remainder() + .iter() + // If `approx_validators_per_region` is 1, then it is possible we will have more regions than desired, so the + // remaining validators will be in the first group. + .chain(chunks.skip(num_groups).flatten()) + .cloned() + .collect(); + if !remaining_validators.is_empty() { + validator_chunks[0].append(remaining_validators.to_vec().as_mut()); + } + + validator_chunks +} + +/// Creates a table of validators grouped by region. The validators divided into N groups, where N is the number of regions +/// provided in the link stats table. Any remaining validators are added to the first group. 
+fn create_link_stats_table_with_peer_groups( + validators: Vec, + link_stats_table: &LinkStatsTable, +) -> LinkStatsTableWithPeerGroups { + assert!(validators.len() >= link_stats_table.len()); + + let number_of_regions = link_stats_table.len(); + assert!( + number_of_regions >= 2, + "At least 2 regions are required for inter-region network chaos." + ); + assert!( + number_of_regions <= 4, + "ChaosMesh only supports simulating up to 4 regions." + ); + + let validator_chunks = chunk_validators(validators, number_of_regions); + + let validator_groups = validator_chunks + .into_iter() + .zip(link_stats_table.iter()) + .map(|(chunk, (from_region, stats))| (from_region.clone(), chunk, stats.clone())) + .collect(); + + validator_groups +} + +// A map of "source" regions to a map of "destination" region to (bandwidth, latency) +type LinkStatsTable = BTreeMap>; +// A map of "source" regions to a tuple of (list of validators, map of "destination" region to (bandwidth, latency)) +type LinkStatsTableWithPeerGroups = Vec<(String, Vec, BTreeMap)>; + +#[derive(Clone)] +pub struct InterRegionNetEmConfig { + delay_jitter_ms: u64, + delay_correlation_percentage: u64, + loss_percentage: u64, + loss_correlation_percentage: u64, +} + +impl Default for InterRegionNetEmConfig { + fn default() -> Self { + Self { + delay_jitter_ms: 20, + delay_correlation_percentage: 50, + loss_percentage: 3, + loss_correlation_percentage: 50, + } + } +} + +impl InterRegionNetEmConfig { + // Creates GroupNetEm for inter-region network chaos + fn build(&self, validator_groups: &LinkStatsTableWithPeerGroups) -> Vec { + let group_netems: Vec = validator_groups + .iter() + .combinations(2) + .map(|comb| { + let (from_region, from_chunk, stats) = &comb[0]; + let (to_region, to_chunk, _) = &comb[1]; + + let (bandwidth, latency) = stats.get(to_region).unwrap(); + let netem = GroupNetEm { + name: format!("{}-to-{}-netem", from_region, to_region), + source_nodes: from_chunk.to_vec(), + target_nodes: to_chunk.to_vec(), + delay_latency_ms: *latency as u64, + delay_jitter_ms: self.delay_jitter_ms, + delay_correlation_percentage: self.delay_correlation_percentage, + loss_percentage: self.loss_percentage, + loss_correlation_percentage: self.loss_correlation_percentage, + rate_in_mbps: *bandwidth / 1e6 as u64, + }; + info!("inter-region netem {:?}", netem); + + netem + }) + .collect(); + + group_netems + } +} + +#[derive(Clone)] +pub struct IntraRegionNetEmConfig { + bandwidth_rate_mbps: u64, + delay_latency_ms: u64, + delay_jitter_ms: u64, + delay_correlation_percentage: u64, + loss_percentage: u64, + loss_correlation_percentage: u64, +} + +impl Default for IntraRegionNetEmConfig { + fn default() -> Self { + Self { + bandwidth_rate_mbps: 10 * 1000, // 10 Gbps + delay_latency_ms: 50, + delay_jitter_ms: 5, + delay_correlation_percentage: 50, + loss_percentage: 1, + loss_correlation_percentage: 50, + } + } +} + +impl IntraRegionNetEmConfig { + fn build(&self, validator_groups: LinkStatsTableWithPeerGroups) -> Vec { + let group_netems: Vec = validator_groups + .iter() + .map(|(region, chunk, _)| { + let netem = GroupNetEm { + name: format!("{}-self-netem", region), + source_nodes: chunk.to_vec(), + target_nodes: chunk.to_vec(), + delay_latency_ms: self.delay_latency_ms, + delay_jitter_ms: self.delay_jitter_ms, + delay_correlation_percentage: self.delay_correlation_percentage, + loss_percentage: self.loss_percentage, + loss_correlation_percentage: self.loss_correlation_percentage, + rate_in_mbps: self.bandwidth_rate_mbps, + }; + info!("intra-region 
netem {:?}", netem); + + netem + }) + .collect(); + + group_netems + } +} + +#[derive(Clone)] +pub struct MultiRegionNetworkEmulationConfig { + pub link_stats_table: LinkStatsTable, + pub inter_region_config: InterRegionNetEmConfig, + pub intra_region_config: Option, +} + +impl Default for MultiRegionNetworkEmulationConfig { + fn default() -> Self { + Self { + link_stats_table: get_link_stats_table(), + inter_region_config: InterRegionNetEmConfig::default(), + intra_region_config: Some(IntraRegionNetEmConfig::default()), + } + } +} + +/// A test to emulate network conditions for a multi-region setup. +pub struct MultiRegionNetworkEmulationTest { + pub override_config: Option, +} + +impl MultiRegionNetworkEmulationTest { + fn create_netem_chaos(&self, swarm: &mut dyn Swarm) -> SwarmNetEm { + let all_validators = swarm.validators().map(|v| v.peer_id()).collect::>(); + + let config = self.override_config.clone().unwrap_or_default(); + + create_multi_region_swarm_network_chaos(all_validators, &config) + } +} + +impl Test for MultiRegionNetworkEmulationTest { + fn name(&self) -> &'static str { + "network:multi-region-network-emulation" + } +} + +fn create_multi_region_swarm_network_chaos( + all_validators: Vec, + config: &MultiRegionNetworkEmulationConfig, +) -> SwarmNetEm { + let validator_groups = + create_link_stats_table_with_peer_groups(all_validators, &config.link_stats_table); + + let inter_region_netem = config.inter_region_config.build(&validator_groups); + let intra_region_netem = config + .intra_region_config + .as_ref() + .map(|config| config.build(validator_groups)) + .unwrap_or_default(); + + SwarmNetEm { + group_netems: itertools::concat(vec![intra_region_netem, inter_region_netem]), + } +} + +impl NetworkLoadTest for MultiRegionNetworkEmulationTest { + fn setup(&self, ctx: &mut NetworkContext) -> anyhow::Result { + let chaos = self.create_netem_chaos(ctx.swarm()); + ctx.swarm().inject_chaos(SwarmChaos::NetEm(chaos))?; + + Ok(LoadDestination::FullnodesOtherwiseValidators) + } + + fn finish(&self, swarm: &mut dyn Swarm) -> anyhow::Result<()> { + let chaos = self.create_netem_chaos(swarm); + swarm.remove_chaos(SwarmChaos::NetEm(chaos)) + } +} + +impl NetworkTest for MultiRegionNetworkEmulationTest { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { + ::run(self, ctx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::vec; + + #[test] + fn test_create_multi_region_swarm_network_chaos() { + aptos_logger::Logger::new().init(); + + let config = MultiRegionNetworkEmulationConfig::default(); + + let all_validators = (0..8).map(|_| PeerId::random()).collect(); + let netem = create_multi_region_swarm_network_chaos(all_validators, &config); + + assert_eq!(netem.group_netems.len(), 10); + + let all_validators: Vec = (0..10).map(|_| PeerId::random()).collect(); + let netem = create_multi_region_swarm_network_chaos(all_validators.clone(), &config); + + assert_eq!(netem.group_netems.len(), 10); + assert_eq!(netem.group_netems[0].source_nodes.len(), 4); + assert_eq!(netem.group_netems[0].target_nodes.len(), 4); + assert_eq!(netem.group_netems[0], GroupNetEm { + name: "aws--ap-northeast-1-self-netem".to_owned(), + rate_in_mbps: 10000, + source_nodes: vec![ + all_validators[0], + all_validators[1], + all_validators[8], + all_validators[9], + ], + target_nodes: vec![ + all_validators[0], + all_validators[1], + all_validators[8], + all_validators[9], + ], + delay_latency_ms: 50, + delay_jitter_ms: 5, + delay_correlation_percentage: 50, + loss_percentage: 1, + 
loss_correlation_percentage: 50 + }) + } +} diff --git a/testsuite/testcases/src/multi_region_simulation_test.rs b/testsuite/testcases/src/multi_region_simulation_test.rs deleted file mode 100644 index 12dcc2726e3b6..0000000000000 --- a/testsuite/testcases/src/multi_region_simulation_test.rs +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{LoadDestination, NetworkLoadTest}; -use aptos_forge::{ - GroupNetworkBandwidth, GroupNetworkDelay, NetworkContext, NetworkTest, Swarm, SwarmChaos, - SwarmNetworkBandwidth, SwarmNetworkDelay, Test, -}; -use aptos_logger::info; -use aptos_types::PeerId; -use csv::Reader; -use itertools::{self, Itertools}; -use std::collections::BTreeMap; - -/// The link stats are obtained from https://github.com/doitintl/intercloud-throughput/blob/master/results_202202/results.csv -/// The four regions were hand-picked from the dataset to simulate a multi-region setup -/// with high latencies and low bandwidth. -macro_rules! FOUR_REGION_LINK_STATS_CSV { - () => { - "data/four_region_link_stats.csv" - }; -} - -/// A test to simulate network between multiple regions in different clouds. -/// It currently supports only 4 regions, due to ChaosMesh limitations. -pub struct MultiRegionMultiCloudSimulationTest {} - -impl Test for MultiRegionMultiCloudSimulationTest { - fn name(&self) -> &'static str { - "network::multi-region-multi-cloud-simulation" - } -} - -fn get_link_stats_table() -> BTreeMap> { - let mut stats_table = BTreeMap::new(); - - let mut rdr = Reader::from_reader(include_bytes!(FOUR_REGION_LINK_STATS_CSV!()).as_slice()); - rdr.deserialize() - .for_each(|result: Result<(String, String, u64, f64), _>| { - if let Ok((from, to, bitrate, latency)) = result { - stats_table - .entry(from) - .or_insert_with(BTreeMap::new) - .insert(to, (bitrate, latency)); - } - }); - stats_table -} - -/// Creates a SwarmNetworkDelay -fn create_multi_region_swarm_network_chaos( - all_validators: Vec, -) -> (SwarmNetworkDelay, SwarmNetworkBandwidth) { - let link_stats_table = get_link_stats_table(); - - assert!(all_validators.len() >= link_stats_table.len()); - - let number_of_regions = link_stats_table.len(); - let approx_validators_per_region = all_validators.len() / number_of_regions; - - let validator_chunks = all_validators.chunks_exact(approx_validators_per_region); - - let (mut group_network_delays, group_network_bandwidths): ( - Vec, - Vec, - ) = validator_chunks - .clone() - .zip(link_stats_table.iter().clone()) - .combinations(2) - .map(|comb| { - let (from_chunk, (from_region, stats)) = &comb[0]; - let (to_chunk, (to_region, _)) = &comb[1]; - - let (bandwidth, latency) = stats.get(*to_region).unwrap(); - let delay = GroupNetworkDelay { - name: format!("{}-to-{}-delay", from_region, to_region), - source_nodes: from_chunk.to_vec(), - target_nodes: to_chunk.to_vec(), - latency_ms: *latency as u64, - jitter_ms: 5, - correlation_percentage: 50, - }; - info!("delay {:?}", delay); - - let bandwidth = GroupNetworkBandwidth { - name: format!("{}-to-{}-bandwidth", from_region, to_region), - // source_nodes: from_chunk.to_vec(), - // target_nodes: to_chunk.to_vec(), - rate: bandwidth / 8, - limit: 20971520, - buffer: 10000, - }; - info!("bandwidth {:?}", bandwidth); - - (delay, bandwidth) - }) - .unzip(); - - let remainder = validator_chunks.remainder(); - let remaining_validators: Vec = validator_chunks - .skip(number_of_regions) - .flatten() - .chain(remainder.iter()) - .cloned() - .collect(); - info!("remaining: {:?}", 
remaining_validators); - if !remaining_validators.is_empty() { - group_network_delays[0] - .source_nodes - .append(remaining_validators.to_vec().as_mut()); - } - - ( - SwarmNetworkDelay { - group_network_delays, - }, - SwarmNetworkBandwidth { - group_network_bandwidths, - }, - ) -} - -impl NetworkLoadTest for MultiRegionMultiCloudSimulationTest { - fn setup(&self, ctx: &mut NetworkContext) -> anyhow::Result { - let all_validators = ctx - .swarm() - .validators() - .map(|v| v.peer_id()) - .collect::>(); - - let (delay, bandwidth) = create_multi_region_swarm_network_chaos(all_validators); - - // inject bandwidth limit - let chaos = SwarmChaos::Bandwidth(bandwidth); - ctx.swarm().inject_chaos(chaos)?; - - // inject network delay - let chaos = SwarmChaos::Delay(delay); - ctx.swarm().inject_chaos(chaos)?; - - Ok(LoadDestination::FullnodesOtherwiseValidators) - } - - fn finish(&self, swarm: &mut dyn Swarm) -> anyhow::Result<()> { - swarm.remove_all_chaos() - } -} - -impl NetworkTest for MultiRegionMultiCloudSimulationTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { - ::run(self, ctx) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_create_multi_region_swarm_network_chaos() { - aptos_logger::Logger::new().init(); - - let all_validators = (0..8).map(|_| PeerId::random()).collect(); - let (delay, bandwidth) = create_multi_region_swarm_network_chaos(all_validators); - - assert_eq!(delay.group_network_delays.len(), 6); - assert_eq!(bandwidth.group_network_bandwidths.len(), 6); - - let all_validators: Vec = (0..10).map(|_| PeerId::random()).collect(); - let (delay, bandwidth) = create_multi_region_swarm_network_chaos(all_validators); - - assert_eq!(delay.group_network_delays.len(), 6); - assert_eq!(bandwidth.group_network_bandwidths.len(), 6); - assert_eq!(delay.group_network_delays[0].source_nodes.len(), 4); - assert_eq!(delay.group_network_delays[0].target_nodes.len(), 2); - assert_eq!( - bandwidth.group_network_bandwidths[0], - GroupNetworkBandwidth { - name: "aws--ap-northeast-1-to-aws--eu-west-1-bandwidth".to_owned(), - rate: 5160960, - limit: 20971520, - buffer: 10000, - } - ) - } -} diff --git a/testsuite/testcases/src/network_bandwidth_test.rs b/testsuite/testcases/src/network_bandwidth_test.rs index 83ab365c8f34f..2b4b8e4dc1335 100644 --- a/testsuite/testcases/src/network_bandwidth_test.rs +++ b/testsuite/testcases/src/network_bandwidth_test.rs @@ -56,7 +56,7 @@ impl NetworkLoadTest for NetworkBandwidthTest { } impl NetworkTest for NetworkBandwidthTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/network_loss_test.rs b/testsuite/testcases/src/network_loss_test.rs index 37a9f98107d34..83df42470e5f5 100644 --- a/testsuite/testcases/src/network_loss_test.rs +++ b/testsuite/testcases/src/network_loss_test.rs @@ -42,7 +42,7 @@ impl NetworkLoadTest for NetworkLossTest { } impl NetworkTest for NetworkLossTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/network_partition_test.rs b/testsuite/testcases/src/network_partition_test.rs index 95300ea5a9dd6..6118a59496ea1 100644 --- a/testsuite/testcases/src/network_partition_test.rs +++ b/testsuite/testcases/src/network_partition_test.rs @@ -45,7 +45,7 @@ impl NetworkLoadTest for 
NetworkPartitionTest { } impl NetworkTest for NetworkPartitionTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/partial_nodes_down_test.rs b/testsuite/testcases/src/partial_nodes_down_test.rs index 96c7504eedd45..2d6c126907cc4 100644 --- a/testsuite/testcases/src/partial_nodes_down_test.rs +++ b/testsuite/testcases/src/partial_nodes_down_test.rs @@ -16,7 +16,7 @@ impl Test for PartialNodesDown { } impl NetworkTest for PartialNodesDown { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let runtime = Runtime::new()?; let duration = Duration::from_secs(120); let all_validators = ctx @@ -36,7 +36,7 @@ impl NetworkTest for PartialNodesDown { // Generate some traffic let txn_stat = generate_traffic(ctx, &up_nodes, duration)?; ctx.report - .report_txn_stats(self.name().to_string(), &txn_stat, duration); + .report_txn_stats(self.name().to_string(), &txn_stat); for n in &down_nodes { let node = ctx.swarm().validator_mut(*n).unwrap(); println!("Node {} is going to restart", node.name()); diff --git a/testsuite/testcases/src/performance_test.rs b/testsuite/testcases/src/performance_test.rs index 3475584acae96..f602ede7d437f 100644 --- a/testsuite/testcases/src/performance_test.rs +++ b/testsuite/testcases/src/performance_test.rs @@ -16,7 +16,7 @@ impl Test for PerformanceBenchmark { impl NetworkLoadTest for PerformanceBenchmark {} impl NetworkTest for PerformanceBenchmark { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/quorum_store_onchain_enable_test.rs b/testsuite/testcases/src/quorum_store_onchain_enable_test.rs index 2946199f9affa..69ec6a2e478f6 100644 --- a/testsuite/testcases/src/quorum_store_onchain_enable_test.rs +++ b/testsuite/testcases/src/quorum_store_onchain_enable_test.rs @@ -28,6 +28,7 @@ impl NetworkLoadTest for QuorumStoreOnChainEnableTest { fn test( &self, swarm: &mut dyn aptos_forge::Swarm, + _report: &mut aptos_forge::TestReport, duration: std::time::Duration, ) -> anyhow::Result<()> { let runtime = Runtime::new().unwrap(); @@ -107,7 +108,7 @@ impl NetworkLoadTest for QuorumStoreOnChainEnableTest { } impl NetworkTest for QuorumStoreOnChainEnableTest { - fn run<'t>(&self, ctx: &mut aptos_forge::NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut aptos_forge::NetworkContext<'_>) -> anyhow::Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/reconfiguration_test.rs b/testsuite/testcases/src/reconfiguration_test.rs index 3dfde8f347a91..57f99767eb194 100644 --- a/testsuite/testcases/src/reconfiguration_test.rs +++ b/testsuite/testcases/src/reconfiguration_test.rs @@ -14,7 +14,7 @@ impl Test for ReconfigurationTest { } impl NetworkTest for ReconfigurationTest { - fn run<'t>(&self, _ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, _ctx: &mut NetworkContext<'_>) -> Result<()> { Err(anyhow!("Not supported in aptos-framework yet")) } // TODO(https://github.com/aptos-labs/aptos-core/issues/317): add back after support those transactions in aptos-framework diff --git a/testsuite/testcases/src/state_sync_performance.rs b/testsuite/testcases/src/state_sync_performance.rs index c14af8e4f7cf7..22f43cc7a1569 100644 --- a/testsuite/testcases/src/state_sync_performance.rs +++ 
b/testsuite/testcases/src/state_sync_performance.rs @@ -28,7 +28,7 @@ impl Test for StateSyncFullnodePerformance { } impl NetworkTest for StateSyncFullnodePerformance { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let all_fullnodes = get_fullnodes_and_check_setup(ctx, self.name())?; // Emit a lot of traffic and ensure the fullnodes can all sync @@ -54,7 +54,7 @@ impl Test for StateSyncFullnodeFastSyncPerformance { } impl NetworkTest for StateSyncFullnodeFastSyncPerformance { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { let all_fullnodes = get_fullnodes_and_check_setup(ctx, self.name())?; // Emit a lot of traffic and ensure the fullnodes can all sync @@ -130,7 +130,7 @@ impl Test for StateSyncValidatorPerformance { } impl NetworkTest for StateSyncValidatorPerformance { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { // Verify we have at least 7 validators (i.e., 3f+1, where f is 2) // so we can kill 2 validators but still make progress. let all_validators = ctx @@ -361,7 +361,7 @@ fn ensure_state_sync_transaction_throughput( // TODO: we fetch the TPS requirement from the given success criteria. // But, we should probably make it more generic to avoid this. // Ensure we meet the success criteria. - let min_expected_tps = ctx.success_criteria.avg_tps as u64; + let min_expected_tps = ctx.success_criteria.min_avg_tps as u64; if state_sync_throughput < min_expected_tps { let error_message = format!( "State sync TPS requirement failed. Average TPS: {}, minimum required TPS: {}", diff --git a/testsuite/testcases/src/three_region_simulation_test.rs b/testsuite/testcases/src/three_region_simulation_test.rs index 12c3eec27a4b2..2d17e83b00151 100644 --- a/testsuite/testcases/src/three_region_simulation_test.rs +++ b/testsuite/testcases/src/three_region_simulation_test.rs @@ -101,7 +101,7 @@ impl NetworkLoadTest for ThreeRegionSameCloudSimulationTest { } impl NetworkTest for ThreeRegionSameCloudSimulationTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/twin_validator_test.rs b/testsuite/testcases/src/twin_validator_test.rs index 100cbbce8e747..53905027f22b5 100644 --- a/testsuite/testcases/src/twin_validator_test.rs +++ b/testsuite/testcases/src/twin_validator_test.rs @@ -19,7 +19,7 @@ impl Test for TwinValidatorTest { impl NetworkLoadTest for TwinValidatorTest {} impl NetworkTest for TwinValidatorTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> anyhow::Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> anyhow::Result<()> { let runtime = Runtime::new().unwrap(); let all_validators_ids = ctx diff --git a/testsuite/testcases/src/two_traffics_test.rs b/testsuite/testcases/src/two_traffics_test.rs index 8c7b6face4dc5..846099b3134f8 100644 --- a/testsuite/testcases/src/two_traffics_test.rs +++ b/testsuite/testcases/src/two_traffics_test.rs @@ -4,25 +4,17 @@ use crate::{ create_emitter_and_request, traffic_emitter_runtime, LoadDestination, NetworkLoadTest, }; -use anyhow::{bail, Ok}; use aptos_forge::{ - success_criteria::{LatencyType, SuccessCriteriaChecker}, - EmitJobMode, EmitJobRequest, NetworkContext, NetworkTest, Result, Swarm, Test, TransactionType, + 
success_criteria::{SuccessCriteria, SuccessCriteriaChecker}, + EmitJobRequest, NetworkContext, NetworkTest, Result, Swarm, Test, TestReport, }; use aptos_logger::info; use rand::{rngs::OsRng, Rng, SeedableRng}; use std::time::{Duration, Instant}; pub struct TwoTrafficsTest { - // cannot have 'static EmitJobRequest, like below, so need to have inner fields - // pub inner_emit_job_request: EmitJobRequest, - pub inner_tps: usize, - pub inner_gas_price: u64, - pub inner_init_gas_price_multiplier: u64, - pub inner_transaction_type: TransactionType, - - pub avg_tps: usize, - pub latency_thresholds: &'static [(f32, LatencyType)], + pub inner_traffic: EmitJobRequest, + pub inner_success_criteria: SuccessCriteria, } impl Test for TwoTrafficsTest { @@ -32,7 +24,12 @@ impl Test for TwoTrafficsTest { } impl NetworkLoadTest for TwoTrafficsTest { - fn test(&self, swarm: &mut dyn Swarm, duration: Duration) -> Result<()> { + fn test( + &self, + swarm: &mut dyn Swarm, + report: &mut TestReport, + duration: Duration, + ) -> Result<()> { info!( "Running TwoTrafficsTest test for duration {}s", duration.as_secs_f32() @@ -43,13 +40,7 @@ impl NetworkLoadTest for TwoTrafficsTest { let (emitter, emit_job_request) = create_emitter_and_request( swarm, - EmitJobRequest::default() - .mode(EmitJobMode::ConstTps { - tps: self.inner_tps, - }) - .gas_price(self.inner_gas_price) - .init_gas_price_multiplier(self.inner_init_gas_price_multiplier) - .transaction_type(self.inner_transaction_type), + self.inner_traffic.clone(), &nodes_to_send_load_to, rng, )?; @@ -72,33 +63,21 @@ impl NetworkLoadTest for TwoTrafficsTest { ); let rate = stats.rate(); - info!("Inner traffic: {:?}", rate); - let avg_tps = rate.committed; - if avg_tps < self.avg_tps as u64 { - bail!( - "TPS requirement for inner traffic failed. Average TPS {}, minimum TPS requirement {}. 
Full inner stats: {:?}", - avg_tps, - self.avg_tps, - rate, - ) - } + report.report_txn_stats(format!("{}: inner traffic", self.name()), &stats); - SuccessCriteriaChecker::check_latency( - &self - .latency_thresholds - .iter() - .map(|(s, t)| (Duration::from_secs_f32(*s), t.clone())) - .collect::>(), + SuccessCriteriaChecker::check_core_for_success( + &self.inner_success_criteria, + report, &rate, + Some("inner traffic".to_string()), )?; - Ok(()) } } impl NetworkTest for TwoTrafficsTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/validator_join_leave_test.rs b/testsuite/testcases/src/validator_join_leave_test.rs index 96179f3e74ed8..46141e6304264 100644 --- a/testsuite/testcases/src/validator_join_leave_test.rs +++ b/testsuite/testcases/src/validator_join_leave_test.rs @@ -4,7 +4,8 @@ use crate::{LoadDestination, NetworkLoadTest}; use aptos::{account::create::DEFAULT_FUNDED_COINS, test::CliTestFramework}; use aptos_forge::{ - reconfig, NetworkContext, NetworkTest, NodeExt, Result, Swarm, SwarmExt, Test, FORGE_KEY_SEED, + reconfig, NetworkContext, NetworkTest, NodeExt, Result, Swarm, SwarmExt, Test, TestReport, + FORGE_KEY_SEED, }; use aptos_keygen::KeyGen; use aptos_logger::info; @@ -28,7 +29,12 @@ impl NetworkLoadTest for ValidatorJoinLeaveTest { Ok(LoadDestination::FullnodesOtherwiseValidators) } - fn test(&self, swarm: &mut dyn Swarm, duration: Duration) -> Result<()> { + fn test( + &self, + swarm: &mut dyn Swarm, + _report: &mut TestReport, + duration: Duration, + ) -> Result<()> { // Verify we have at least 7 validators (i.e., 3f+1, where f is 2) // so we can lose 2 validators but still make progress. let num_validators = swarm.validators().count(); @@ -179,7 +185,7 @@ impl NetworkLoadTest for ValidatorJoinLeaveTest { } impl NetworkTest for ValidatorJoinLeaveTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/src/validator_reboot_stress_test.rs b/testsuite/testcases/src/validator_reboot_stress_test.rs index 0842f4ca346f6..9355ac97a99d8 100644 --- a/testsuite/testcases/src/validator_reboot_stress_test.rs +++ b/testsuite/testcases/src/validator_reboot_stress_test.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::NetworkLoadTest; -use aptos_forge::{NetworkContext, NetworkTest, Result, Swarm, Test}; +use aptos_forge::{NetworkContext, NetworkTest, Result, Swarm, Test, TestReport}; use rand::{seq::SliceRandom, thread_rng}; use std::time::Duration; use tokio::{runtime::Runtime, time::Instant}; @@ -20,7 +20,12 @@ impl Test for ValidatorRebootStressTest { } impl NetworkLoadTest for ValidatorRebootStressTest { - fn test(&self, swarm: &mut dyn Swarm, duration: Duration) -> Result<()> { + fn test( + &self, + swarm: &mut dyn Swarm, + _report: &mut TestReport, + duration: Duration, + ) -> Result<()> { let start = Instant::now(); let runtime = Runtime::new().unwrap(); @@ -56,7 +61,7 @@ impl NetworkLoadTest for ValidatorRebootStressTest { } impl NetworkTest for ValidatorRebootStressTest { - fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> { + fn run(&self, ctx: &mut NetworkContext<'_>) -> Result<()> { ::run(self, ctx) } } diff --git a/testsuite/testcases/tests/forge-local-compatibility.rs b/testsuite/testcases/tests/forge-local-compatibility.rs index e7c75fcebec7b..636b121676cca 100644 --- 
a/testsuite/testcases/tests/forge-local-compatibility.rs +++ b/testsuite/testcases/tests/forge-local-compatibility.rs @@ -12,7 +12,7 @@ fn main() -> Result<()> { let tests = ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(4).unwrap()) .with_initial_version(InitialVersion::Oldest) - .with_network_tests(vec![&SimpleValidatorUpgrade]); + .add_network_test(SimpleValidatorUpgrade); let options = Options::from_args(); forge_main( diff --git a/testsuite/testcases/tests/forge-local-performance.rs b/testsuite/testcases/tests/forge-local-performance.rs index 8d0f21aa54bae..5516c270c3b2b 100644 --- a/testsuite/testcases/tests/forge-local-performance.rs +++ b/testsuite/testcases/tests/forge-local-performance.rs @@ -16,7 +16,7 @@ fn main() -> Result<()> { let tests = ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(2).unwrap()) .with_initial_version(InitialVersion::Newest) - .with_network_tests(vec![&PerformanceBenchmark]) + .add_network_test(PerformanceBenchmark) .with_emit_job( EmitJobRequest::default() .mode(EmitJobMode::ConstTps { tps: 30 }) diff --git a/third_party/move/documentation/book/src/coding-conventions.md b/third_party/move/documentation/book/src/coding-conventions.md index caea845e411c2..654557f22c896 100644 --- a/third_party/move/documentation/book/src/coding-conventions.md +++ b/third_party/move/documentation/book/src/coding-conventions.md @@ -10,7 +10,7 @@ This section lays out some basic coding conventions for Move that the Move team - **Constant names**: should be upper camel case and begin with an `E` if they represent error codes (e.g., `EIndexOutOfBounds`) and upper snake case if they represent a non-error value (e.g., `MIN_STAKE`). - - **Generic type names**: should be descriptive, or anti-descriptive where appropriate, e.g., `T` or `Element` for the Vector generic type parameter. Most of the time the "main" type in a module should be the same name as the module e.g., `option::Option`, `fixed_point32::FixedPoint32`. -- **Module file names**: should be the same as the module name e.g., `Option.move`. +- **Module file names**: should be the same as the module name e.g., `option.move`. - **Script file names**: should be lower snake case and should match the name of the “main” function in the script. - **Mixed file names**: If the file contains multiple modules and/or scripts, the file name should be lower snake case, where the name does not match any particular module/script inside. 
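Editor's note: the two forge-local test diffs above replace `.with_network_tests(vec![&...])` with the owning `.add_network_test(...)` builder. A minimal sketch of how the new multi-region emulation test introduced earlier in this change could be registered through that API follows; the crate/module paths and suite parameters are assumptions for illustration, not part of this diff.

// Hypothetical wiring, for illustration only. Assumes the new test lives in the
// testcases crate (module path assumed) and that `ForgeConfig`, `InitialVersion`
// and `.add_network_test` are as shown in the forge-local tests above.
use aptos_forge::{ForgeConfig, InitialVersion};
use aptos_testcases::multi_region_network_test::MultiRegionNetworkEmulationTest; // assumed path
use std::num::NonZeroUsize;

fn example_multi_region_suite() {
    let _tests = ForgeConfig::default()
        .with_initial_validator_count(NonZeroUsize::new(8).unwrap())
        .with_initial_version(InitialVersion::Newest)
        // `override_config: None` falls back to MultiRegionNetworkEmulationConfig::default(),
        // i.e. the bundled four-region link stats table with intra-region netem enabled.
        .add_network_test(MultiRegionNetworkEmulationTest {
            override_config: None,
        });
}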
diff --git a/third_party/move/documentation/book/translations/move-book-zh/src/coding-conventions.md b/third_party/move/documentation/book/translations/move-book-zh/src/coding-conventions.md index 0ea7920b8d0b8..e5f1e6a0417ab 100644 --- a/third_party/move/documentation/book/translations/move-book-zh/src/coding-conventions.md +++ b/third_party/move/documentation/book/translations/move-book-zh/src/coding-conventions.md @@ -9,7 +9,7 @@ - **函数名称**:应该使用小写的蛇形命名法,例如:`destroy_empty`。 - **常量名称**:应该使用大写的蛇形命名法,例如:`REQUIRES_CAPABILITY`。 - 泛型类型应该具备描述性,当然在适当的情况下也可以是反描述性的,例如:Vector 泛型类型的参数可以是 `T` 或 `Element`。大多数情况下,模块中的“主”类型命名应该与模块名相同,例如:`option::Option`,`fixed_point32::FixedPoint32`。 -- **模块文件名称**:应该与模块名相同,例如:`Option.move`。 +- **模块文件名称**:应该与模块名相同,例如:`option.move`。 - **脚本文件名称**:应该使用小写的蛇形命名法,并且应该与脚本中的“主”函数名匹配。 - **混合文件名称**:如果文件包含多个模块和/或脚本,文件命名应该使用小写的蛇形命名法,并且不需要与内部的任何特定模块/脚本名匹配。 diff --git a/third_party/move/evm/move-to-yul/src/context.rs b/third_party/move/evm/move-to-yul/src/context.rs index 11f45c4c85a86..4a8998dde9924 100644 --- a/third_party/move/evm/move-to-yul/src/context.rs +++ b/third_party/move/evm/move-to-yul/src/context.rs @@ -174,7 +174,7 @@ impl<'a> Context<'a> { .unwrap_or_else(|_| PathBuf::from(".")) .to_string_lossy() .to_string() - + &std::path::MAIN_SEPARATOR.to_string(); + + std::path::MAIN_SEPARATOR_STR; if file_path.starts_with(¤t_dir) { file_path[current_dir.len()..].to_string() } else { @@ -255,7 +255,6 @@ impl<'a> Context<'a> { pub fn derive_contracts(&self) -> Vec { self.env .get_modules() - .into_iter() .filter_map(|ref m| { if is_evm_contract_module(m) { Some(self.extract_contract(m)) diff --git a/third_party/move/extensions/async/move-async-vm/tests/testsuite.rs b/third_party/move/extensions/async/move-async-vm/tests/testsuite.rs index 705c4176b9f6e..28bec3e0cb2cc 100644 --- a/third_party/move/extensions/async/move-async-vm/tests/testsuite.rs +++ b/third_party/move/extensions/async/move-async-vm/tests/testsuite.rs @@ -23,7 +23,7 @@ use move_core_types::{ identifier::{IdentStr, Identifier}, language_storage::{ModuleId, StructTag}, metadata::Metadata, - resolver::{ModuleResolver, ResourceResolver}, + resolver::{resource_size, ModuleResolver, ResourceResolver}, }; use move_prover_test_utils::{baseline_test::verify_or_update_baseline, extract_test_directives}; use move_vm_test_utils::gas_schedule::GasStatus; @@ -398,14 +398,15 @@ impl<'a> ResourceResolver for HarnessProxy<'a> { address: &AccountAddress, typ: &StructTag, _metadata: &[Metadata], - ) -> Result>, Error> { + ) -> anyhow::Result<(Option>, usize)> { let res = self .harness .resource_store .borrow() .get(&(*address, typ.clone())) .cloned(); - Ok(res) + let res_size = resource_size(&res); + Ok((res, res_size)) } } diff --git a/third_party/move/move-binary-format/Cargo.toml b/third_party/move/move-binary-format/Cargo.toml index 248035640f737..9672cc7d1d448 100644 --- a/third_party/move/move-binary-format/Cargo.toml +++ b/third_party/move/move-binary-format/Cargo.toml @@ -11,8 +11,9 @@ edition = "2021" [dependencies] anyhow = "1.0.52" -arbitrary = { version = "1.1.7", optional = true, features = ["derive"] } indexmap = "1.9.3" +arbitrary = { version = "1.3.0", optional = true, features = ["derive"] } +dearbitrary = { git = "https://github.com/otter-sec/dearbitrary", features = ["derive"], optional = true } move-core-types = { path = "../move-core/types" } once_cell = "1.7.2" proptest = { version = "1.0.0", optional = true } @@ -29,4 +30,4 @@ serde_json = "1.0.64" [features] default = [] -fuzzing = ["proptest", 
"proptest-derive", "arbitrary", "move-core-types/fuzzing"] +fuzzing = ["proptest", "proptest-derive", "arbitrary", "move-core-types/fuzzing", "dearbitrary"] diff --git a/third_party/move/move-binary-format/src/file_format.rs b/third_party/move/move-binary-format/src/file_format.rs index bc11c05835b6e..b0cdde0524d0a 100644 --- a/third_party/move/move-binary-format/src/file_format.rs +++ b/third_party/move/move-binary-format/src/file_format.rs @@ -60,7 +60,7 @@ macro_rules! define_index { #[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] - #[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] + #[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] #[doc=$comment] pub struct $name(pub TableIndex); @@ -217,7 +217,7 @@ pub const NO_TYPE_ARGUMENTS: SignatureIndex = SignatureIndex(0); #[derive(Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct ModuleHandle { /// Index into the `AddressIdentifierIndex`. Identifies module-holding account's address. pub address: AddressIdentifierIndex, @@ -241,7 +241,7 @@ pub struct ModuleHandle { #[derive(Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct StructHandle { /// The module that defines the type. pub module: ModuleHandleIndex, @@ -265,7 +265,7 @@ impl StructHandle { #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct StructTypeParameter { /// The type parameter constraints. pub constraints: AbilitySet, @@ -283,7 +283,7 @@ pub struct StructTypeParameter { #[derive(Clone, Debug, Eq, Hash, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(params = "usize"))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct FunctionHandle { /// The module that defines the function. 
pub module: ModuleHandleIndex, @@ -301,7 +301,7 @@ pub struct FunctionHandle { #[derive(Clone, Debug, Eq, Hash, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct FieldHandle { pub owner: StructDefinitionIndex, pub field: MemberCount, @@ -314,7 +314,7 @@ pub struct FieldHandle { #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub enum StructFieldInformation { Native, Declared(Vec), @@ -332,7 +332,7 @@ pub enum StructFieldInformation { #[derive(Clone, Debug, Eq, Hash, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct StructDefInstantiation { pub def: StructDefinitionIndex, pub type_parameters: SignatureIndex, @@ -342,7 +342,7 @@ pub struct StructDefInstantiation { #[derive(Clone, Debug, Eq, Hash, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct FunctionInstantiation { pub handle: FunctionHandleIndex, pub type_parameters: SignatureIndex, @@ -357,7 +357,7 @@ pub struct FunctionInstantiation { #[derive(Clone, Debug, Eq, Hash, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct FieldInstantiation { pub handle: FieldHandleIndex, pub type_parameters: SignatureIndex, @@ -368,7 +368,7 @@ pub struct FieldInstantiation { #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct StructDefinition { /// The `StructHandle` for this `StructDefinition`. This has the name and the abilities /// for the type. @@ -401,7 +401,7 @@ impl StructDefinition { #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct FieldDefinition { /// The name of the field. 
pub name: IdentifierIndex, @@ -411,13 +411,14 @@ pub struct FieldDefinition { /// `Visibility` restricts the accessibility of the associated entity. /// - For function visibility, it restricts who may call into the associated function. -#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] #[repr(u8)] pub enum Visibility { /// Accessible within its defining module only. + #[default] Private = 0x0, /// Accessible by any module or script outside of its declaring module. Public = 0x1, @@ -432,12 +433,6 @@ impl Visibility { pub const DEPRECATED_SCRIPT: u8 = 0x2; } -impl Default for Visibility { - fn default() -> Self { - Visibility::Private - } -} - impl std::convert::TryFrom for Visibility { type Error = (); @@ -456,7 +451,7 @@ impl std::convert::TryFrom for Visibility { #[derive(Clone, Debug, Default, Eq, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(params = "usize"))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct FunctionDefinition { /// The prototype of the function (module, name, signature). pub function: FunctionHandleIndex, @@ -507,7 +502,7 @@ impl FunctionDefinition { #[derive(Clone, Debug, Eq, Hash, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct TypeSignature(pub SignatureToken); // TODO: remove at some point or move it in the front end (language/move-ir-compiler) @@ -516,7 +511,7 @@ pub struct TypeSignature(pub SignatureToken); #[derive(Clone, Debug, Eq, Hash, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(params = "usize"))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct FunctionSignature { /// The list of return types. 
#[cfg_attr( @@ -541,7 +536,7 @@ pub struct FunctionSignature { #[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Ord, PartialOrd)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(params = "usize"))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct Signature( #[cfg_attr( any(test, feature = "fuzzing"), @@ -572,7 +567,7 @@ pub type TypeParameterIndex = u16; #[repr(u8)] #[derive(Debug, Clone, Eq, Copy, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub enum Ability { /// Allows values of types with this ability to be copied, via CopyLoc or ReadRef Copy = 0x1, @@ -630,7 +625,7 @@ impl Ability { /// A set of `Ability`s #[derive(Clone, Eq, Copy, Hash, Ord, PartialEq, PartialOrd, Serialize, Deserialize)] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct AbilitySet(u8); impl AbilitySet { @@ -861,7 +856,7 @@ impl Arbitrary for AbilitySet { /// A SignatureToken can express more types than the VM can handle safely, and correctness is /// enforced by the verifier. #[derive(Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub enum SignatureToken { /// Boolean, `true` or `false`. Bool, @@ -1139,7 +1134,7 @@ impl SignatureToken { /// A `Constant` is a serialized value along with its type. That type will be deserialized by the /// loader/evauluator #[derive(Clone, Debug, Eq, PartialEq, Hash)] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct Constant { pub type_: SignatureToken, pub data: Vec, @@ -1149,7 +1144,7 @@ pub struct Constant { #[derive(Clone, Debug, Default, Eq, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(params = "usize"))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct CodeUnit { /// List of locals type. All locals are typed. pub locals: SignatureIndex, @@ -1169,7 +1164,7 @@ pub struct CodeUnit { #[derive(Clone, Hash, Eq, VariantCount, PartialEq)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub enum Bytecode { /// Pop and discard the value at the top of the stack. /// The value on the stack must be an copyable type. @@ -1813,6 +1808,7 @@ impl Bytecode { /// A CompiledScript defines the constant pools (string, address, signatures, etc.), the handle /// tables (external code references) and it has a `main` definition. 
#[derive(Clone, Default, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct CompiledScript { /// Version number found during deserialization pub version: u32, @@ -1855,7 +1851,7 @@ impl CompiledScript { /// /// A module is published as a single entry and it is retrieved as a single blob. #[derive(Clone, Debug, Default, Eq, PartialEq)] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct CompiledModule { /// Version number found during deserialization pub version: u32, diff --git a/third_party/move/move-command-line-common/src/types.rs b/third_party/move/move-command-line-common/src/types.rs index 6045e30a3c5cc..30c6378830185 100644 --- a/third_party/move/move-command-line-common/src/types.rs +++ b/third_party/move/move-command-line-common/src/types.rs @@ -45,7 +45,7 @@ pub enum ParsedType { } impl Display for TypeToken { - fn fmt<'f>(&self, formatter: &mut fmt::Formatter<'f>) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { let s = match *self { TypeToken::Whitespace => "[whitespace]", TypeToken::Ident => "[identifier]", diff --git a/third_party/move/move-command-line-common/src/values.rs b/third_party/move/move-command-line-common/src/values.rs index 54d1178f0ba61..915dedff84c00 100644 --- a/third_party/move/move-command-line-common/src/values.rs +++ b/third_party/move/move-command-line-common/src/values.rs @@ -120,7 +120,7 @@ impl ParsableValue for () { } impl Display for ValueToken { - fn fmt<'f>(&self, formatter: &mut fmt::Formatter<'f>) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { let s = match self { ValueToken::Number => "[num]", ValueToken::NumberTyped => "[num typed]", diff --git a/third_party/move/move-compiler/src/cfgir/translate.rs b/third_party/move/move-compiler/src/cfgir/translate.rs index bf3d963ccc4f2..1609edbe910bc 100644 --- a/third_party/move/move-compiler/src/cfgir/translate.rs +++ b/third_party/move/move-compiler/src/cfgir/translate.rs @@ -97,7 +97,7 @@ impl<'env> Context<'env> { // Returns the blocks inserted in insertion ordering pub fn finish_blocks(&mut self) -> (Label, BasicBlocks, Vec<(Label, BlockInfo)>) { self.next_label = None; - let start = mem::replace(&mut self.start, None); + let start = self.start.take(); let blocks = mem::take(&mut self.blocks); let block_ordering = mem::take(&mut self.block_ordering); let block_info = mem::take(&mut self.block_info); diff --git a/third_party/move/move-compiler/src/expansion/translate.rs b/third_party/move/move-compiler/src/expansion/translate.rs index a3aefcf21835d..4ea08bf3f83bf 100644 --- a/third_party/move/move-compiler/src/expansion/translate.rs +++ b/third_party/move/move-compiler/src/expansion/translate.rs @@ -2522,7 +2522,7 @@ fn check_valid_address_name_( fn check_valid_local_name(context: &mut Context, v: &Var) { fn is_valid(s: Symbol) -> bool { - s.starts_with('_') || s.starts_with(|c| matches!(c, 'a'..='z')) + s.starts_with('_') || s.starts_with(|c: char| c.is_ascii_lowercase()) } if !is_valid(v.value()) { let msg = format!( @@ -2682,7 +2682,7 @@ fn check_valid_module_member_name_impl( } pub fn is_valid_struct_constant_or_schema_name(s: &str) -> bool { - s.starts_with(|c| matches!(c, 'A'..='Z')) + s.starts_with(|c: char| c.is_ascii_uppercase()) } // Checks for a restricted name in any decl case diff --git 
a/third_party/move/move-compiler/src/hlir/translate.rs b/third_party/move/move-compiler/src/hlir/translate.rs index 704e48c73b3e2..2d5bba58bb863 100644 --- a/third_party/move/move-compiler/src/hlir/translate.rs +++ b/third_party/move/move-compiler/src/hlir/translate.rs @@ -804,8 +804,8 @@ fn exp( Box::new(exp_(context, result, expected_type_opt, te)) } -fn exp_<'env>( - context: &mut Context<'env>, +fn exp_( + context: &mut Context<'_>, result: &mut Block, initial_expected_type_opt: Option<&H::Type>, initial_e: T::Exp, diff --git a/third_party/move/move-compiler/src/parser/lexer.rs b/third_party/move/move-compiler/src/parser/lexer.rs index 8a620e59ec689..5fb32638a5832 100644 --- a/third_party/move/move-compiler/src/parser/lexer.rs +++ b/third_party/move/move-compiler/src/parser/lexer.rs @@ -84,7 +84,7 @@ pub enum Tok { } impl fmt::Display for Tok { - fn fmt<'f>(&self, formatter: &mut fmt::Formatter<'f>) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { use Tok::*; let s = match *self { EOF => "[end-of-file]", diff --git a/third_party/move/move-core/types/Cargo.toml b/third_party/move/move-core/types/Cargo.toml index fbfd938df03d0..93635947368d4 100644 --- a/third_party/move/move-core/types/Cargo.toml +++ b/third_party/move/move-core/types/Cargo.toml @@ -11,14 +11,15 @@ edition = "2021" [dependencies] anyhow = "1.0.52" -arbitrary = { version = "1.1.7", features = [ "derive_arbitrary"], optional = true } +arbitrary = { version = "1.3.0", optional = true, features = ["derive"] } +dearbitrary = { git = "https://github.com/otter-sec/dearbitrary", features = ["derive"], optional = true } ethnum = "1.0.4" hex = "0.4.3" num = "0.4.0" once_cell = "1.7.2" primitive-types = { version = "0.10.1", features = ["impl-serde"] } -proptest = { version = "1.0.0", default-features = false, optional = true } -proptest-derive = { version = "0.3.0", default-features = false, optional = true } +proptest = { version = "1.0.0", optional = true } +proptest-derive = { version = "0.3.0", optional = true } rand = "0.8.3" ref-cast = "1.0.6" serde = { version = "1.0.124", default-features = false } @@ -28,12 +29,11 @@ uint = "0.9.4" bcs = { workspace = true } [dev-dependencies] -arbitrary = { version = "1.1.7", features = [ "derive_arbitrary"] } -proptest = "1.0.0" +proptest = "1.1.0" proptest-derive = "0.3.0" regex = "1.5.5" serde_json = "1.0.64" [features] default = [] -fuzzing = ["proptest", "proptest-derive", "arbitrary"] +fuzzing = ["proptest", "proptest-derive", "arbitrary", "dearbitrary"] diff --git a/third_party/move/move-core/types/src/account_address.rs b/third_party/move/move-core/types/src/account_address.rs index f1d5d9356aef4..f9b8f35ebea42 100644 --- a/third_party/move/move-core/types/src/account_address.rs +++ b/third_party/move/move-core/types/src/account_address.rs @@ -11,7 +11,7 @@ use std::{convert::TryFrom, fmt, str::FromStr}; /// A struct that represents an account address. #[derive(Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)] #[cfg_attr(any(test, feature = "fuzzing"), derive(proptest_derive::Arbitrary))] -#[cfg_attr(any(test, feature = "fuzzing"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "fuzzing"), derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct AccountAddress([u8; AccountAddress::LENGTH]); impl AccountAddress { @@ -436,6 +436,7 @@ mod tests { } #[test] + #[allow(clippy::redundant_clone)] // Required to work around prop_assert_eq! 
limitations fn test_address_protobuf_roundtrip(addr in any::()) { let bytes = addr.to_vec(); prop_assert_eq!(bytes.clone(), addr.as_ref()); diff --git a/third_party/move/move-core/types/src/identifier.rs b/third_party/move/move-core/types/src/identifier.rs index 0aed508c74865..2781f8c50d27b 100644 --- a/third_party/move/move-core/types/src/identifier.rs +++ b/third_party/move/move-core/types/src/identifier.rs @@ -89,7 +89,7 @@ pub(crate) static ALLOWED_NO_SELF_IDENTIFIERS: &str = /// /// For more details, see the module level documentation. #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, Deserialize)] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct Identifier(Box); // An identifier cannot be mutated so use Box instead of String -- it is 1 word smaller. diff --git a/third_party/move/move-core/types/src/language_storage.rs b/third_party/move/move-core/types/src/language_storage.rs index ee9a88b9c76aa..119f2e56e41d2 100644 --- a/third_party/move/move-core/types/src/language_storage.rs +++ b/third_party/move/move-core/types/src/language_storage.rs @@ -23,6 +23,7 @@ pub const RESOURCE_TAG: u8 = 1; pub const CORE_CODE_ADDRESS: AccountAddress = AccountAddress::ONE; #[derive(Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, PartialOrd, Ord)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub enum TypeTag { // alias for compatibility with old json serialized data. #[serde(rename = "bool", alias = "Bool")] @@ -101,6 +102,7 @@ impl FromStr for TypeTag { } #[derive(Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, PartialOrd, Ord)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct StructTag { pub address: AccountAddress, pub module: Identifier, @@ -201,6 +203,7 @@ impl ResourceKey { /// the struct tag #[derive(Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, PartialOrd, Ord)] #[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))] pub struct ModuleId { address: AccountAddress, diff --git a/third_party/move/move-core/types/src/metadata.rs b/third_party/move/move-core/types/src/metadata.rs index 9191615d9c39d..e14fdb97ee194 100644 --- a/third_party/move/move-core/types/src/metadata.rs +++ b/third_party/move/move-core/types/src/metadata.rs @@ -3,7 +3,7 @@ /// Representation of metadata, #[derive(Clone, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub struct Metadata { /// The key identifying the type of metadata. 
pub key: Vec, diff --git a/third_party/move/move-core/types/src/resolver.rs b/third_party/move/move-core/types/src/resolver.rs index f61d82a23ae80..5a43faaced949 100644 --- a/third_party/move/move-core/types/src/resolver.rs +++ b/third_party/move/move-core/types/src/resolver.rs @@ -26,6 +26,10 @@ pub trait ModuleResolver { fn get_module(&self, id: &ModuleId) -> Result>, Error>; } +pub fn resource_size(resource: &Option>) -> usize { + resource.as_ref().map(|bytes| bytes.len()).unwrap_or(0) +} + /// A persistent storage backend that can resolve resources by address + type /// Storage backends should return /// - Ok(Some(..)) if the data exists @@ -41,7 +45,7 @@ pub trait ResourceResolver { address: &AccountAddress, typ: &StructTag, metadata: &[Metadata], - ) -> Result>, Error>; + ) -> Result<(Option>, usize), Error>; } /// A persistent storage implementation that can resolve both resources and modules @@ -51,7 +55,9 @@ pub trait MoveResolver: ModuleResolver + ResourceResolver { address: &AccountAddress, typ: &StructTag, ) -> Result>, Error> { - self.get_resource_with_metadata(address, typ, &self.get_module_metadata(&typ.module_id())) + Ok(self + .get_resource_with_metadata(address, typ, &self.get_module_metadata(&typ.module_id()))? + .0) } } @@ -63,7 +69,7 @@ impl ResourceResolver for &T { address: &AccountAddress, tag: &StructTag, metadata: &[Metadata], - ) -> Result>, Error> { + ) -> Result<(Option>, usize), Error> { (**self).get_resource_with_metadata(address, tag, metadata) } } diff --git a/third_party/move/move-core/types/src/u256.rs b/third_party/move/move-core/types/src/u256.rs index b73647ea06fe8..44153b1c169b8 100644 --- a/third_party/move/move-core/types/src/u256.rs +++ b/third_party/move/move-core/types/src/u256.rs @@ -705,6 +705,13 @@ impl<'a> arbitrary::Arbitrary<'a> for U256 { } } +#[cfg(any(test, feature = "fuzzing"))] +impl dearbitrary::Dearbitrary for U256 { + fn dearbitrary(&self, dearbitrator: &mut dearbitrary::Dearbitrator) { + self.to_le_bytes().dearbitrary(dearbitrator) + } +} + #[test] fn wrapping_add() { // a + b overflows U256::MAX by 100 diff --git a/third_party/move/move-core/types/src/value.rs b/third_party/move/move-core/types/src/value.rs index b0caed49d94bd..c9354653c93f3 100644 --- a/third_party/move/move-core/types/src/value.rs +++ b/third_party/move/move-core/types/src/value.rs @@ -29,6 +29,7 @@ pub const MOVE_STRUCT_TYPE: &str = "type"; pub const MOVE_STRUCT_FIELDS: &str = "fields"; #[derive(Debug, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub enum MoveStruct { /// The representation used by the MoveVM Runtime(Vec), @@ -42,6 +43,7 @@ pub enum MoveStruct { } #[derive(Debug, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary,dearbitrary::Dearbitrary))] pub enum MoveValue { U8(u8), U64(u64), @@ -59,6 +61,7 @@ pub enum MoveValue { /// A layout associated with a named field #[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "fuzzing"), derive(arbitrary::Arbitrary))] pub struct MoveFieldLayout { pub name: Identifier, pub layout: MoveTypeLayout, @@ -71,6 +74,7 @@ impl MoveFieldLayout { } #[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "fuzzing"), derive(arbitrary::Arbitrary))] pub enum MoveStructLayout { /// The representation used by the MoveVM Runtime(Vec), @@ -84,6 +88,7 @@ pub enum MoveStructLayout { } #[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "fuzzing"), 
derive(arbitrary::Arbitrary))] pub enum MoveTypeLayout { #[serde(rename(serialize = "bool", deserialize = "bool"))] Bool, diff --git a/third_party/move/move-ir-compiler/move-ir-to-bytecode/syntax/src/lexer.rs b/third_party/move/move-ir-compiler/move-ir-to-bytecode/syntax/src/lexer.rs index 4bc9b895d7ca2..1435d31df3770 100644 --- a/third_party/move/move-ir-compiler/move-ir-to-bytecode/syntax/src/lexer.rs +++ b/third_party/move/move-ir-compiler/move-ir-to-bytecode/syntax/src/lexer.rs @@ -403,7 +403,7 @@ impl<'input> Lexer<'input> { fn get_name_len(text: &str) -> usize { // If the first character is 0..=9 or EOF, then return a length of 0. let first_char = text.chars().next().unwrap_or('0'); - if ('0'..='9').contains(&first_char) { + if first_char.is_ascii_digit() { return 0; } text.chars() diff --git a/third_party/move/move-model/src/builder/module_builder.rs b/third_party/move/move-model/src/builder/module_builder.rs index 3226db0480166..0af5719318699 100644 --- a/third_party/move/move-model/src/builder/module_builder.rs +++ b/third_party/move/move-model/src/builder/module_builder.rs @@ -22,8 +22,8 @@ use crate::{ options::ModelBuilderOptions, pragmas::{ is_pragma_valid_for_block, is_property_valid_for_condition, CONDITION_ABSTRACT_PROP, - CONDITION_CONCRETE_PROP, CONDITION_DEACTIVATED_PROP, CONDITION_INJECTED_PROP, - OPAQUE_PRAGMA, VERIFY_PRAGMA, + CONDITION_CONCRETE_PROP, CONDITION_DEACTIVATED_PROP, CONDITION_EXPORT_PROP, + CONDITION_INJECTED_PROP, OPAQUE_PRAGMA, VERIFY_PRAGMA, }, symbol::{Symbol, SymbolPool}, ty::{PrimitiveType, Type, BOOL_TYPE}, @@ -1122,7 +1122,7 @@ impl<'env, 'translator> ModuleBuilder<'env, 'translator> { } let spec_fun_idx = spec_fun_id.as_usize(); let body = if self.spec_funs[spec_fun_idx].body.is_some() { - std::mem::replace(&mut self.spec_funs[spec_fun_idx].body, None).unwrap() + self.spec_funs[spec_fun_idx].body.take().unwrap() } else { // If the function is native and contains no mutable references // as parameters, consider it pure. @@ -2951,8 +2951,10 @@ impl<'env, 'translator> ModuleBuilder<'env, 'translator> { et.get_type_params() }; // Create a property marking this as injected. - let context_properties = + let mut context_properties = self.add_bool_property(PropertyBag::default(), CONDITION_INJECTED_PROP, true); + context_properties = + self.add_bool_property(context_properties, CONDITION_EXPORT_PROP, true); self.def_ana_schema_inclusion_outside_schema( loc, &SpecBlockContext::Function(fun_name), @@ -3078,7 +3080,7 @@ impl<'env, 'translator> ModuleBuilder<'env, 'translator> { // the full self. Rust requires us to do so (at least the author doesn't know better yet), // but moving it should be not too expensive. let body = if self.spec_funs[fun_idx].body.is_some() { - std::mem::replace(&mut self.spec_funs[fun_idx].body, None).unwrap() + self.spec_funs[fun_idx].body.take().unwrap() } else { // No body: assume it is pure. 
return; diff --git a/third_party/move/move-prover/bytecode/src/number_operation_analysis.rs b/third_party/move/move-prover/bytecode/src/number_operation_analysis.rs index d9ca376fa917f..13c145a24dd2a 100644 --- a/third_party/move/move-prover/bytecode/src/number_operation_analysis.rs +++ b/third_party/move/move-prover/bytecode/src/number_operation_analysis.rs @@ -84,7 +84,7 @@ impl NumberOperationProcessor { } } - fn analyze_fun<'a>(&self, env: &'a GlobalEnv, target: FunctionTarget) { + fn analyze_fun(&self, env: &'_ GlobalEnv, target: FunctionTarget) { if !target.func_env.is_native_or_intrinsic() { let cfg = StacklessControlFlowGraph::one_block(target.get_bytecode()); let analyzer = NumberOperationAnalysis { diff --git a/third_party/move/move-prover/move-docgen/src/docgen.rs b/third_party/move/move-prover/move-docgen/src/docgen.rs index f030f1fc58942..b0d1bbc014500 100644 --- a/third_party/move/move-prover/move-docgen/src/docgen.rs +++ b/third_party/move/move-prover/move-docgen/src/docgen.rs @@ -9,7 +9,7 @@ use log::{debug, info, warn}; use move_compiler::parser::keywords::{BUILTINS, CONTEXTUAL_KEYWORDS, KEYWORDS}; use move_core_types::account_address::AccountAddress; use move_model::{ - ast::{Address, ModuleName, SpecBlockInfo, SpecBlockTarget}, + ast::{Address, Attribute, AttributeValue, ModuleName, SpecBlockInfo, SpecBlockTarget}, code_writer::{CodeWriter, CodeWriterLabel}, emit, emitln, model::{ @@ -123,8 +123,8 @@ pub struct Docgen<'env> { /// Mapping from module id to the set of schemas defined in this module. /// We currently do not have this information in the environment. declared_schemas: BTreeMap>, - /// A list of file names and output generated for those files. - output: Vec<(String, String)>, + /// A map of file names to output generated for each file. + output: BTreeMap, /// Map from module id to information about this module. infos: BTreeMap, /// Current code writer. @@ -237,7 +237,15 @@ impl<'env> Docgen<'env> { if !info.is_included && m.is_target() { self.gen_module(&m, &info); let path = self.make_file_in_out_dir(&info.target_file); - self.output.push((path, self.writer.extract_result())); + match self.output.get_mut(&path) { + Some(out) => { + out.push_str("\n\n"); + out.push_str(&self.writer.extract_result()); + }, + None => { + self.output.insert(path, self.writer.extract_result()); + }, + } } } @@ -250,7 +258,7 @@ impl<'env> Docgen<'env> { { let trimmed_content = content.trim(); if !trimmed_content.is_empty() { - for (_, out) in self.output.iter_mut() { + for out in self.output.values_mut() { out.push_str("\n\n"); out.push_str(trimmed_content); out.push('\n'); @@ -265,6 +273,9 @@ impl<'env> Docgen<'env> { } self.output + .iter() + .map(|(a, b)| (a.clone(), b.clone())) + .collect() } /// Compute the schemas declared in all modules. This information is currently not directly @@ -372,10 +383,10 @@ impl<'env> Docgen<'env> { } // Add result to output. - self.output.push(( + self.output.insert( self.make_file_in_out_dir(output_file_name), self.writer.extract_result(), - )); + ); } /// Compute ModuleInfo for all modules, considering root template content. @@ -477,7 +488,7 @@ impl<'env> Docgen<'env> { } } - /// Make a file name in the output directory. + /// Makes a file name in the output directory. fn make_file_in_out_dir(&self, name: &str) -> String { if self.options.compile_relative_to_output_dir { name.to_string() @@ -488,7 +499,7 @@ impl<'env> Docgen<'env> { } } - /// Make path relative to other path. + /// Makes path relative to other path. 
fn path_relative_to(&self, path: &Path, to: &Path) -> PathBuf { if path.is_absolute() || to.is_absolute() { path.to_path_buf() @@ -501,6 +512,68 @@ impl<'env> Docgen<'env> { } } + /// Gets a readable version of an attribute. + fn gen_attribute(&self, attribute: &Attribute) -> String { + let annotation_body: String = match attribute { + Attribute::Apply(_node_id, symbol, attribute_vector) => { + let symbol_string = self.name_string(*symbol).to_string(); + if attribute_vector.is_empty() { + symbol_string + } else { + let value_string = self.gen_attributes(attribute_vector).iter().join(", "); + format!("{}({})", symbol_string, value_string) + } + }, + Attribute::Assign(_node_id, symbol, attribute_value) => { + let symbol_string = self.name_string(*symbol).to_string(); + match attribute_value { + AttributeValue::Value(_node_id, value) => { + let value_string = self.env.display(value); + format!("{} = {}", symbol_string, value_string) + }, + AttributeValue::Name(_node_id, module_name_option, symbol2) => { + let symbol2_name = self.name_string(*symbol2).to_string(); + let module_prefix = match module_name_option { + None => "".to_string(), + Some(ref module_name) => { + format!("{}::", module_name.display_full(self.env)) + }, + }; + format!("{} = {}{}", symbol_string, module_prefix, symbol2_name) + }, + } + }, + }; + annotation_body + } + + /// Returns attributes as vector of Strings like #[attr]. + fn gen_attributes(&self, attributes: &[Attribute]) -> Vec { + if !attributes.is_empty() { + attributes + .iter() + .map(|attr| format!("#[{}]", self.gen_attribute(attr))) + .collect::>() + } else { + vec![] + } + } + + /// Emits a labelled md-formatted attributes list if attributes_slice is non-empty. + fn emit_attributes_list(&self, attributes_slice: &[Attribute]) { + // Any attributes + let attributes = self + .gen_attributes(attributes_slice) + .iter() + .map(|attr| format!("\n - `{}`", attr)) + .join(""); + if !attributes.is_empty() { + emit!(self.writer, "\n\n- Attributes:"); + emit!(self.writer, &attributes); + emit!(self.writer, "\n\n"); + } + } + /// Generates documentation for a module. The result is written into the current code /// writer. Writer and other state is initialized if this module is standalone. fn gen_module(&mut self, module_env: &ModuleEnv<'env>, info: &ModuleInfo) { @@ -536,6 +609,9 @@ impl<'env> Docgen<'env> { self.increment_section_nest(); + // Emit a list of attributes if non-empty. + self.emit_attributes_list(module_env.get_attributes()); + // Document module overview. 
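The new `gen_attribute`/`gen_attributes`/`emit_attributes_list` helpers above render the model's attribute AST as `#[...]` strings that get prepended to module, struct, and function signatures (as the new `attribute_placement` golden files below show). A simplified, self-contained sketch of that rendering, with a toy enum standing in for `move_model::ast::Attribute`:

```rust
// Simplified stand-in for move_model::ast::Attribute / AttributeValue.
enum Attr {
    Apply(String, Vec<Attr>), // e.g. resource_group(scope = global)
    Assign(String, String),   // e.g. group = 0x1::string::String
}

fn gen_attribute(attr: &Attr) -> String {
    match attr {
        Attr::Apply(name, nested) if nested.is_empty() => name.clone(),
        Attr::Apply(name, nested) => {
            let inner: Vec<String> = nested.iter().map(gen_attribute).collect();
            format!("{}({})", name, inner.join(", "))
        },
        Attr::Assign(name, value) => format!("{} = {}", name, value),
    }
}

/// Renders each attribute as `#[...]` on its own line, as the docgen change
/// does in front of struct and function signatures.
fn gen_attributes(attrs: &[Attr]) -> String {
    attrs
        .iter()
        .map(|a| format!("#[{}]\n", gen_attribute(a)))
        .collect()
}

fn main() {
    let attrs = vec![
        Attr::Apply("attr6".to_string(), vec![]),
        Attr::Apply(
            "resource_group_member".to_string(),
            vec![Attr::Assign("group".to_string(), "0x1::string::String".to_string())],
        ),
    ];
    print!("{}public fun foo()", gen_attributes(&attrs));
}
```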
self.doc_text(module_env.get_doc()); @@ -973,11 +1049,17 @@ impl<'env> Docgen<'env> { let name = self.name_string(struct_env.get_name()); let type_params = self.type_parameter_list_display(struct_env.get_type_parameters()); let ability_tokens = self.ability_tokens(struct_env.get_abilities()); + let attributes_string = self + .gen_attributes(struct_env.get_attributes()) + .iter() + .map(|attr| format!("{}\n", attr)) + .join(""); if ability_tokens.is_empty() { - format!("struct {}{}", name, type_params) + format!("{}struct {}{}", attributes_string, name, type_params) } else { format!( - "struct {}{} has {}", + "{}struct {}{} has {}", + attributes_string, name, type_params, ability_tokens.join(", ") @@ -1080,8 +1162,14 @@ impl<'env> Docgen<'env> { } else { "".to_owned() }; + let attributes_string = self + .gen_attributes(func_env.get_attributes()) + .iter() + .map(|attr| format!("{}\n", attr)) + .join(""); format!( - "{}{}fun {}{}({}){}", + "{}{}{}fun {}{}({}){}", + attributes_string, func_env.visibility_str(), entry_str, name, diff --git a/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.move b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.move new file mode 100644 index 0000000000000..ed23d6ad79df5 --- /dev/null +++ b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.move @@ -0,0 +1,53 @@ +#[attr1] +address 0x42 { +#[attr2] +#[attr7] +module M { + #[attr3] + use 0x42::N; + + #[attr4] + struct S {} + + #[attr4b] + #[resource_group(scope = global)] + struct T {} + + #[attr2] + #[attr5] + const C: u64 = 0; + + #[attr6] + #[resource_group_member(group = std::string::String)] + public fun foo() { N::bar() } + + #[attr7] + spec foo {} +} +} + +#[attr8] +module 0x42::N { + #[attr9] + friend 0x42::M; + + #[attr10] + public fun bar() {} +} + +#[attr11] +script { + #[attr12] + use 0x42::M; + + #[attr13] + const C: u64 = 0; + + #[attr14] + fun main() { + M::foo(); + } + + #[attr15] + spec main { } +} diff --git a/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_inline.md b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_inline.md new file mode 100644 index 0000000000000..8483491112ac1 --- /dev/null +++ b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_inline.md @@ -0,0 +1,236 @@ + + + +# Module `0x42::N` + + + +- Attributes: + - `#[attr8]` + + + +- [Function `bar`](#0x42_N_bar) + + +
+ + + + + +## Function `bar` + + + +
#[attr10]
+public fun bar()
+
+ + + +
+Implementation + + +
public fun bar() {}
+
+ + + +
+ + + + + +# Module `0x42::M` + + + +- Attributes: + - `#[attr2]` + - `#[attr7]` + + + +- [Struct `S`](#0x42_M_S) +- [Struct `T`](#0x42_M_T) +- [Constants](#@Constants_0) +- [Function `foo`](#0x42_M_foo) + + +
use 0x42::N;
+
+ + + + + +## Struct `S` + + + +
#[attr4]
+struct S
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `T` + + + +
#[attr4b]
+#[resource_group(#[scope = global])]
+struct T
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const C: u64 = 0;
+
+ + + + + +## Function `foo` + + + +
#[attr6]
+#[resource_group_member(#[group = 0x1::string::String])]
+public fun foo()
+
+ + + +
+Implementation + + +
public fun foo() { N::bar() }
+
+ + + +
+ +
+Specification + + + +
+ + + + + +# Module `0x1::main` + + + +- Attributes: + - `#[attr11]` + + + +- [Constants](#@Constants_0) +- [Function `main`](#0x1_main_main) + + +
use 0x42::M;
+
+ + + + + +## Constants + + + + + + +
const C: u64 = 0;
+
+ + + + + +## Function `main` + + + +
#[attr14]
+fun main()
+
+ + + +
+Implementation + + +
fun main() {
+    M::foo();
+}
+
+ + + +
+ +
+Specification + + + +
diff --git a/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_inline_no_fold.md b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_inline_no_fold.md new file mode 100644 index 0000000000000..ccc3e9c24e0bb --- /dev/null +++ b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_inline_no_fold.md @@ -0,0 +1,209 @@ + + + +# Module `0x42::N` + + + +- Attributes: + - `#[attr8]` + + + +- [Function `bar`](#0x42_N_bar) + + +
+ + + + + +## Function `bar` + + + +
#[attr10]
+public fun bar()
+
+ + + +##### Implementation + + +
public fun bar() {}
+
+ + + + + +# Module `0x42::M` + + + +- Attributes: + - `#[attr2]` + - `#[attr7]` + + + +- [Struct `S`](#0x42_M_S) +- [Struct `T`](#0x42_M_T) +- [Constants](#@Constants_0) +- [Function `foo`](#0x42_M_foo) + + +
use 0x42::N;
+
+ + + + + +## Struct `S` + + + +
#[attr4]
+struct S
+
+ + + +##### Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + + + +## Struct `T` + + + +
#[attr4b]
+#[resource_group(#[scope = global])]
+struct T
+
+ + + +##### Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + + + +## Constants + + + + + + +
const C: u64 = 0;
+
+ + + + + +## Function `foo` + + + +
#[attr6]
+#[resource_group_member(#[group = 0x1::string::String])]
+public fun foo()
+
+ + + +##### Implementation + + +
public fun foo() { N::bar() }
+
+ + + +##### Specification + + + + + +# Module `0x1::main` + + + +- Attributes: + - `#[attr11]` + + + +- [Constants](#@Constants_0) +- [Function `main`](#0x1_main_main) + + +
use 0x42::M;
+
+ + + + + +## Constants + + + + + + +
const C: u64 = 0;
+
+ + + + + +## Function `main` + + + +
#[attr14]
+fun main()
+
+ + + +##### Implementation + + +
fun main() {
+    M::foo();
+}
+
+ + + +##### Specification diff --git a/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_separate.md b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_separate.md new file mode 100644 index 0000000000000..03e7bb08144ed --- /dev/null +++ b/third_party/move/move-prover/move-docgen/tests/sources/attribute_placement.spec_separate.md @@ -0,0 +1,255 @@ + + + +# Module `0x42::N` + + + +- Attributes: + - `#[attr8]` + + + +- [Function `bar`](#0x42_N_bar) + + +
+ + + + + +## Function `bar` + + + +
#[attr10]
+public fun bar()
+
+ + + +
+Implementation + + +
public fun bar() {}
+
+ + + +
+ + + + + +# Module `0x42::M` + + + +- Attributes: + - `#[attr2]` + - `#[attr7]` + + + +- [Struct `S`](#0x42_M_S) +- [Struct `T`](#0x42_M_T) +- [Constants](#@Constants_0) +- [Function `foo`](#0x42_M_foo) +- [Specification](#@Specification_1) + - [Function `foo`](#@Specification_1_foo) + + +
use 0x42::N;
+
+ + + + + +## Struct `S` + + + +
#[attr4]
+struct S
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `T` + + + +
#[attr4b]
+#[resource_group(#[scope = global])]
+struct T
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const C: u64 = 0;
+
+ + + + + +## Function `foo` + + + +
#[attr6]
+#[resource_group_member(#[group = 0x1::string::String])]
+public fun foo()
+
+ + + +
+Implementation + + +
public fun foo() { N::bar() }
+
+ + + +
+ + + +## Specification + + + + +### Function `foo` + + +
#[attr6]
+#[resource_group_member(#[group = 0x1::string::String])]
+public fun foo()
+
+ + + + + +# Module `0x1::main` + + + +- Attributes: + - `#[attr11]` + + + +- [Constants](#@Constants_0) +- [Function `main`](#0x1_main_main) +- [Specification](#@Specification_1) + - [Function `main`](#@Specification_1_main) + + +
use 0x42::M;
+
+ + + + + +## Constants + + + + + + +
const C: u64 = 0;
+
+ + + + + +## Function `main` + + + +
#[attr14]
+fun main()
+
+ + + +
+Implementation + + +
fun main() {
+    M::foo();
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `main` + + +
#[attr14]
+fun main()
+
diff --git a/third_party/move/move-prover/tests/sources/functional/schema_apply.exp b/third_party/move/move-prover/tests/sources/functional/schema_apply.exp new file mode 100644 index 0000000000000..226c05ee84a21 --- /dev/null +++ b/third_party/move/move-prover/tests/sources/functional/schema_apply.exp @@ -0,0 +1,8 @@ +Move prover returns: exiting with verification errors +error: precondition does not hold at this call + ┌─ tests/sources/functional/schema_apply.move:16:9 + │ +16 │ requires false; + │ ^^^^^^^^^^^^^^^ + │ + = at tests/sources/functional/schema_apply.move:16 diff --git a/third_party/move/move-prover/tests/sources/functional/schema_apply.move b/third_party/move/move-prover/tests/sources/functional/schema_apply.move new file mode 100644 index 0000000000000..91c6ca5230699 --- /dev/null +++ b/third_party/move/move-prover/tests/sources/functional/schema_apply.move @@ -0,0 +1,18 @@ +module 0x42::requires { + public fun g() { + f(); + } + + public fun f() { + } + spec f { + } + + spec module { + apply RequiresFalse to f; + } + + spec schema RequiresFalse { + requires false; + } +} diff --git a/third_party/move/move-stdlib/docs/vector.md b/third_party/move/move-stdlib/docs/vector.md index 6e03ecb1efa00..f175170a18286 100644 --- a/third_party/move/move-stdlib/docs/vector.md +++ b/third_party/move/move-stdlib/docs/vector.md @@ -68,7 +68,8 @@ The index into the vector is out of bounds Create an empty vector. -
public fun empty<Element>(): vector<Element>
+
#[bytecode_instruction]
+public fun empty<Element>(): vector<Element>
 
@@ -91,7 +92,8 @@ Create an empty vector. Return the length of the vector. -
public fun length<Element>(v: &vector<Element>): u64
+
#[bytecode_instruction]
+public fun length<Element>(v: &vector<Element>): u64
 
@@ -115,7 +117,8 @@ Acquire an immutable reference to the ith element of the vector i
is out of bounds. -
public fun borrow<Element>(v: &vector<Element>, i: u64): &Element
+
#[bytecode_instruction]
+public fun borrow<Element>(v: &vector<Element>, i: u64): &Element
 
@@ -138,7 +141,8 @@ Aborts if i is out of bounds. Add element e to the end of the vector v. -
public fun push_back<Element>(v: &mut vector<Element>, e: Element)
+
#[bytecode_instruction]
+public fun push_back<Element>(v: &mut vector<Element>, e: Element)
 
@@ -162,7 +166,8 @@ Return a mutable reference to the ith element in the vector v Aborts if i is out of bounds. -
public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element
+
#[bytecode_instruction]
+public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element
 
@@ -186,7 +191,8 @@ Pop an element from the end of vector v. Aborts if v is empty. -
public fun pop_back<Element>(v: &mut vector<Element>): Element
+
#[bytecode_instruction]
+public fun pop_back<Element>(v: &mut vector<Element>): Element
 
@@ -210,7 +216,8 @@ Destroy the vector v. Aborts if v is not empty. -
public fun destroy_empty<Element>(v: vector<Element>)
+
#[bytecode_instruction]
+public fun destroy_empty<Element>(v: vector<Element>)
 
@@ -234,7 +241,8 @@ Swaps the elements at the ith and jth indices in the v Aborts if i or j is out of bounds. -
public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64)
+
#[bytecode_instruction]
+public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64)
 
diff --git a/third_party/move/move-stdlib/nursery/tests/event_tests.move b/third_party/move/move-stdlib/nursery/tests/event_tests.move index df5a9e3e05e00..064ebb7877eeb 100644 --- a/third_party/move/move-stdlib/nursery/tests/event_tests.move +++ b/third_party/move/move-stdlib/nursery/tests/event_tests.move @@ -73,7 +73,7 @@ module std::event_tests { } #[test(s = @0x42)] - #[expected_failure(abort_code = 0, location = std::event)] + #[expected_failure] // VM_MAX_VALUE_DEPTH_REACHED fun test_event_129(s: signer) acquires MyEvent { event_129(&s); } diff --git a/third_party/move/move-stdlib/tests/bcs_tests.move b/third_party/move/move-stdlib/tests/bcs_tests.move index 9564f5a1c530a..cf16ce111371b 100644 --- a/third_party/move/move-stdlib/tests/bcs_tests.move +++ b/third_party/move/move-stdlib/tests/bcs_tests.move @@ -96,7 +96,7 @@ module std::bcs_tests { } #[test] - #[expected_failure(abort_code = 453, location = std::bcs)] + #[expected_failure] // VM_MAX_VALUE_DEPTH_REACHED fun encode_129() { bcs::to_bytes(&Box { x: box127(true) }); } diff --git a/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs b/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs index f73715b2869a1..530b316c35ce7 100644 --- a/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs +++ b/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs @@ -526,7 +526,7 @@ impl ResourceResolver for BogusStorage { _address: &AccountAddress, _tag: &StructTag, _metadata: &[Metadata], - ) -> Result>, anyhow::Error> { + ) -> anyhow::Result<(Option>, usize)> { Ok(Err( PartialVMError::new(self.bad_status_code).finish(Location::Undefined) )?) diff --git a/third_party/move/move-vm/runtime/src/config.rs b/third_party/move/move-vm/runtime/src/config.rs index faa54c80c7176..5c23b87133f07 100644 --- a/third_party/move/move-vm/runtime/src/config.rs +++ b/third_party/move/move-vm/runtime/src/config.rs @@ -4,6 +4,8 @@ use move_binary_format::file_format_common::VERSION_MAX; use move_bytecode_verifier::VerifierConfig; +pub const DEFAULT_MAX_VALUE_NEST_DEPTH: u64 = 128; + /// Dynamic config options for the Move VM. pub struct VMConfig { pub verifier: VerifierConfig, @@ -14,6 +16,8 @@ pub struct VMConfig { // When this flag is set to true, MoveVM will check invariant violation in swap_loc pub enable_invariant_violation_check_in_swap_loc: bool, pub type_size_limit: bool, + /// Maximum value nest depth for structs + pub max_value_nest_depth: Option, } impl Default for VMConfig { @@ -24,6 +28,7 @@ impl Default for VMConfig { paranoid_type_checks: false, enable_invariant_violation_check_in_swap_loc: true, type_size_limit: false, + max_value_nest_depth: Some(DEFAULT_MAX_VALUE_NEST_DEPTH), } } } diff --git a/third_party/move/move-vm/runtime/src/data_cache.rs b/third_party/move/move-vm/runtime/src/data_cache.rs index f48f4b0406209..095df781c3135 100644 --- a/third_party/move/move-vm/runtime/src/data_cache.rs +++ b/third_party/move/move-vm/runtime/src/data_cache.rs @@ -157,7 +157,7 @@ impl<'r> TransactionDataCache<'r> { map.get_mut(k).unwrap() } - // Retrieve data from the local cache or loads it from the remote cache into the local cache. + // Retrieves data from the local cache or loads it from the remote cache into the local cache. // All operations on the global data are based on this API and they all load the data // into the cache. 
pub(crate) fn load_resource( @@ -165,7 +165,7 @@ impl<'r> TransactionDataCache<'r> { loader: &Loader, addr: AccountAddress, ty: &Type, - ) -> PartialVMResult<(&mut GlobalValue, Option>)> { + ) -> PartialVMResult<(&mut GlobalValue, Option)> { let account_cache = Self::get_mut_or_insert_with(&mut self.account_map, &addr, || { (addr, AccountDataCache::new()) }); @@ -189,12 +189,17 @@ impl<'r> TransactionDataCache<'r> { None => &[], }; - let gv = match self + let (data, bytes_loaded) = self .remote .get_resource_with_metadata(&addr, &ty_tag, metadata) - { - Ok(Some(blob)) => { - load_res = Some(Some(NumBytes::new(blob.len() as u64))); + .map_err(|err| { + let msg = format!("Unexpected storage error: {:?}", err); + PartialVMError::new(StatusCode::STORAGE_ERROR).with_message(msg) + })?; + load_res = Some(NumBytes::new(bytes_loaded as u64)); + + let gv = match data { + Some(blob) => { let val = match Value::simple_deserialize(&blob, &ty_layout) { Some(val) => val, None => { @@ -209,14 +214,7 @@ impl<'r> TransactionDataCache<'r> { GlobalValue::cached(val)? }, - Ok(None) => { - load_res = Some(None); - GlobalValue::none() - }, - Err(err) => { - let msg = format!("Unexpected storage error: {:?}", err); - return Err(PartialVMError::new(StatusCode::STORAGE_ERROR).with_message(msg)); - }, + None => GlobalValue::none(), }; account_cache.data_map.insert(ty.clone(), (ty_layout, gv)); diff --git a/third_party/move/move-vm/runtime/src/interpreter.rs b/third_party/move/move-vm/runtime/src/interpreter.rs index c50bcf4466953..37352abf4ef01 100644 --- a/third_party/move/move-vm/runtime/src/interpreter.rs +++ b/third_party/move/move-vm/runtime/src/interpreter.rs @@ -544,21 +544,13 @@ impl Interpreter { ) -> PartialVMResult<&'c mut GlobalValue> { match data_store.load_resource(loader, addr, ty) { Ok((gv, load_res)) => { - if let Some(loaded) = load_res { - let opt = match loaded { - Some(num_bytes) => { - let view = gv.view().ok_or_else(|| { - PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) - .with_message( - "Failed to create view for global value".to_owned(), - ) - })?; - - Some((num_bytes, view)) - }, - None => None, - }; - gas_meter.charge_load_resource(addr, TypeWithLoader { ty, loader }, opt)?; + if let Some(bytes_loaded) = load_res { + gas_meter.charge_load_resource( + addr, + TypeWithLoader { ty, loader }, + gv.view(), + bytes_loaded, + )?; } Ok(gv) }, @@ -1008,6 +1000,91 @@ impl CallStack { } } +fn check_depth_of_type(resolver: &Resolver, ty: &Type) -> PartialVMResult<()> { + // Start at 1 since we always call this right before we add a new node to the value's depth. + let max_depth = match resolver.loader().vm_config().max_value_nest_depth { + Some(max_depth) => max_depth, + None => return Ok(()), + }; + check_depth_of_type_impl(resolver, ty, max_depth, 1)?; + Ok(()) +} + +fn check_depth_of_type_impl( + resolver: &Resolver, + ty: &Type, + max_depth: u64, + depth: u64, +) -> PartialVMResult { + macro_rules! 
check_depth { + ($additional_depth:expr) => {{ + let new_depth = depth.saturating_add($additional_depth); + if new_depth > max_depth { + return Err(PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED)); + } else { + new_depth + } + }}; + } + + // Calculate depth of the type itself + let ty_depth = match ty { + Type::Bool + | Type::U8 + | Type::U16 + | Type::U32 + | Type::U64 + | Type::U128 + | Type::U256 + | Type::Address + | Type::Signer => check_depth!(0), + // Even though this is recursive this is OK since the depth of this recursion is + // bounded by the depth of the type arguments, which we have already checked. + Type::Reference(ty) | Type::MutableReference(ty) | Type::Vector(ty) => { + check_depth_of_type_impl(resolver, ty, max_depth, check_depth!(1))? + }, + Type::Struct(si) => { + let struct_type = resolver.loader().get_struct_type(*si).ok_or_else(|| { + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message("Struct Definition not resolved".to_string()) + })?; + check_depth!(struct_type + .depth + .as_ref() + .ok_or_else(|| { PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED) })? + .solve(&[])) + }, + // NB: substitution must be performed before calling this function + Type::StructInstantiation(si, ty_args) => { + // Calculate depth of all type arguments, and make sure they themselves are not too deep. + let ty_arg_depths = ty_args + .iter() + .map(|ty| { + // Ty args should be fully resolved and not need any type arguments + check_depth_of_type_impl(resolver, ty, max_depth, check_depth!(0)) + }) + .collect::>>()?; + let struct_type = resolver.loader().get_struct_type(*si).ok_or_else(|| { + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message("Struct Definition not resolved".to_string()) + })?; + check_depth!(struct_type + .depth + .as_ref() + .ok_or_else(|| { PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED) })? + .solve(&ty_arg_depths)) + }, + Type::TyParam(_) => { + return Err( + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message("Type parameter should be fully resolved".to_string()), + ) + }, + }; + + Ok(ty_depth) +} + /// A `Frame` is the execution context for a function. It holds the locals of the function and /// the function itself. 
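Taken together, `check_depth_of_type` / `check_depth_of_type_impl` above compute the value depth of the type about to be constructed, saturating as they go, and fail with `VM_MAX_VALUE_DEPTH_REACHED` once `max_value_nest_depth` is exceeded; the `Pack`, `PackGeneric`, and `VecPack` arms below call this right before building the value. A stripped-down, stand-alone sketch of that recursion, with a toy `Ty` enum and a plain `u64` standing in for the cached `DepthFormula`:

```rust
const MAX_VALUE_NEST_DEPTH: u64 = 128;

// Toy runtime type: primitives, vectors, and structs whose depth was
// precomputed at load time (the role DepthFormula::solve plays in the patch).
enum Ty {
    Prim,
    Vector(Box<Ty>),
    Struct { precomputed_depth: u64 },
}

fn check_depth(ty: &Ty, depth: u64) -> Result<u64, &'static str> {
    let bump = |extra: u64| {
        let new_depth = depth.saturating_add(extra);
        if new_depth > MAX_VALUE_NEST_DEPTH {
            Err("VM_MAX_VALUE_DEPTH_REACHED")
        } else {
            Ok(new_depth)
        }
    };
    match ty {
        Ty::Prim => bump(0),
        // A vector adds one level, then recurses into its element type.
        Ty::Vector(elem) => check_depth(elem, bump(1)?),
        // A struct contributes its whole precomputed depth in one step.
        Ty::Struct { precomputed_depth } => bump(*precomputed_depth),
    }
}

fn main() {
    // vector<vector<u8>> checked starting at depth 1, as the interpreter does: fine.
    let shallow = Ty::Vector(Box::new(Ty::Vector(Box::new(Ty::Prim))));
    assert!(check_depth(&shallow, 1).is_ok());

    // A struct whose own nesting is already 128 deep pushes past the limit.
    let deep = Ty::Struct { precomputed_depth: 128 };
    assert!(check_depth(&deep, 1).is_err());
}
```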
// #[derive(Debug)] @@ -1872,6 +1949,8 @@ impl Frame { }, Bytecode::Pack(sd_idx) => { let field_count = resolver.field_count(*sd_idx); + let struct_type = resolver.get_struct_type(*sd_idx); + check_depth_of_type(resolver, &struct_type)?; gas_meter.charge_pack( false, interpreter.operand_stack.last_n(field_count as usize)?, @@ -1883,6 +1962,8 @@ impl Frame { }, Bytecode::PackGeneric(si_idx) => { let field_count = resolver.field_instantiation_count(*si_idx); + let ty = resolver.instantiate_generic_type(*si_idx, self.ty_args())?; + check_depth_of_type(resolver, &ty)?; gas_meter.charge_pack( true, interpreter.operand_stack.last_n(field_count as usize)?, @@ -2199,6 +2280,7 @@ impl Frame { }, Bytecode::VecPack(si, num) => { let ty = resolver.instantiate_single_type(*si, self.ty_args())?; + check_depth_of_type(resolver, &ty)?; gas_meter.charge_vec_pack( make_ty!(&ty), interpreter.operand_stack.last_n(*num as usize)?, diff --git a/third_party/move/move-vm/runtime/src/loader.rs b/third_party/move/move-vm/runtime/src/loader.rs index d1a6041376b4b..531c1c2339fc8 100644 --- a/third_party/move/move-vm/runtime/src/loader.rs +++ b/third_party/move/move-vm/runtime/src/loader.rs @@ -18,7 +18,7 @@ use move_binary_format::{ FieldHandleIndex, FieldInstantiationIndex, FunctionDefinition, FunctionDefinitionIndex, FunctionHandleIndex, FunctionInstantiationIndex, Signature, SignatureIndex, SignatureToken, StructDefInstantiationIndex, StructDefinition, StructDefinitionIndex, - StructFieldInformation, TableIndex, Visibility, + StructFieldInformation, TableIndex, TypeParameterIndex, Visibility, }, IndexKind, }; @@ -29,7 +29,9 @@ use move_core_types::{ value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout}, vm_status::StatusCode, }; -use move_vm_types::loaded_data::runtime_types::{CachedStructIndex, StructType, Type}; +use move_vm_types::loaded_data::runtime_types::{ + CachedStructIndex, DepthFormula, StructType, Type, +}; use parking_lot::RwLock; use sha3::{Digest, Sha3_256}; use std::{ @@ -201,6 +203,30 @@ impl ModuleCache { self.structs.truncate(starting_idx); err.finish(Location::Undefined) })?; + + let struct_defs_len = module.struct_defs.len(); + + let mut depth_cache = BTreeMap::new(); + + for cached_idx in starting_idx..(starting_idx + struct_defs_len) { + self.calculate_depth_of_struct(CachedStructIndex(cached_idx), &mut depth_cache) + .map_err(|err| err.finish(Location::Undefined))?; + } + debug_assert!(depth_cache.len() == struct_defs_len); + for (cache_idx, depth) in depth_cache { + match Arc::get_mut(self.structs.get_mut(cache_idx.0).unwrap()) { + Some(struct_type) => struct_type.depth = Some(depth), + None => { + // we have pending references to the `Arc` which is impossible, + // given the code that adds the `Arc` is above and no reference to + // it should exist. + // So in the spirit of not crashing we just leave it as None and + // log the issue. 
+ error!("Arc cannot have any live reference while publishing"); + }, + } + } + for (idx, func) in module.function_defs().iter().enumerate() { let findex = FunctionDefinitionIndex(idx as TableIndex); let mut function = Function::new(natives, findex, func, module); @@ -256,6 +282,7 @@ impl ModuleCache { name, module, struct_def: idx, + depth: None, } } @@ -366,7 +393,7 @@ impl ModuleCache { SignatureToken::U256 => Type::U256, SignatureToken::Address => Type::Address, SignatureToken::Signer => Type::Signer, - SignatureToken::TypeParameter(idx) => Type::TyParam(*idx as usize), + SignatureToken::TypeParameter(idx) => Type::TyParam(*idx), SignatureToken::Vector(inner_tok) => { let inner_type = Self::make_type_internal(module, inner_tok, resolver)?; Type::Vector(Box::new(inner_type)) @@ -457,6 +484,81 @@ impl ModuleCache { ), } } + + fn calculate_depth_of_struct( + &self, + def_idx: CachedStructIndex, + depth_cache: &mut BTreeMap, + ) -> PartialVMResult { + let struct_type = &self.struct_at(def_idx); + + // If we've already computed this structs depth, no more work remains to be done. + if let Some(form) = &struct_type.depth { + return Ok(form.clone()); + } + if let Some(form) = depth_cache.get(&def_idx) { + return Ok(form.clone()); + } + + let formulas = struct_type + .fields + .iter() + .map(|field_type| self.calculate_depth_of_type(field_type, depth_cache)) + .collect::>>()?; + let formula = DepthFormula::normalize(formulas); + let prev = depth_cache.insert(def_idx, formula.clone()); + if prev.is_some() { + return Err( + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message("Recursive type?".to_owned()), + ); + } + Ok(formula) + } + + fn calculate_depth_of_type( + &self, + ty: &Type, + depth_cache: &mut BTreeMap, + ) -> PartialVMResult { + Ok(match ty { + Type::Bool + | Type::U8 + | Type::U64 + | Type::U128 + | Type::Address + | Type::Signer + | Type::U16 + | Type::U32 + | Type::U256 => DepthFormula::constant(1), + Type::Vector(ty) | Type::Reference(ty) | Type::MutableReference(ty) => { + let mut inner = self.calculate_depth_of_type(ty, depth_cache)?; + inner.scale(1); + inner + }, + Type::TyParam(ty_idx) => DepthFormula::type_parameter(*ty_idx), + Type::Struct(cache_idx) => { + let mut struct_formula = self.calculate_depth_of_struct(*cache_idx, depth_cache)?; + debug_assert!(struct_formula.terms.is_empty()); + struct_formula.scale(1); + struct_formula + }, + Type::StructInstantiation(cache_idx, ty_args) => { + let ty_arg_map = ty_args + .iter() + .enumerate() + .map(|(idx, ty)| { + let var = idx as TypeParameterIndex; + Ok((var, self.calculate_depth_of_type(ty, depth_cache)?)) + }) + .collect::>>()?; + let struct_formula = self.calculate_depth_of_struct(*cache_idx, depth_cache)?; + let mut subst_struct_formula = struct_formula.subst(ty_arg_map)?; + subst_struct_formula.scale(1); + subst_struct_formula + }, + }) + } } // @@ -618,7 +720,7 @@ impl Loader { && type_arguments .iter() .map(|loaded_ty| self.count_type_nodes(loaded_ty)) - .sum::() + .sum::() > MAX_TYPE_INSTANTIATION_NODES { return Err( @@ -745,7 +847,7 @@ impl Loader { fn match_return_type<'a>( returned: &Type, expected: &'a Type, - map: &mut BTreeMap, + map: &mut BTreeMap, ) -> bool { match (returned, expected) { // The important case, deduce the type params @@ -839,7 +941,7 @@ impl Loader { let mut type_arguments = vec![]; let type_param_len = func.type_parameters().len(); for i in 0..type_param_len { - if let Option::Some(t) = map.get(&i) { + if let Option::Some(t) = map.get(&(i as u16)) { 
type_arguments.push((*t).clone()); } else { // Unknown type argument we are not able to infer the type arguments. @@ -1397,7 +1499,7 @@ impl Loader { } }, Type::StructInstantiation(_, struct_inst) => { - let mut sum_nodes: usize = 1; + let mut sum_nodes = 1u64; for ty in ty_args.iter().chain(struct_inst.iter()) { sum_nodes = sum_nodes.saturating_add(self.count_type_nodes(ty)); if sum_nodes > MAX_TYPE_INSTANTIATION_NODES { @@ -1591,7 +1693,7 @@ impl<'a> Resolver<'a> { } // Check if the function instantiation over all generics is larger // than MAX_TYPE_INSTANTIATION_NODES. - let mut sum_nodes: usize = 1; + let mut sum_nodes = 1u64; for ty in type_params.iter().chain(instantiation.iter()) { sum_nodes = sum_nodes.saturating_add(self.loader.count_type_nodes(ty)); if sum_nodes > MAX_TYPE_INSTANTIATION_NODES { @@ -1635,8 +1737,8 @@ impl<'a> Resolver<'a> { // Before instantiating the type, count the # of nodes of all type arguments plus // existing type instantiation. // If that number is larger than MAX_TYPE_INSTANTIATION_NODES, refuse to construct this type. - // This prevents constructing larger and lager types via struct instantiation. - let mut sum_nodes: usize = 1; + // This prevents constructing larger and larger types via struct instantiation. + let mut sum_nodes = 1u64; for ty in ty_args.iter().chain(struct_inst.instantiation.iter()) { sum_nodes = sum_nodes.saturating_add(self.loader.count_type_nodes(ty)); if sum_nodes > MAX_TYPE_INSTANTIATION_NODES { @@ -2587,8 +2689,8 @@ struct StructInfo { struct_tag: Option, struct_layout: Option, annotated_struct_layout: Option, - node_count: Option, - annotated_node_count: Option, + node_count: Option, + annotated_node_count: Option, } impl StructInfo { @@ -2616,15 +2718,15 @@ impl TypeCache { } /// Maximal depth of a value in terms of type depth. -const VALUE_DEPTH_MAX: usize = 128; +pub const VALUE_DEPTH_MAX: u64 = 128; /// Maximal nodes which are allowed when converting to layout. This includes the the types of /// fields for struct types. -const MAX_TYPE_TO_LAYOUT_NODES: usize = 256; +const MAX_TYPE_TO_LAYOUT_NODES: u64 = 256; /// Maximal nodes which are all allowed when instantiating a generic type. This does not include /// field types of structs. 
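The loader's `calculate_depth_of_struct` / `calculate_depth_of_type` above build a `DepthFormula` per struct at module-load time (the type itself is defined in `runtime_types.rs` further below): a formula of the shape `max(CBase, T1 + C1, T2 + C2, ...)` over the struct's type parameters, later solved with concrete type-argument depths. A trimmed-down sketch of that algebra; the real type keeps `constant` as an `Option` and `terms` as a vector, both simplified here:

```rust
use std::cmp::max;
use std::collections::BTreeMap;

/// max(constant, T_i + increment_i) over type parameter depths.
struct DepthFormula {
    terms: BTreeMap<u16, u64>, // type parameter index -> additive constant
    constant: u64,
}

impl DepthFormula {
    fn constant(c: u64) -> Self {
        Self { terms: BTreeMap::new(), constant: c }
    }

    fn type_parameter(i: u16) -> Self {
        Self { terms: BTreeMap::from([(i, 0)]), constant: 0 }
    }

    /// Add `c` to every branch of the max (one more level of nesting).
    fn scale(&mut self, c: u64) {
        for v in self.terms.values_mut() {
            *v = v.saturating_add(c);
        }
        self.constant = self.constant.saturating_add(c);
    }

    /// Merge several formulas into a single max, term by term.
    fn normalize(formulas: Vec<DepthFormula>) -> Self {
        let mut out = DepthFormula::constant(0);
        for f in formulas {
            out.constant = max(out.constant, f.constant);
            for (i, c) in f.terms {
                let slot = out.terms.entry(i).or_insert(0);
                *slot = max(*slot, c);
            }
        }
        out
    }

    /// Plug in concrete depths for the type parameters.
    fn solve(&self, tparam_depths: &[u64]) -> u64 {
        let mut depth = self.constant;
        for (i, c) in &self.terms {
            depth = max(depth, tparam_depths[*i as usize].saturating_add(*c));
        }
        depth
    }
}

fn main() {
    // A formula of the form max(1, T0 + 1, T1 + 1): a fixed depth of 1,
    // or one level on top of either type parameter.
    let mut t0 = DepthFormula::type_parameter(0);
    t0.scale(1);
    let mut t1 = DepthFormula::type_parameter(1);
    t1.scale(1);
    let formula = DepthFormula::normalize(vec![DepthFormula::constant(1), t0, t1]);

    // Solving with parameter depths [1, 2] gives max(1, 1 + 1, 2 + 1) = 3.
    assert_eq!(formula.solve(&[1, 2]), 3);
}
```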
-const MAX_TYPE_INSTANTIATION_NODES: usize = 128; +const MAX_TYPE_INSTANTIATION_NODES: u64 = 128; impl Loader { fn struct_gidx_to_type_tag( @@ -2691,7 +2793,7 @@ impl Loader { }) } - fn count_type_nodes(&self, ty: &Type) -> usize { + fn count_type_nodes(&self, ty: &Type) -> u64 { let mut todo = vec![ty]; let mut result = 0; while let Some(ty) = todo.pop() { @@ -2716,8 +2818,8 @@ impl Loader { &self, gidx: CachedStructIndex, ty_args: &[Type], - count: &mut usize, - depth: usize, + count: &mut u64, + depth: u64, ) -> PartialVMResult { if let Some(struct_map) = self.type_cache.read().structs.get(&gidx) { if let Some(struct_info) = struct_map.get(ty_args) { @@ -2761,8 +2863,8 @@ impl Loader { fn type_to_type_layout_impl( &self, ty: &Type, - count: &mut usize, - depth: usize, + count: &mut u64, + depth: u64, ) -> PartialVMResult { if *count > MAX_TYPE_TO_LAYOUT_NODES { return Err(PartialVMError::new(StatusCode::TOO_MANY_TYPE_NODES)); @@ -2838,8 +2940,8 @@ impl Loader { &self, gidx: CachedStructIndex, ty_args: &[Type], - count: &mut usize, - depth: usize, + count: &mut u64, + depth: u64, ) -> PartialVMResult { if let Some(struct_map) = self.type_cache.read().structs.get(&gidx) { if let Some(struct_info) = struct_map.get(ty_args) { @@ -2892,8 +2994,8 @@ impl Loader { fn type_to_fully_annotated_layout_impl( &self, ty: &Type, - count: &mut usize, - depth: usize, + count: &mut u64, + depth: u64, ) -> PartialVMResult { if *count > MAX_TYPE_TO_LAYOUT_NODES { return Err(PartialVMError::new(StatusCode::TOO_MANY_TYPE_NODES)); diff --git a/third_party/move/move-vm/runtime/src/move_vm.rs b/third_party/move/move-vm/runtime/src/move_vm.rs index aab355185ac95..ea57adce68f15 100644 --- a/third_party/move/move-vm/runtime/src/move_vm.rs +++ b/third_party/move/move-vm/runtime/src/move_vm.rs @@ -69,10 +69,10 @@ impl MoveVM { } /// Load a module into VM's code cache - pub fn load_module<'r>( + pub fn load_module( &self, module_id: &ModuleId, - remote: &'r dyn MoveResolver, + remote: &dyn MoveResolver, ) -> VMResult> { self.runtime .loader() diff --git a/third_party/move/move-vm/runtime/src/native_functions.rs b/third_party/move/move-vm/runtime/src/native_functions.rs index 3446e6e4ffd5e..d93a593231690 100644 --- a/third_party/move/move-vm/runtime/src/native_functions.rs +++ b/third_party/move/move-vm/runtime/src/native_functions.rs @@ -128,7 +128,7 @@ impl<'a, 'b, 'c> NativeContext<'a, 'b, 'c> { &mut self, address: AccountAddress, type_: &Type, - ) -> VMResult<(bool, Option>)> { + ) -> VMResult<(bool, Option)> { let (value, num_bytes) = self .data_store .load_resource(self.resolver.loader(), address, type_) diff --git a/third_party/move/move-vm/runtime/src/session.rs b/third_party/move/move-vm/runtime/src/session.rs index b0f93f0c1c886..a0582d785d78e 100644 --- a/third_party/move/move-vm/runtime/src/session.rs +++ b/third_party/move/move-vm/runtime/src/session.rs @@ -311,7 +311,7 @@ impl<'r, 'l> Session<'r, 'l> { &mut self, addr: AccountAddress, ty: &Type, - ) -> PartialVMResult<(&mut GlobalValue, Option>)> { + ) -> PartialVMResult<(&mut GlobalValue, Option)> { self.data_cache .load_resource(self.move_vm.runtime.loader(), addr, ty) } diff --git a/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs b/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs index 40e712d3a2cc8..b7d2c457fe536 100644 --- a/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs +++ b/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs @@ -265,8 +265,8 @@ impl 
ResourceResolver for RemoteStore { _address: &AccountAddress, _tag: &StructTag, _metadata: &[Metadata], - ) -> Result>, anyhow::Error> { - Ok(None) + ) -> anyhow::Result<(Option>, usize)> { + Ok((None, 0)) } } diff --git a/third_party/move/move-vm/test-utils/src/gas_schedule.rs b/third_party/move/move-vm/test-utils/src/gas_schedule.rs index 7394a2c306b7a..3a0d918ca18d2 100644 --- a/third_party/move/move-vm/test-utils/src/gas_schedule.rs +++ b/third_party/move/move-vm/test-utils/src/gas_schedule.rs @@ -355,7 +355,8 @@ impl<'b> GasMeter for GasStatus<'b> { &mut self, _addr: AccountAddress, _ty: impl TypeView, - _loaded: Option<(NumBytes, impl ValueView)>, + _val: Option, + _bytes_loaded: NumBytes, ) -> PartialVMResult<()> { Ok(()) } diff --git a/third_party/move/move-vm/test-utils/src/storage.rs b/third_party/move/move-vm/test-utils/src/storage.rs index 6df6246ab41e6..7eff9cc7724fd 100644 --- a/third_party/move/move-vm/test-utils/src/storage.rs +++ b/third_party/move/move-vm/test-utils/src/storage.rs @@ -9,7 +9,7 @@ use move_core_types::{ identifier::Identifier, language_storage::{ModuleId, StructTag}, metadata::Metadata, - resolver::{ModuleResolver, MoveResolver, ResourceResolver}, + resolver::{resource_size, ModuleResolver, MoveResolver, ResourceResolver}, }; #[cfg(feature = "table-extension")] use move_table_extension::{TableChangeSet, TableHandle, TableResolver}; @@ -44,8 +44,8 @@ impl ResourceResolver for BlankStorage { _address: &AccountAddress, _tag: &StructTag, _metadata: &[Metadata], - ) -> Result>> { - Ok(None) + ) -> Result<(Option>, usize)> { + Ok((None, 0)) } } @@ -90,10 +90,12 @@ impl<'a, 'b, S: ResourceResolver> ResourceResolver for DeltaStorage<'a, 'b, S> { address: &AccountAddress, tag: &StructTag, metadata: &[Metadata], - ) -> Result>, Error> { + ) -> Result<(Option>, usize)> { if let Some(account_storage) = self.delta.accounts().get(address) { if let Some(blob_opt) = account_storage.resources().get(tag) { - return Ok(blob_opt.clone().ok()); + let buf = blob_opt.clone().ok(); + let buf_size = resource_size(&buf); + return Ok((buf, buf_size)); } } @@ -241,12 +243,8 @@ impl InMemoryStorage { changes, } = changes; self.tables.retain(|h, _| !removed_tables.contains(h)); - self.tables.extend( - new_tables - .keys() - .into_iter() - .map(|h| (*h, BTreeMap::default())), - ); + self.tables + .extend(new_tables.keys().map(|h| (*h, BTreeMap::default()))); for (h, c) in changes { assert!( self.tables.contains_key(&h), @@ -303,11 +301,13 @@ impl ResourceResolver for InMemoryStorage { address: &AccountAddress, tag: &StructTag, _metadata: &[Metadata], - ) -> Result>, Error> { + ) -> Result<(Option>, usize)> { if let Some(account_storage) = self.accounts.get(address) { - return Ok(account_storage.resources.get(tag).cloned()); + let buf = account_storage.resources.get(tag).cloned(); + let buf_size = resource_size(&buf); + return Ok((buf, buf_size)); } - Ok(None) + Ok((None, 0)) } } diff --git a/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.exp b/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.exp index a7ad267396026..ff2cde123ef29 100644 --- a/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.exp +++ b/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.exp @@ -6,14 +6,14 @@ Error: Script execution failed with VMError: { sub_status: None, location: 0x42::M, indices: [], - offsets: [(FunctionDefinitionIndex(8), 3)], + offsets: 
[(FunctionDefinitionIndex(0), 1)], } -task 3 'run'. lines 89-97: +task 3 'run'. lines 89-96: Error: Script execution failed with VMError: { major_status: VM_MAX_VALUE_DEPTH_REACHED, sub_status: None, location: 0x42::M, indices: [], - offsets: [(FunctionDefinitionIndex(9), 4)], + offsets: [(FunctionDefinitionIndex(0), 1)], } diff --git a/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.mvir b/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.mvir index 7f2bc48e6bf95..9044139ecd6f3 100644 --- a/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/recursion/runtime_layout_deeply_nested.mvir @@ -71,11 +71,11 @@ import 0x42.M; main(account: signer) { label b0: + // hits VM_MAX_VALUE_DEPTH_REACHED M.publish_128(&account); return; } - //# run --args @0x2 import 0x42.M; @@ -91,7 +91,6 @@ import 0x42.M; main(account: signer) { label b0: - // hits VM_MAX_VALUE_DEPTH_REACHED M.publish_257(&account); return; } diff --git a/third_party/move/move-vm/types/src/gas.rs b/third_party/move/move-vm/types/src/gas.rs index 9afb608d2020d..5cd0c01d776aa 100644 --- a/third_party/move/move-vm/types/src/gas.rs +++ b/third_party/move/move-vm/types/src/gas.rs @@ -264,8 +264,6 @@ pub trait GasMeter { /// Charges for loading a resource from storage. This is only called when the resource is not /// cached. - /// - `Some(n)` means `n` bytes are loaded. - /// - `None` means a load operation is performed but the resource does not exist. /// /// WARNING: This can be dangerous if you execute multiple user transactions in the same /// session -- identical transactions can have different gas costs. Use at your own risk. @@ -273,7 +271,8 @@ pub trait GasMeter { &mut self, addr: AccountAddress, ty: impl TypeView, - loaded: Option<(NumBytes, impl ValueView)>, + val: Option, + bytes_loaded: NumBytes, ) -> PartialVMResult<()>; /// Charge for executing a native function. @@ -501,7 +500,8 @@ impl GasMeter for UnmeteredGasMeter { &mut self, _addr: AccountAddress, _ty: impl TypeView, - _loaded: Option<(NumBytes, impl ValueView)>, + _val: Option, + _bytes_loaded: NumBytes, ) -> PartialVMResult<()> { Ok(()) } diff --git a/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs b/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs index d8c9a99697c37..e8ed89f2e5088 100644 --- a/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs +++ b/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs @@ -4,15 +4,108 @@ use move_binary_format::{ errors::{PartialVMError, PartialVMResult}, - file_format::{AbilitySet, SignatureToken, StructDefinitionIndex, StructTypeParameter}, + file_format::{ + AbilitySet, SignatureToken, StructDefinitionIndex, StructTypeParameter, TypeParameterIndex, + }, }; use move_core_types::{ gas_algebra::AbstractMemorySize, identifier::Identifier, language_storage::ModuleId, vm_status::StatusCode, }; +use std::{cmp::max, collections::BTreeMap, fmt::Debug}; pub const TYPE_DEPTH_MAX: usize = 256; +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)] +/// A formula describing the value depth of a type, using (the depths of) the type parameters as inputs. +/// +/// It has the form of `max(CBase, T1 + C1, T2 + C2, ..)` where `Ti` is the depth of the ith type parameter +/// and `Ci` is just some constant. 
+/// +/// This form has a special property: when you compute the max of multiple formulae, you can normalize +/// them into a single formula. +pub struct DepthFormula { + pub terms: Vec<(TypeParameterIndex, u64)>, // Ti + Ci + pub constant: Option, // Cbase +} + +impl DepthFormula { + pub fn constant(constant: u64) -> Self { + Self { + terms: vec![], + constant: Some(constant), + } + } + + pub fn type_parameter(tparam: TypeParameterIndex) -> Self { + Self { + terms: vec![(tparam, 0)], + constant: None, + } + } + + pub fn normalize(formulas: Vec) -> Self { + let mut var_map = BTreeMap::new(); + let mut constant_acc = None; + for formula in formulas { + let Self { terms, constant } = formula; + for (var, cur_factor) in terms { + var_map + .entry(var) + .and_modify(|prev_factor| *prev_factor = max(cur_factor, *prev_factor)) + .or_insert(cur_factor); + } + match (constant_acc, constant) { + (_, None) => (), + (None, Some(_)) => constant_acc = constant, + (Some(c1), Some(c2)) => constant_acc = Some(max(c1, c2)), + } + } + Self { + terms: var_map.into_iter().collect(), + constant: constant_acc, + } + } + + pub fn subst( + &self, + mut map: BTreeMap, + ) -> PartialVMResult { + let Self { terms, constant } = self; + let mut formulas = vec![]; + if let Some(constant) = constant { + formulas.push(DepthFormula::constant(*constant)) + } + for (t_i, c_i) in terms { + let Some(mut u_form) = map.remove(t_i) else { + return Err(PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message(format!("{t_i:?} missing mapping"))) + }; + u_form.scale(*c_i); + formulas.push(u_form) + } + Ok(DepthFormula::normalize(formulas)) + } + + pub fn solve(&self, tparam_depths: &[u64]) -> u64 { + let Self { terms, constant } = self; + let mut depth = constant.as_ref().copied().unwrap_or(0); + for (t_i, c_i) in terms { + depth = max(depth, tparam_depths[*t_i as usize].saturating_add(*c_i)) + } + depth + } + + pub fn scale(&mut self, c: u64) { + let Self { terms, constant } = self; + for (_t_i, c_i) in terms { + *c_i = (*c_i).saturating_add(c); + } + if let Some(cbase) = constant.as_mut() { + *cbase = (*cbase).saturating_add(c); + } + } +} + #[derive(Debug, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct StructType { pub fields: Vec, @@ -22,6 +115,7 @@ pub struct StructType { pub name: Identifier, pub module: ModuleId, pub struct_def: StructDefinitionIndex, + pub depth: Option, } impl StructType { @@ -46,7 +140,7 @@ pub enum Type { StructInstantiation(CachedStructIndex, Vec), Reference(Box), MutableReference(Box), - TyParam(usize), + TyParam(u16), U16, U32, U256, @@ -62,7 +156,7 @@ impl Type { fn apply_subst(&self, subst: F, depth: usize) -> PartialVMResult where - F: Fn(usize, usize) -> PartialVMResult + Copy, + F: Fn(u16, usize) -> PartialVMResult + Copy, { if depth > TYPE_DEPTH_MAX { return Err(PartialVMError::new(StatusCode::VM_MAX_TYPE_DEPTH_REACHED)); @@ -97,7 +191,7 @@ impl Type { pub fn subst(&self, ty_args: &[Type]) -> PartialVMResult { self.apply_subst( - |idx, depth| match ty_args.get(idx) { + |idx, depth| match ty_args.get(idx as usize) { Some(ty) => ty.clone_impl(depth), None => Err( PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) diff --git a/third_party/move/move-vm/types/src/values/values_impl.rs b/third_party/move/move-vm/types/src/values/values_impl.rs index 9d47503ed3101..4f0a947494b6e 100644 --- a/third_party/move/move-vm/types/src/values/values_impl.rs +++ b/third_party/move/move-vm/types/src/values/values_impl.rs @@ -3440,15 +3440,13 @@ impl ValueView for 
SignerRef { // Note: We may want to add more helpers to retrieve value views behind references here. impl Struct { - #[allow(clippy::needless_lifetimes)] - pub fn field_views<'a>(&'a self) -> impl ExactSizeIterator + Clone { + pub fn field_views(&self) -> impl ExactSizeIterator + Clone { self.fields.iter() } } impl Vector { - #[allow(clippy::needless_lifetimes)] - pub fn elem_views<'a>(&'a self) -> impl ExactSizeIterator + Clone { + pub fn elem_views(&self) -> impl ExactSizeIterator + Clone { struct ElemView<'b> { container: &'b Container, idx: usize, @@ -3470,8 +3468,7 @@ impl Vector { } impl Reference { - #[allow(clippy::needless_lifetimes)] - pub fn value_view<'a>(&'a self) -> impl ValueView + 'a { + pub fn value_view(&self) -> impl ValueView + '_ { struct ValueBehindRef<'b>(&'b ReferenceImpl); impl<'b> ValueView for ValueBehindRef<'b> { @@ -3490,8 +3487,7 @@ impl Reference { } impl GlobalValue { - #[allow(clippy::needless_lifetimes)] - pub fn view<'a>(&'a self) -> Option { + pub fn view(&self) -> Option { use GlobalValueImpl as G; struct Wrapper<'b>(&'b Rc>>); diff --git a/third_party/move/move-vm/types/src/views.rs b/third_party/move/move-vm/types/src/views.rs index 29a87e3261ac4..fbbb12c261cf6 100644 --- a/third_party/move/move-vm/types/src/views.rs +++ b/third_party/move/move-vm/types/src/views.rs @@ -4,7 +4,7 @@ use move_core_types::{ account_address::AccountAddress, gas_algebra::AbstractMemorySize, language_storage::TypeTag, }; -use std::mem::size_of; +use std::mem::size_of_val; /// Trait that provides an abstract view into a Move type. /// @@ -78,35 +78,35 @@ pub trait ValueView { } fn visit_vec_u8(&mut self, _depth: usize, vals: &[u8]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_vec_u16(&mut self, _depth: usize, vals: &[u16]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_vec_u32(&mut self, _depth: usize, vals: &[u32]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_vec_u64(&mut self, _depth: usize, vals: &[u64]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_vec_u128(&mut self, _depth: usize, vals: &[u128]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_vec_u256(&mut self, _depth: usize, vals: &[move_core_types::u256::U256]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_vec_bool(&mut self, _depth: usize, vals: &[bool]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_vec_address(&mut self, _depth: usize, vals: &[AccountAddress]) { - self.0 += ((size_of::() * vals.len()) as u64).into(); + self.0 += (size_of_val(vals) as u64).into(); } fn visit_ref(&mut self, _depth: usize, _is_global: bool) -> bool { diff --git a/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs b/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs index fb1a85de9472d..7cd608489522a 100644 --- a/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs +++ b/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs @@ -17,7 +17,7 @@ use move_core_types::{ language_storage::{ModuleId, StructTag, TypeTag}, metadata::Metadata, parser, - resolver::{ModuleResolver, 
ResourceResolver}, + resolver::{resource_size, ModuleResolver, ResourceResolver}, }; use move_disassembler::disassembler::Disassembler; use move_ir_types::location::Spanned; @@ -418,8 +418,10 @@ impl ResourceResolver for OnDiskStateView { address: &AccountAddress, struct_tag: &StructTag, _metadata: &[Metadata], - ) -> Result>, anyhow::Error> { - self.get_resource_bytes(*address, struct_tag.clone()) + ) -> Result<(Option>, usize)> { + let buf = self.get_resource_bytes(*address, struct_tag.clone())?; + let buf_size = resource_size(&buf); + Ok((buf, buf_size)) } } diff --git a/third_party/move/tools/move-cli/tests/build_tests/simple_build_with_docs/args.exp b/third_party/move/tools/move-cli/tests/build_tests/simple_build_with_docs/args.exp index 323ab5c2aeff1..1bd534f1b57e7 100644 --- a/third_party/move/tools/move-cli/tests/build_tests/simple_build_with_docs/args.exp +++ b/third_party/move/tools/move-cli/tests/build_tests/simple_build_with_docs/args.exp @@ -1,11 +1,11 @@ Command `new --path . Foo`: Command `build`: -FETCHING GIT DEPENDENCY https://github.com/move-language/move.git +UPDATING GIT DEPENDENCY https://github.com/move-language/move.git INCLUDING DEPENDENCY MoveStdlib BUILDING Foo Command `docgen --template template.md --exclude-impl --exclude-private-fun --exclude-specs --include-call-diagrams --include-dep-diagrams --independent-specs --no-collapsed-sections --output-directory doc --references-file template.md --section-level-start 3 --toc-depth 3`: -Generated "doc/template.md" Generated "doc/Foo.md" +Generated "doc/template.md" Documentation generation successful! External Command `grep documentation doc/Foo.md`: diff --git a/types/Cargo.toml b/types/Cargo.toml index 35ca764d3b1cf..fb27effe6cddf 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -31,6 +31,7 @@ once_cell = { workspace = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } rand = { workspace = true } +rayon = { workspace = true } serde = { workspace = true } serde_bytes = { workspace = true } serde_json = { workspace = true } diff --git a/types/src/block_executor/mod.rs b/types/src/block_executor/mod.rs new file mode 100644 index 0000000000000..bd7cb7ff962fd --- /dev/null +++ b/types/src/block_executor/mod.rs @@ -0,0 +1,5 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub mod partitioner; diff --git a/types/src/block_executor/partitioner.rs b/types/src/block_executor/partitioner.rs new file mode 100644 index 0000000000000..cb97e3a557659 --- /dev/null +++ b/types/src/block_executor/partitioner.rs @@ -0,0 +1,435 @@ +// Copyright © Aptos Foundation + +use crate::transaction::{analyzed_transaction::StorageLocation, Transaction}; +use aptos_crypto::HashValue; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +pub type ShardId = usize; +pub type TxnIndex = usize; + +#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] +pub struct TxnIdxWithShardId { + pub txn_index: TxnIndex, + pub shard_id: ShardId, +} + +impl TxnIdxWithShardId { + pub fn new(txn_index: TxnIndex, shard_id: ShardId) -> Self { + Self { + shard_id, + txn_index, + } + } +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +/// Denotes a set of cross shard edges, which contains the set (required or dependent) transaction +/// indices and the relevant storage locations that are conflicting. 
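Before the new block-partitioner types continue below, note the resolver-facing change threaded through the files above (`BogusStorage`, `BlankStorage`, `DeltaStorage`, `InMemoryStorage`, `OnDiskStateView`, and the `RemoteStore` test double): `get_resource_with_metadata` now returns the optional blob together with the number of bytes loaded, and the gas meter's `charge_load_resource` correspondingly takes the value view and the byte count as separate arguments. A small stand-alone sketch of the new return shape; the `resource_size` helper imported from `move_core_types::resolver` performs the same length computation:

```rust
use std::collections::BTreeMap;

/// Number of bytes backing an optional resource blob (0 if absent),
/// mirroring the `resource_size` helper used by the updated resolvers.
fn resource_size(resource: &Option<Vec<u8>>) -> usize {
    resource.as_ref().map(|blob| blob.len()).unwrap_or(0)
}

/// Toy resolver returning the new `(blob, bytes_loaded)` pair.
fn get_resource(storage: &BTreeMap<String, Vec<u8>>, key: &str) -> (Option<Vec<u8>>, usize) {
    let buf = storage.get(key).cloned();
    let buf_size = resource_size(&buf);
    (buf, buf_size)
}

fn main() {
    let mut storage = BTreeMap::new();
    storage.insert("0x1::coin::CoinStore".to_string(), vec![0u8; 42]);

    assert_eq!(
        get_resource(&storage, "0x1::coin::CoinStore"),
        (Some(vec![0u8; 42]), 42)
    );
    // A missing resource now reports an explicit load of 0 bytes.
    assert_eq!(get_resource(&storage, "0x1::account::Account"), (None, 0));
}
```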
+pub struct CrossShardEdges { + edges: HashMap>, +} + +impl CrossShardEdges { + pub fn new(txn_idx: TxnIdxWithShardId, storage_locations: Vec) -> Self { + let mut edges = HashMap::new(); + edges.insert(txn_idx, storage_locations); + Self { edges } + } + + pub fn add_edge( + &mut self, + txn_idx: TxnIdxWithShardId, + storage_locations: Vec, + ) { + self.edges + .entry(txn_idx) + .or_insert_with(Vec::new) + .extend(storage_locations.into_iter()); + } + + pub fn iter(&self) -> impl Iterator)> { + self.edges.iter() + } + + pub fn len(&self) -> usize { + self.edges.len() + } + + pub fn contains_idx(&self, txn_idx: &TxnIdxWithShardId) -> bool { + self.edges.contains_key(txn_idx) + } + + pub fn is_empty(&self) -> bool { + self.edges.is_empty() + } +} + +impl IntoIterator for CrossShardEdges { + type IntoIter = std::collections::hash_map::IntoIter>; + type Item = (TxnIdxWithShardId, Vec); + + fn into_iter(self) -> Self::IntoIter { + self.edges.into_iter() + } +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +/// Represents the dependencies of a transaction on other transactions across shards. Two types +/// of dependencies are supported: +/// 1. `required_edges`: The transaction depends on the execution of the transactions in the set. In this +/// case, the transaction can only be executed after the transactions in the set have been executed. +/// 2. `dependent_edges`: The transactions in the set depend on the execution of the transaction. In this +/// case, the transactions in the set can only be executed after the transaction has been executed. +/// Dependent edge is a reverse of required edge, for example if txn 20 in shard 2 requires txn 10 in shard 1, +/// then txn 10 in shard 1 will have a dependent edge to txn 20 in shard 2. +pub struct CrossShardDependencies { + required_edges: CrossShardEdges, + dependent_edges: CrossShardEdges, +} + +impl CrossShardDependencies { + pub fn num_required_edges(&self) -> usize { + self.required_edges.len() + } + + pub fn required_edges_iter( + &self, + ) -> impl Iterator)> { + self.required_edges.iter() + } + + pub fn has_required_txn(&self, txn_idx: TxnIdxWithShardId) -> bool { + self.required_edges.contains_idx(&txn_idx) + } + + pub fn get_required_edge_for( + &self, + txn_idx: TxnIdxWithShardId, + ) -> Option<&Vec> { + self.required_edges.edges.get(&txn_idx) + } + + pub fn get_dependent_edge_for( + &self, + txn_idx: TxnIdxWithShardId, + ) -> Option<&Vec> { + self.dependent_edges.edges.get(&txn_idx) + } + + pub fn has_dependent_txn(&self, txn_ids: TxnIdxWithShardId) -> bool { + self.dependent_edges.contains_idx(&txn_ids) + } + + pub fn add_required_edge( + &mut self, + txn_idx: TxnIdxWithShardId, + storage_location: StorageLocation, + ) { + self.required_edges + .add_edge(txn_idx, vec![storage_location]); + } + + pub fn add_dependent_edge( + &mut self, + txn_idx: TxnIdxWithShardId, + storage_locations: Vec, + ) { + self.dependent_edges.add_edge(txn_idx, storage_locations); + } +} + +/// A contiguous chunk of transactions (along with their dependencies) in a block. +/// +/// Each `SubBlock` represents a sequential section of transactions within a block. +/// The sub block includes the index of the first transaction relative to the block and a vector +/// of `TransactionWithDependencies` representing the transactions included in the chunk. 
+/// +/// Illustration: +/// ```plaintext +/// Block (Split into 3 transactions chunks): +/// +----------------+------------------+------------------+ +/// | Chunk 1 | Chunk 2 | Chunk 3 | +/// +----------------+------------------+------------------+ +/// | Transaction 1 | Transaction 4 | Transaction 7 | +/// | Transaction 2 | Transaction 5 | Transaction 8 | +/// | Transaction 3 | Transaction 6 | Transaction 9 | +/// +----------------+------------------+------------------+ +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SubBlock { + // This is the index of first transaction relative to the block. + pub start_index: TxnIndex, + pub transactions: Vec>, +} + +impl SubBlock { + pub fn new(start_index: TxnIndex, transactions: Vec>) -> Self { + Self { + start_index, + transactions, + } + } + + pub fn empty() -> Self { + Self { + start_index: 0, + transactions: vec![], + } + } + + pub fn num_txns(&self) -> usize { + self.transactions.len() + } + + pub fn is_empty(&self) -> bool { + self.transactions.is_empty() + } + + pub fn end_index(&self) -> TxnIndex { + self.start_index + self.num_txns() + } + + pub fn transactions_with_deps(&self) -> &Vec> { + &self.transactions + } + + pub fn into_transactions_with_deps(self) -> Vec> { + self.transactions + } + + pub fn into_txns(self) -> Vec { + self.transactions + .into_iter() + .map(|txn_with_deps| txn_with_deps.into_txn()) + .collect() + } + + pub fn add_dependent_edge( + &mut self, + source_index: TxnIndex, + txn_idx: TxnIdxWithShardId, + storage_locations: Vec, + ) { + let source_txn = self + .transactions + .get_mut(source_index - self.start_index) + .unwrap(); + source_txn.add_dependent_edge(txn_idx, storage_locations); + } + + pub fn iter(&self) -> impl Iterator> { + self.transactions.iter() + } +} + +impl IntoIterator for SubBlock { + type IntoIter = std::vec::IntoIter>; + type Item = TransactionWithDependencies; + + fn into_iter(self) -> Self::IntoIter { + self.transactions.into_iter() + } +} + +// A set of sub blocks assigned to a shard. +#[derive(Default, Clone, Debug, Serialize, Deserialize)] +pub struct SubBlocksForShard { + pub shard_id: ShardId, + pub sub_blocks: Vec>, +} + +impl SubBlocksForShard { + pub fn new(shard_id: ShardId, sub_blocks: Vec>) -> Self { + Self { + shard_id, + sub_blocks, + } + } + + pub fn empty(shard_id: ShardId) -> Self { + Self { + shard_id, + sub_blocks: Vec::new(), + } + } + + pub fn add_sub_block(&mut self, sub_block: SubBlock) { + self.sub_blocks.push(sub_block); + } + + pub fn num_txns(&self) -> usize { + self.sub_blocks + .iter() + .map(|sub_block| sub_block.num_txns()) + .sum() + } + + pub fn num_sub_blocks(&self) -> usize { + self.sub_blocks.len() + } + + pub fn into_sub_blocks(self) -> Vec> { + self.sub_blocks + } + + pub fn into_txns(self) -> Vec { + self.sub_blocks + .into_iter() + .flat_map(|sub_block| sub_block.into_txns()) + .collect() + } + + pub fn is_empty(&self) -> bool { + self.sub_blocks.is_empty() + } + + pub fn iter(&self) -> impl Iterator> { + self.sub_blocks + .iter() + .flat_map(|sub_block| sub_block.iter()) + } + + pub fn sub_block_iter(&self) -> impl Iterator> { + self.sub_blocks.iter() + } + + pub fn get_sub_block(&self, round: usize) -> Option<&SubBlock> { + self.sub_blocks.get(round) + } + + pub fn get_sub_block_mut(&mut self, round: usize) -> Option<&mut SubBlock> { + self.sub_blocks.get_mut(round) + } + + // Flattens a vector of `SubBlocksForShard` into a vector of transactions in the order they + // appear in the block. 
+
+// A set of sub blocks assigned to a shard.
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct SubBlocksForShard<T> {
+    pub shard_id: ShardId,
+    pub sub_blocks: Vec<SubBlock<T>>,
+}
+
+impl<T: Clone> SubBlocksForShard<T> {
+    pub fn new(shard_id: ShardId, sub_blocks: Vec<SubBlock<T>>) -> Self {
+        Self {
+            shard_id,
+            sub_blocks,
+        }
+    }
+
+    pub fn empty(shard_id: ShardId) -> Self {
+        Self {
+            shard_id,
+            sub_blocks: Vec::new(),
+        }
+    }
+
+    pub fn add_sub_block(&mut self, sub_block: SubBlock<T>) {
+        self.sub_blocks.push(sub_block);
+    }
+
+    pub fn num_txns(&self) -> usize {
+        self.sub_blocks
+            .iter()
+            .map(|sub_block| sub_block.num_txns())
+            .sum()
+    }
+
+    pub fn num_sub_blocks(&self) -> usize {
+        self.sub_blocks.len()
+    }
+
+    pub fn into_sub_blocks(self) -> Vec<SubBlock<T>> {
+        self.sub_blocks
+    }
+
+    pub fn into_txns(self) -> Vec<T> {
+        self.sub_blocks
+            .into_iter()
+            .flat_map(|sub_block| sub_block.into_txns())
+            .collect()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.sub_blocks.is_empty()
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = &TransactionWithDependencies<T>> {
+        self.sub_blocks
+            .iter()
+            .flat_map(|sub_block| sub_block.iter())
+    }
+
+    pub fn sub_block_iter(&self) -> impl Iterator<Item = &SubBlock<T>> {
+        self.sub_blocks.iter()
+    }
+
+    pub fn get_sub_block(&self, round: usize) -> Option<&SubBlock<T>> {
+        self.sub_blocks.get(round)
+    }
+
+    pub fn get_sub_block_mut(&mut self, round: usize) -> Option<&mut SubBlock<T>> {
+        self.sub_blocks.get_mut(round)
+    }
+
+    // Flattens a vector of `SubBlocksForShard` into a vector of transactions in the order they
+    // appear in the block.
+    pub fn flatten(block: Vec<SubBlocksForShard<T>>) -> Vec<T> {
+        let num_shards = block.len();
+        let mut flattened_txns = Vec::new();
+        let num_rounds = block[0].num_sub_blocks();
+        let mut ordered_blocks = vec![SubBlock::empty(); num_shards * num_rounds];
+        for (shard_id, sub_blocks) in block.into_iter().enumerate() {
+            for (round, sub_block) in sub_blocks.into_sub_blocks().into_iter().enumerate() {
+                ordered_blocks[round * num_shards + shard_id] = sub_block;
+            }
+        }
+
+        for sub_block in ordered_blocks.into_iter() {
+            flattened_txns.extend(sub_block.into_txns());
+        }
+
+        flattened_txns
+    }
+}
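`flatten` reassembles the block in round-major order: the sub blocks of every shard for round 0 come first (shard 0, 1, 2, ...), then round 1, and so on, via the `round * num_shards + shard_id` slot computation. A self-contained sketch of the same reordering on plain vectors of toy transaction ids (made-up data, not the diff's types):

```rust
// Each shard holds one Vec per round; flatten back to block order (round-major, then shard).
fn flatten(per_shard: Vec<Vec<Vec<u64>>>) -> Vec<u64> {
    let num_shards = per_shard.len();
    let num_rounds = per_shard[0].len();
    let mut ordered = vec![Vec::new(); num_shards * num_rounds];
    for (shard_id, rounds) in per_shard.into_iter().enumerate() {
        for (round, sub_block) in rounds.into_iter().enumerate() {
            ordered[round * num_shards + shard_id] = sub_block;
        }
    }
    ordered.into_iter().flatten().collect()
}

fn main() {
    // 2 shards x 2 rounds; txn ids encode round * 10 + shard for readability.
    let shard0 = vec![vec![0], vec![10]];
    let shard1 = vec![vec![1], vec![11]];
    // Round 0 of both shards comes first, then round 1 of both shards.
    assert_eq!(flatten(vec![shard0, shard1]), vec![0, 1, 10, 11]);
}
```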
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TransactionWithDependencies<T> {
+    pub txn: T,
+    pub cross_shard_dependencies: CrossShardDependencies,
+}
+
+impl<T: Clone> TransactionWithDependencies<T> {
+    pub fn new(txn: T, cross_shard_dependencies: CrossShardDependencies) -> Self {
+        Self {
+            txn,
+            cross_shard_dependencies,
+        }
+    }
+
+    pub fn txn(&self) -> &T {
+        &self.txn
+    }
+
+    pub fn cross_shard_dependencies(&self) -> &CrossShardDependencies {
+        &self.cross_shard_dependencies
+    }
+
+    pub fn into_txn(self) -> T {
+        self.txn
+    }
+
+    pub fn add_dependent_edge(
+        &mut self,
+        txn_idx: TxnIdxWithShardId,
+        storage_locations: Vec<StorageLocation>,
+    ) {
+        self.cross_shard_dependencies
+            .add_dependent_edge(txn_idx, storage_locations);
+    }
+}
+
+pub struct ExecutableBlock<T> {
+    pub block_id: HashValue,
+    pub transactions: ExecutableTransactions<T>,
+}
+
+impl<T: Clone> ExecutableBlock<T> {
+    pub fn new(block_id: HashValue, transactions: ExecutableTransactions<T>) -> Self {
+        Self {
+            block_id,
+            transactions,
+        }
+    }
+}
+
+impl<T: Clone> From<(HashValue, Vec<T>)> for ExecutableBlock<T> {
+    fn from((block_id, transactions): (HashValue, Vec<T>)) -> Self {
+        Self::new(block_id, ExecutableTransactions::Unsharded(transactions))
+    }
+}
+
+// Represents the transactions in a block that are ready to be executed.
+pub enum ExecutableTransactions<T> {
+    Unsharded(Vec<T>),
+    Sharded(Vec<SubBlocksForShard<T>>),
+}
+
+impl<T: Clone> ExecutableTransactions<T> {
+    pub fn num_transactions(&self) -> usize {
+        match self {
+            ExecutableTransactions::Unsharded(transactions) => transactions.len(),
+            ExecutableTransactions::Sharded(sub_blocks) => sub_blocks
+                .iter()
+                .map(|sub_block| sub_block.num_txns())
+                .sum(),
+        }
+    }
+}
+
+impl<T: Clone> From<Vec<T>> for ExecutableTransactions<T> {
+    fn from(txns: Vec<T>) -> Self {
+        Self::Unsharded(txns)
+    }
+}
+
+// Represents the transactions that are executed on a particular block executor shard. Unsharded
+// transactions represent the entire block. Sharded transactions represent the transactions
+// that are assigned to this shard.
+pub enum BlockExecutorTransactions<T> {
+    Unsharded(Vec<T>),
+    Sharded(SubBlocksForShard<T>),
+}
+
+impl<T: Clone> BlockExecutorTransactions<T> {
+    pub fn num_txns(&self) -> usize {
+        match self {
+            BlockExecutorTransactions::Unsharded(transactions) => transactions.len(),
+            BlockExecutorTransactions::Sharded(sub_blocks) => sub_blocks.num_txns(),
+        }
+    }
+
+    pub fn get_unsharded_transactions(&self) -> Option<&Vec<T>> {
+        match self {
+            BlockExecutorTransactions::Unsharded(transactions) => Some(transactions),
+            BlockExecutorTransactions::Sharded(_) => None,
+        }
+    }
+
+    pub fn into_txns(self) -> Vec<T> {
+        match self {
+            BlockExecutorTransactions::Unsharded(transactions) => transactions,
+            BlockExecutorTransactions::Sharded(sub_blocks) => sub_blocks.into_txns(),
+        }
+    }
+}
diff --git a/types/src/executable.rs b/types/src/executable.rs
index ba33af4c77ae7..ea4454975cf5c 100644
--- a/types/src/executable.rs
+++ b/types/src/executable.rs
@@ -37,7 +37,7 @@ impl ModulePath for StateKey {
 /// For the executor to manage memory consumption, executables should provide size.
 /// Note: explore finer-grained eviction mechanisms, e.g. LRU-based, or having
 /// different ownership for the arena / memory.
-pub trait Executable: Clone {
+pub trait Executable: Clone + Send + Sync {
     fn size_bytes(&self) -> usize;
 }
 
@@ -50,9 +50,12 @@ impl Executable for ExecutableTestType {
     }
 }
 
+// TODO: variant for a compiled module when available to avoid deserialization.
 pub enum FetchedModule {
     Blob(Option<Vec<u8>>),
-    // TODO: compiled module when available to avoid deserialization.
+    // Note: We could use Weak / & for parallel and sequential modes, respectively
+    // but rely on Arc for a simple and unified treatment for the time being.
+    // TODO: change Arc to custom reference when we have memory manager / arena.
     Executable(Arc),
 }
diff --git a/types/src/fee_statement.rs b/types/src/fee_statement.rs
new file mode 100644
index 0000000000000..6c7738e1fae80
--- /dev/null
+++ b/types/src/fee_statement.rs
@@ -0,0 +1,88 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct FeeStatement {
+    /// Total gas charge.
+    total_charge_gas_units: u64,
+    /// Execution gas charge.
+    execution_gas_units: u64,
+    /// IO gas charge.
+    io_gas_units: u64,
+    /// Storage gas charge.
+    storage_gas_units: u64,
+    /// Storage fee charge.
+    storage_fee_octas: u64,
+}
+
+impl FeeStatement {
+    pub fn zero() -> Self {
+        Self {
+            total_charge_gas_units: 0,
+            execution_gas_units: 0,
+            io_gas_units: 0,
+            storage_gas_units: 0,
+            storage_fee_octas: 0,
+        }
+    }
+
+    pub fn new(
+        total_charge_gas_units: u64,
+        execution_gas_units: u64,
+        io_gas_units: u64,
+        storage_gas_units: u64,
+        storage_fee_octas: u64,
+    ) -> Self {
+        Self {
+            total_charge_gas_units,
+            execution_gas_units,
+            io_gas_units,
+            storage_gas_units,
+            storage_fee_octas,
+        }
+    }
+
+    pub fn new_from_fee_statement(fee_statement: &FeeStatement) -> Self {
+        Self {
+            total_charge_gas_units: fee_statement.total_charge_gas_units,
+            execution_gas_units: fee_statement.execution_gas_units,
+            io_gas_units: fee_statement.io_gas_units,
+            storage_gas_units: fee_statement.storage_gas_units,
+            storage_fee_octas: fee_statement.storage_fee_octas,
+        }
+    }
+
+    pub fn gas_used(&self) -> u64 {
+        self.total_charge_gas_units
+    }
+
+    pub fn execution_gas_used(&self) -> u64 {
+        self.execution_gas_units
+    }
+
+    pub fn io_gas_used(&self) -> u64 {
+        self.io_gas_units
+    }
+
+    pub fn storage_gas_used(&self) -> u64 {
+        self.storage_gas_units
+    }
+
+    pub fn storage_fee_used(&self) -> u64 {
+        self.storage_fee_octas
+    }
+
+    pub fn add_fee_statement(&mut self, other: &FeeStatement) {
+        self.total_charge_gas_units += other.total_charge_gas_units;
+        self.execution_gas_units += other.execution_gas_units;
+        self.io_gas_units += other.io_gas_units;
+        self.storage_gas_units += other.storage_gas_units;
+        self.storage_fee_octas += other.storage_fee_octas;
+    }
+
+    pub fn fee_statement(&self) -> FeeStatement {
+        self.clone()
+    }
+}
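Because `add_fee_statement` accumulates every component separately, per-category charges survive aggregation over a block while `gas_used` still reports the total. A rough usage sketch, assuming the `FeeStatement` type added above is in scope (the `aptos_types::fee_statement` path is an assumption based on the `lib.rs` change later in this diff) and using made-up numbers:

```rust
// use aptos_types::fee_statement::FeeStatement; // assumed path for the type added above

fn aggregate(statements: &[FeeStatement]) -> FeeStatement {
    // Start from the all-zero statement and fold each per-transaction statement into it.
    let mut total = FeeStatement::zero();
    for s in statements {
        total.add_fee_statement(s);
    }
    total
}

fn main() {
    // Made-up per-transaction charges: (total, execution, io, storage gas, storage fee in octas).
    let txn1 = FeeStatement::new(7, 3, 2, 2, 500);
    let txn2 = FeeStatement::new(5, 2, 2, 1, 300);
    let block_total = aggregate(&[txn1, txn2]);
    assert_eq!(block_total.gas_used(), 12);
    assert_eq!(block_total.storage_fee_used(), 800);
}
```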
diff --git a/types/src/lib.rs b/types/src/lib.rs
index a9fb39f4c7c50..ea758ebec8881 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -16,6 +16,7 @@ pub mod epoch_change;
 pub mod epoch_state;
 pub mod event;
 pub mod executable;
+pub mod fee_statement;
 pub mod governance;
 pub mod ledger_info;
 pub mod mempool_status;
@@ -51,6 +52,7 @@ pub use utility_coin::*;
 
 pub mod account_view;
 pub mod aggregate_signature;
+pub mod block_executor;
 pub mod state_store;
 #[cfg(test)]
 mod unit_tests;
diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs
index 16f38e21189ad..52244f2156731 100644
--- a/types/src/on_chain_config/aptos_features.rs
+++ b/types/src/on_chain_config/aptos_features.rs
@@ -40,7 +40,7 @@ pub struct Features {
 impl Default for Features {
     fn default() -> Self {
         Features {
-            features: vec![0b00100000, 0b00100000, 0b00001100],
+            features: vec![0b00100000, 0b00100000, 0b00000100],
         }
     }
 }
diff --git a/types/src/test_helpers/transaction_test_helpers.rs b/types/src/test_helpers/transaction_test_helpers.rs
index 9d4d1de16bc56..eac86008f93a2 100644
--- a/types/src/test_helpers/transaction_test_helpers.rs
+++ b/types/src/test_helpers/transaction_test_helpers.rs
@@ -15,6 +15,10 @@ use aptos_crypto::{ed25519::*, traits::*, HashValue};
 const MAX_GAS_AMOUNT: u64 = 1_000_000;
 const TEST_GAS_PRICE: u64 = 100;
 
+// The block gas limit parameter for executor tests
+pub const BLOCK_GAS_LIMIT: Option<u64> = Some(1000);
+// pub const BLOCK_GAS_LIMIT: Option<u64> = None;
+
 static EMPTY_SCRIPT: &[u8] = include_bytes!("empty_script.mv");
 
 // Create an expiration time 'seconds' after now
@@ -239,8 +243,11 @@ pub fn get_test_txn_with_chain_id(
     SignedTransaction::new(raw_txn, public_key, signature)
 }
 
-pub fn block(mut user_txns: Vec<Transaction>, maybe_gas_limit: Option<u64>) -> Vec<Transaction> {
-    if maybe_gas_limit.is_none() {
+pub fn block(
+    mut user_txns: Vec<Transaction>,
+    maybe_block_gas_limit: Option<u64>,
+) -> Vec<Transaction> {
+    if maybe_block_gas_limit.is_none() {
         user_txns.push(Transaction::StateCheckpoint(HashValue::random()));
     }
     user_txns
diff --git a/types/src/transaction/analyzed_transaction.rs b/types/src/transaction/analyzed_transaction.rs
new file mode 100644
index 0000000000000..92f62a9483d8b
--- /dev/null
+++ b/types/src/transaction/analyzed_transaction.rs
@@ -0,0 +1,225 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{
+    access_path::AccessPath,
+    account_config::{AccountResource, CoinStoreResource},
+    state_store::{state_key::StateKey, table::TableHandle},
+    transaction::{SignedTransaction, Transaction, TransactionPayload},
+};
+use aptos_crypto::{hash::CryptoHash, HashValue};
+pub use move_core_types::abi::{
+    ArgumentABI, ScriptFunctionABI as EntryFunctionABI, TransactionScriptABI, TypeArgumentABI,
+};
+use move_core_types::{
+    account_address::AccountAddress, language_storage::StructTag, move_resource::MoveStructType,
+};
+use serde::{Deserialize, Serialize};
+use std::hash::{Hash, Hasher};
+
+#[derive(Clone, Debug)]
+pub struct AnalyzedTransaction {
+    transaction: Transaction,
+    /// Set of storage locations that are read by the transaction. This doesn't include locations
+    /// that are written by the transaction, to avoid duplicating locations across the read and
+    /// write sets. This can be accurate or strictly overestimated.
+    read_hints: Vec<StorageLocation>,
+    /// Set of storage locations that are written by the transaction. This can be accurate or
+    /// strictly overestimated.
+    write_hints: Vec<StorageLocation>,
+    /// A transaction is predictable if neither the read hints nor the write hints contain wildcards.
+    predictable_transaction: bool,
+    /// The hash of the transaction - this is cached for performance reasons.
+    hash: HashValue,
+}
+
+#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)]
+// TODO(skedia): Evaluate if we need to cache the HashValue for efficiency reasons.
+pub enum StorageLocation {
+    // A specific storage location denoted by an address and a struct tag.
+    Specific(StateKey),
+    // Storage location denoted by a struct tag and any arbitrary address.
+    // Example: read(*), write(*) in Move.
+    WildCardStruct(StructTag),
+    // Storage location denoted by a table handle and any arbitrary item in the table.
+    WildCardTable(TableHandle),
+}
+
+impl AnalyzedTransaction {
+    pub fn new(
+        transaction: Transaction,
+        read_hints: Vec<StorageLocation>,
+        write_hints: Vec<StorageLocation>,
+    ) -> Self {
+        let hints_contain_wildcard = read_hints
+            .iter()
+            .chain(write_hints.iter())
+            .any(|hint| !matches!(hint, StorageLocation::Specific(_)));
+        let hash = transaction.hash();
+        AnalyzedTransaction {
+            transaction,
+            read_hints,
+            write_hints,
+            predictable_transaction: !hints_contain_wildcard,
+            hash,
+        }
+    }
+
+    pub fn new_with_no_hints(transaction: Transaction) -> Self {
+        AnalyzedTransaction::new(transaction, vec![], vec![])
+    }
+
+    pub fn into_txn(self) -> Transaction {
+        self.transaction
+    }
+
+    pub fn transaction(&self) -> &Transaction {
+        &self.transaction
+    }
+
+    pub fn read_hints(&self) -> &[StorageLocation] {
+        &self.read_hints
+    }
+
+    pub fn write_hints(&self) -> &[StorageLocation] {
+        &self.write_hints
+    }
+
+    pub fn predictable_transaction(&self) -> bool {
+        self.predictable_transaction
+    }
+
+    pub fn sender(&self) -> Option<AccountAddress> {
+        match &self.transaction {
+            Transaction::UserTransaction(signed_txn) => Some(signed_txn.sender()),
+            _ => None,
+        }
+    }
+
+    pub fn analyzed_transaction_for_coin_transfer(
+        signed_txn: SignedTransaction,
+        sender_address: AccountAddress,
+        receiver_address: AccountAddress,
+        receiver_exists: bool,
+    ) -> Self {
+        let mut write_hints = vec![
+            Self::account_resource_location(sender_address),
+            Self::coin_store_location(sender_address),
+            Self::coin_store_location(receiver_address),
+        ];
+        if !receiver_exists {
+            // If the receiver doesn't exist, we create the receiver account, so we need to write
+            // the receiver account resource.
+            write_hints.push(Self::account_resource_location(receiver_address));
+        }
+        AnalyzedTransaction::new(
+            Transaction::UserTransaction(signed_txn),
+            // Note that we omit all the modules we read and the global supply we write to.
+            vec![],
+            // Read and write locations are the same for a coin transfer.
+            write_hints,
+        )
+    }
+
+    pub fn account_resource_location(address: AccountAddress) -> StorageLocation {
+        StorageLocation::Specific(StateKey::access_path(AccessPath::new(
+            address,
+            AccountResource::struct_tag().access_vector(),
+        )))
+    }
+
+    pub fn coin_store_location(address: AccountAddress) -> StorageLocation {
+        StorageLocation::Specific(StateKey::access_path(AccessPath::new(
+            address,
+            CoinStoreResource::struct_tag().access_vector(),
+        )))
+    }
+
+    pub fn analyzed_transaction_for_create_account(
+        signed_txn: SignedTransaction,
+        sender_address: AccountAddress,
+        receiver_address: AccountAddress,
+    ) -> Self {
+        let read_hints = vec![
+            Self::account_resource_location(sender_address),
+            Self::coin_store_location(sender_address),
+            Self::account_resource_location(receiver_address),
+            Self::coin_store_location(receiver_address),
+        ];
+        AnalyzedTransaction::new(
+            Transaction::UserTransaction(signed_txn),
+            vec![],
+            // Read and write locations are the same for create account.
+            read_hints,
+        )
+    }
+}
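The read and write hints exist so that transactions can be grouped and ordered across shards without executing them: two transactions potentially conflict when one's write hints overlap the other's read or write hints. A self-contained sketch of that overlap check (string labels are hypothetical stand-ins for the real `StorageLocation` state keys; the rule itself is illustrative, not code from this PR):

```rust
use std::collections::HashSet;

// Simplified stand-in for AnalyzedTransaction: just the hint sets.
struct Hints {
    read: HashSet<&'static str>,
    write: HashSet<&'static str>,
}

// Two transactions conflict if one writes a location the other reads or writes.
fn conflicts(a: &Hints, b: &Hints) -> bool {
    a.write.iter().any(|loc| b.write.contains(loc) || b.read.contains(loc))
        || b.write.iter().any(|loc| a.write.contains(loc) || a.read.contains(loc))
}

fn main() {
    // Coin transfer Alice -> Bob: writes both coin stores (reads omitted, as in the diff).
    let t1 = Hints { read: HashSet::new(), write: ["coin_store<alice>", "coin_store<bob>"].into() };
    // Coin transfer Bob -> Carol: writes Bob's and Carol's coin stores.
    let t2 = Hints { read: HashSet::new(), write: ["coin_store<bob>", "coin_store<carol>"].into() };
    // Unrelated transfer Dave -> Erin.
    let t3 = Hints { read: HashSet::new(), write: ["coin_store<dave>", "coin_store<erin>"].into() };
    assert!(conflicts(&t1, &t2)); // both touch Bob's coin store
    assert!(!conflicts(&t1, &t3));
}
```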
+
+impl PartialEq for AnalyzedTransaction {
+    fn eq(&self, other: &Self) -> bool {
+        self.hash == other.hash
+    }
+}
+
+impl Eq for AnalyzedTransaction {}
+
+impl Hash for AnalyzedTransaction {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        state.write(self.hash.as_ref());
+    }
+}
+
+impl From<Transaction> for AnalyzedTransaction {
+    fn from(txn: Transaction) -> Self {
+        match txn {
+            Transaction::UserTransaction(signed_txn) => match signed_txn.payload() {
+                TransactionPayload::EntryFunction(func) => {
+                    match (
+                        *func.module().address(),
+                        func.module().name().as_str(),
+                        func.function().as_str(),
+                    ) {
+                        (AccountAddress::ONE, "coin", "transfer") => {
+                            let sender_address = signed_txn.sender();
+                            let receiver_address = bcs::from_bytes(&func.args()[0]).unwrap();
+                            AnalyzedTransaction::analyzed_transaction_for_coin_transfer(
+                                signed_txn,
+                                sender_address,
+                                receiver_address,
+                                true,
+                            )
+                        },
+                        (AccountAddress::ONE, "aptos_account", "transfer") => {
+                            let sender_address = signed_txn.sender();
+                            let receiver_address = bcs::from_bytes(&func.args()[0]).unwrap();
+                            AnalyzedTransaction::analyzed_transaction_for_coin_transfer(
+                                signed_txn,
+                                sender_address,
+                                receiver_address,
+                                false,
+                            )
+                        },
+                        (AccountAddress::ONE, "aptos_account", "create_account") => {
+                            let sender_address = signed_txn.sender();
+                            let receiver_address = bcs::from_bytes(&func.args()[0]).unwrap();
+                            AnalyzedTransaction::analyzed_transaction_for_create_account(
+                                signed_txn,
+                                sender_address,
+                                receiver_address,
+                            )
+                        },
+                        _ => todo!("Only coin transfer and create account transactions are supported for now"),
+                    }
+                },
+                _ => todo!("Only entry function transactions are supported for now"),
+            },
+            _ => AnalyzedTransaction::new_with_no_hints(txn),
+        }
+    }
+}
+
+impl From<AnalyzedTransaction> for Transaction {
+    fn from(val: AnalyzedTransaction) -> Self {
+        val.transaction
+    }
+}
diff --git a/types/src/transaction/mod.rs b/types/src/transaction/mod.rs
index 549b021cd329d..a7e4fd5c4ac83 100644
--- a/types/src/transaction/mod.rs
+++ b/types/src/transaction/mod.rs
@@ -35,6 +35,7 @@ use std::{
     fmt::{Debug, Display, Formatter},
 };
 
+pub mod analyzed_transaction;
 pub mod authenticator;
 mod change_set;
 mod module;