diff --git a/.github/workflows/benchmark-prs.yml b/.github/workflows/benchmark-prs.yml index af1b3ce0fe..13da75ef2d 100644 --- a/.github/workflows/benchmark-prs.yml +++ b/.github/workflows/benchmark-prs.yml @@ -5,390 +5,378 @@ on: pull_request env: CARGO_INCREMENTAL: "0" RUST_BACKTRACE: 1 - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi NODE_DATA_PATH: /home/runner/.local/share/safe/node jobs: - # benchmark-cli: - # name: Compare sn_cli benchmarks to main - # # right now only ubuntu, running on multiple systems would require many pushes...\ - # # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing - # # once to the branch.. - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v4 - - # - uses: dtolnay/rust-toolchain@stable - # with: - # components: rustfmt, clippy - - # - uses: Swatinem/rust-cache@v2 - # continue-on-error: true - - # ######################## - # ### Setup ### - # ######################## - # - run: cargo install cargo-criterion - - # - name: install ripgrep - # run: sudo apt-get -y install ripgrep - - # - name: Download 95mb file to be uploaded with the safe client - # shell: bash - # run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - - # # As normal user won't care much about initial client startup, - # # but be more alerted on communication speed during transmission. - # # Meanwhile the criterion testing code includes the client startup as well, - # # it will be better to execute bench test with `local`, - # # to make the measurement results reflect speed improvement or regression more accurately. - # - name: Build sn bins - # run: cargo build --release --bin safe --bin safenode --features local - # timeout-minutes: 30 - - # - name: Build faucet bin - # run: cargo build --release --bin faucet --features local --features gifting --no-default-features - # timeout-minutes: 30 - - # - name: Start a local network - # uses: maidsafe/sn-local-testnet-action@main - # env: - # SN_LOG: "all" - # with: - # action: start - # interval: 2000 - # node-path: target/release/safenode - # faucet-path: target/release/faucet - # platform: ubuntu-latest - # build: true - - # - name: Check SAFE_PEERS was set - # shell: bash - # run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" - - # ######################### - # ### Upload large file ### - # ######################### - - # - name: Fund cli wallet - # shell: bash - # run: target/release/safe --log-output-dest=data-dir wallet get-faucet 127.0.0.1:8000 - # env: - # SN_LOG: "all" - - # - name: Start a client instance to compare memory usage - # shell: bash - # run: target/release/safe --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick - # env: - # SN_LOG: "all" - - # - name: Cleanup uploaded_files folder to avoid pollute download benchmark - # shell: bash - # run: rm -rf $CLIENT_DATA_PATH/uploaded_files - - # ########################### - # ### Client Mem Analysis ### - # ########################### - - # - name: Check client memory usage - # shell: bash - # run: | - # client_peak_mem_limit_mb="1024" # mb - # client_avg_mem_limit_mb="512" # mb - - # peak_mem_usage=$( - # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - # awk -F':' '/"memory_used_mb":/{print $2}' | - # sort -n | - # tail -n 1 - # ) - # echo "Peak memory usage: $peak_mem_usage MB" - # if (( $(echo "$peak_mem_usage > 
$client_peak_mem_limit_mb" | bc -l) )); then - # echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" - # exit 1 - # fi - - # total_mem=$( - # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - # awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' - # ) - # num_of_times=$( - # rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | - # rg "(\d+) matches" | - # rg "\d+" -o - # ) - # echo "num_of_times: $num_of_times" - # echo "Total memory is: $total_mem" - # average_mem=$(($total_mem/$(($num_of_times)))) - # echo "Average memory is: $average_mem" - - # if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then - # echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" - # exit 1 - # fi - # # Write the client memory usage to a file - # echo '[ - # { - # "name": "client-peak-memory-usage-during-upload", - # "value": '$peak_mem_usage', - # "unit": "MB" - # }, - # { - # "name": "client-average-memory-usage-during-upload", - # "value": '$average_mem', - # "unit": "MB" - # } - # ]' > client_memory_usage.json - - # - name: check client_memory_usage.json - # shell: bash - # run: cat client_memory_usage.json - - # - name: Alert for client memory usage - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # name: "Memory Usage of Client during uploading large file" - # tool: "customSmallerIsBetter" - # output-file-path: client_memory_usage.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/client-mem-usage.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # 200% regression will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true - - # ######################## - # ### Benchmark ### - # ######################## - # - name: Bench `safe` cli - # shell: bash - # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, - # # passes to tee which displays it in the terminal and writes to output.txt - # run: | - # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt - # cat output.txt | rg benchmark-complete | jq -s 'map({ - # name: (.id | split("/"))[-1], - # unit: "MiB/s", - # value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) - # })' > files-benchmark.json - # timeout-minutes: 15 - - # - name: Confirming the number of files uploaded and downloaded during the benchmark test - # shell: bash - # run: | - # ls -l $CLIENT_DATA_PATH - # ls -l $CLIENT_DATA_PATH/uploaded_files - # ls -l $CLIENT_DATA_PATH/safe_files - - # - name: Store benchmark result - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # # What benchmark tool the output.txt came from - # tool: "customBiggerIsBetter" - # output-file-path: files-benchmark.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/benchmark-data.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # 200% regression 
will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true - - # - name: Start a client to carry out download to output the logs - # shell: bash - # run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - - # - name: Start a client to simulate criterion upload - # shell: bash - # run: | - # ls -l target/release - # target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick - - # ######################### - # ### Stop Network ### - # ######################### - - # - name: Stop the local network - # if: always() - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: stop - # log_file_prefix: safe_test_logs_benchmark - # platform: ubuntu-latest - # build: true - - # - name: Upload Faucet folder - # uses: actions/upload-artifact@main - # with: - # name: faucet_folder - # path: /home/runner/.local/share/safe/test_faucet - # continue-on-error: true - # if: always() - - # ######################### - # ### Node Mem Analysis ### - # ######################### - - # # The large file uploaded will increase node's peak mem usage a lot - # - name: Check node memory usage - # shell: bash - # run: | - # node_peak_mem_limit_mb="250" # mb - # peak_mem_usage=$( - # rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | - # awk -F':' '/"memory_used_mb":/{print $2}' | - # sort -n | - # tail -n 1 - # ) - - # echo "Memory usage: $peak_mem_usage MB" - # if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then - # echo "Node memory usage exceeded threshold: $peak_mem_usage MB" - # exit 1 - # fi - # # Write the node memory usage to a file - # echo '[ - # { - # "name": "node-memory-usage-through-safe-benchmark", - # "value": '$peak_mem_usage', - # "unit": "MB" - # } - # ]' > node_memory_usage.json - - # - name: check node_memory_usage.json - # shell: bash - # run: cat node_memory_usage.json - - # - name: Alert for node memory usage - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # tool: "customSmallerIsBetter" - # output-file-path: node_memory_usage.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/node-mem-usage.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # Comment on the PR - # comment-always: true - # # 200% regression will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true - - # ########################################### - # ### Swarm_driver handling time Analysis ### - # ########################################### - - # - name: Check swarm_driver handling time - # shell: bash - # run: | - # num_of_times=$( - # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - # rg "(\d+) matches" | - # rg "\d+" -o - # ) - # echo "Number of long cmd handling times: $num_of_times" - # total_long_handling_ms=$( - # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - # ) - # echo "Total cmd long handling time is: $total_long_handling_ms ms" - # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - # echo "Average cmd long handling time is: $average_handling_ms ms" - # 
total_long_handling=$(($total_long_handling_ms)) - # total_num_of_times=$(($num_of_times)) - # num_of_times=$( - # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - # rg "(\d+) matches" | - # rg "\d+" -o - # ) - # echo "Number of long event handling times: $num_of_times" - # total_long_handling_ms=$( - # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - # ) - # echo "Total event long handling time is: $total_long_handling_ms ms" - # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - # echo "Average event long handling time is: $average_handling_ms ms" - # total_long_handling=$(($total_long_handling_ms+$total_long_handling)) - # total_num_of_times=$(($num_of_times+$total_num_of_times)) - # average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) - # echo "Total swarm_driver long handling times is: $total_num_of_times" - # echo "Total swarm_driver long handling duration is: $total_long_handling ms" - # echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" - # total_num_of_times_limit_hits="30000" # hits - # total_long_handling_limit_ms="400000" # ms - # average_handling_limit_ms="20" # ms - # if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then - # echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits" - # exit 1 - # fi - # if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then - # echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms" - # exit 1 - # fi - # if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then - # echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms" - # exit 1 - # fi - - # # Write the node memory usage to a file - # echo '[ - # { - # "name": "swarm_driver long handling times", - # "value": '$total_num_of_times', - # "unit": "hits" - # }, - # { - # "name": "swarm_driver long handling total_time", - # "value": '$total_long_handling', - # "unit": "ms" - # }, - # { - # "name": "swarm_driver average long handling time", - # "value": '$average_handling_ms', - # "unit": "ms" - # } - # ]' > swarm_driver_long_handlings.json - - # - name: check swarm_driver_long_handlings.json - # shell: bash - # run: cat swarm_driver_long_handlings.json - - # - name: Alert for swarm_driver long handlings - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # tool: "customSmallerIsBetter" - # output-file-path: swarm_driver_long_handlings.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/swarm_driver_long_handlings.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # Comment on the PR - # comment-always: true - # # 200% regression will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true + benchmark-cli: + name: Compare autonomi_cli benchmarks to main + # right now only ubuntu, running on multiple systems would require many pushes...\ + # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing + # once to the branch.. 
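+    # For orientation, a minimal sketch (not part of the original workflow) of the log-parsing idiom the memory checks below rely on, assuming log entries carry JSON fragments like "memory_used_mb":123:
+    #   rg '"memory_used_mb":[^,]*' <logs> -o --no-filename | awk -F':' '{print $2}' | sort -n | tail -n 1
+    # rg -o emits only the matched fragment, awk strips the key, and sort -n | tail -n 1 keeps the largest value as the peak.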
+    runs-on: ubuntu-latest +    steps: +      - uses: actions/checkout@v4 + +      - uses: dtolnay/rust-toolchain@stable +        with: +          components: rustfmt, clippy + +      - uses: Swatinem/rust-cache@v2 +        continue-on-error: true + +      ######################## +      ### Setup                    ### +      ######################## +      - run: cargo install cargo-criterion + +      - name: install ripgrep +        run: sudo apt-get -y install ripgrep + +      - name: Download 95mb file to be uploaded with the safe client +        shell: bash +        run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip + +      # As a normal user won't care much about initial client startup, +      # but will be more alert to communication speed during transmission, +      # and since the criterion testing code includes the client startup as well, +      # it is better to execute the bench test with `local`, +      # so the measurement results reflect speed improvements or regressions more accurately. +      - name: Build binaries +        run: cargo build --release --features local --bin safenode --bin autonomi +        timeout-minutes: 30 + +      - name: Start a local network +        uses: maidsafe/sn-local-testnet-action@main +        env: +          SN_LOG: "all" +        with: +          action: start +          enable-evm-testnet: true +          node-path: target/release/safenode +          platform: ubuntu-latest +          build: true + +      - name: Check SAFE_PEERS was set +        shell: bash +        run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" + +      - name: export default secret key +        run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV +        shell: bash + +      ######################### +      ### Upload large file ### +      ######################### + +      - name: Start a client instance to compare memory usage +        shell: bash +        run: ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data.zip" +        env: +          SN_LOG: "all" +        timeout-minutes: 5 + +      - name: Cleanup uploaded_files folder to avoid polluting the download benchmark +        shell: bash +        run: | +          ls -l $CLIENT_DATA_PATH +          rm -rf $CLIENT_DATA_PATH/uploaded_files + +      ########################### +      ### Client Mem Analysis ### +      ########################### + +      - name: Check client memory usage +        shell: bash +        run: | +          client_peak_mem_limit_mb="1024" # mb +          client_avg_mem_limit_mb="512" # mb + +          peak_mem_usage=$( +            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename | +            awk -F':' '/"memory_used_mb":/{print $2}' | +            sort -n | +            tail -n 1 +          ) +          echo "Peak memory usage: $peak_mem_usage MB" +          if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then +            echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" +            exit 1 +          fi + +          total_mem=$( +            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename | +            awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' +          ) +          num_of_times=$( +            rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob autonomi.* -c --stats | +            rg "(\d+) matches" | +            rg "\d+" -o +          ) +          echo "num_of_times: $num_of_times" +          echo "Total memory is: $total_mem" +          average_mem=$(($total_mem/$(($num_of_times)))) +          echo "Average memory is: $average_mem" + +          if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then +            echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" +            exit 1 +          fi +          # Write the client memory usage to a file +          echo '[ +              { +                  "name": "client-peak-memory-usage-during-upload", +                  "value": '$peak_mem_usage', +                  "unit": "MB" +              }, +              { +                  "name": "client-average-memory-usage-during-upload",
"value": '$average_mem', + "unit": "MB" + } + ]' > client_memory_usage.json + + - name: check client_memory_usage.json + shell: bash + run: cat client_memory_usage.json + + - name: Alert for client memory usage + uses: benchmark-action/github-action-benchmark@v1 + with: + name: "Memory Usage of Client during uploading large file" + tool: "customSmallerIsBetter" + output-file-path: client_memory_usage.json + # Where the previous data file is stored + external-data-json-path: ./cache/client-mem-usage.json + # Workflow will fail when an alert happens + fail-on-alert: true + # GitHub API token to make a commit comment + github-token: ${{ secrets.GITHUB_TOKEN }} + # Enable alert commit comment + comment-on-alert: true + # 200% regression will result in alert + alert-threshold: "200%" + # Enable Job Summary for PRs + summary-always: true + + # ######################## + # ### Benchmark ### + # ######################## + # - name: Bench `safe` cli + # shell: bash + # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, + # # passes to tee which displays it in the terminal and writes to output.txt + # run: | + # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt + # cat output.txt | rg benchmark-complete | jq -s 'map({ + # name: (.id | split("/"))[-1], + # unit: "MiB/s", + # value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) + # })' > files-benchmark.json + # timeout-minutes: 15 + + # - name: Confirming the number of files uploaded and downloaded during the benchmark test + # shell: bash + # run: | + # ls -l $CLIENT_DATA_PATH + # ls -l $CLIENT_DATA_PATH/uploaded_files + # ls -l $CLIENT_DATA_PATH/safe_files + + # - name: Store benchmark result + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # # What benchmark tool the output.txt came from + # tool: "customBiggerIsBetter" + # output-file-path: files-benchmark.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/benchmark-data.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true + + # - name: Start a client to carry out download to output the logs + # shell: bash + # run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick + + # - name: Start a client to simulate criterion upload + # shell: bash + # run: | + # ls -l target/release + # target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick + + ######################### + ### Stop Network ### + ######################### + + - name: Stop the local network + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: safe_test_logs_benchmark + platform: ubuntu-latest + build: true + + ######################### + ### Node Mem Analysis ### + ######################### + + # The large file uploaded will increase node's peak mem usage a lot + - name: Check node memory usage + shell: bash + run: | + node_peak_mem_limit_mb="250" # mb + peak_mem_usage=$( + rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number 
--no-filename | + awk -F':' '/"memory_used_mb":/{print $2}' | + sort -n | + tail -n 1 + ) + + echo "Memory usage: $peak_mem_usage MB" + if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then + echo "Node memory usage exceeded threshold: $peak_mem_usage MB" + exit 1 + fi + # Write the node memory usage to a file + echo '[ + { + "name": "node-memory-usage-through-safe-benchmark", + "value": '$peak_mem_usage', + "unit": "MB" + } + ]' > node_memory_usage.json + + - name: check node_memory_usage.json + shell: bash + run: cat node_memory_usage.json + + - name: Alert for node memory usage + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + output-file-path: node_memory_usage.json + # Where the previous data file is stored + external-data-json-path: ./cache/node-mem-usage.json + # Workflow will fail when an alert happens + fail-on-alert: true + # GitHub API token to make a commit comment + github-token: ${{ secrets.GITHUB_TOKEN }} + # Enable alert commit comment + comment-on-alert: true + # Comment on the PR + comment-always: true + # 200% regression will result in alert + alert-threshold: "200%" + # Enable Job Summary for PRs + summary-always: true + + ########################################### + ### Swarm_driver handling time Analysis ### + ########################################### + + - name: Check swarm_driver handling time + shell: bash + run: | + num_of_times=$( + rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | + rg "(\d+) matches" | + rg "\d+" -o + ) + echo "Number of long cmd handling times: $num_of_times" + total_long_handling_ms=$( + rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | + awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + ) + echo "Total cmd long handling time is: $total_long_handling_ms ms" + average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + echo "Average cmd long handling time is: $average_handling_ms ms" + total_long_handling=$(($total_long_handling_ms)) + total_num_of_times=$(($num_of_times)) + num_of_times=$( + rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | + rg "(\d+) matches" | + rg "\d+" -o + ) + echo "Number of long event handling times: $num_of_times" + total_long_handling_ms=$( + rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | + awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + ) + echo "Total event long handling time is: $total_long_handling_ms ms" + average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + echo "Average event long handling time is: $average_handling_ms ms" + total_long_handling=$(($total_long_handling_ms+$total_long_handling)) + total_num_of_times=$(($num_of_times+$total_num_of_times)) + average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) + echo "Total swarm_driver long handling times is: $total_num_of_times" + echo "Total swarm_driver long handling duration is: $total_long_handling ms" + echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + total_num_of_times_limit_hits="30000" # hits + total_long_handling_limit_ms="400000" # ms + average_handling_limit_ms="20" # ms + if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then + echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits" + exit 1 + fi + if (( 
$(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then + echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms" + exit 1 + fi + if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then + echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms" + exit 1 + fi + + # Write the node memory usage to a file + echo '[ + { + "name": "swarm_driver long handling times", + "value": '$total_num_of_times', + "unit": "hits" + }, + { + "name": "swarm_driver long handling total_time", + "value": '$total_long_handling', + "unit": "ms" + }, + { + "name": "swarm_driver average long handling time", + "value": '$average_handling_ms', + "unit": "ms" + } + ]' > swarm_driver_long_handlings.json + + - name: check swarm_driver_long_handlings.json + shell: bash + run: cat swarm_driver_long_handlings.json + + - name: Alert for swarm_driver long handlings + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + output-file-path: swarm_driver_long_handlings.json + # Where the previous data file is stored + external-data-json-path: ./cache/swarm_driver_long_handlings.json + # Workflow will fail when an alert happens + fail-on-alert: true + # GitHub API token to make a commit comment + github-token: ${{ secrets.GITHUB_TOKEN }} + # Enable alert commit comment + comment-on-alert: true + # Comment on the PR + comment-always: true + # 200% regression will result in alert + alert-threshold: "200%" + # Enable Job Summary for PRs + summary-always: true benchmark-cash: name: Compare sn_transfer benchmarks to main diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml index 27a737a7a7..b8c6a10ffe 100644 --- a/.github/workflows/generate-benchmark-charts.yml +++ b/.github/workflows/generate-benchmark-charts.yml @@ -67,7 +67,7 @@ jobs: # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, # passes to tee which displays it in the terminal and writes to output.txt run: | - cargo criterion --features=local --message-format=json 2>&1 -p autonomi | tee -a output.txt + cargo criterion --features=local --message-format=json 2>&1 -p autonomi-cli | tee -a output.txt cat output.txt | rg benchmark-complete | jq -s 'map({ name: (.id | split("/"))[-1], unit: "MiB/s", @@ -158,17 +158,17 @@ jobs: shell: bash run: | peak_mem_usage=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | + rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs/*/*.log -o --no-line-number --no-filename | awk -F':' '/"memory_used_mb":/{print $2}' | sort -n | tail -n 1 ) total_mem=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | + rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs/*/*.log -o --no-line-number --no-filename | awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' ) num_of_times=$( - rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | + rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs/*/*.log -c --stats | rg "(\d+) matches" | rg "\d+" -o ) diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml index 55d3790bb5..d16b417fca 100644 --- a/.github/workflows/memcheck.yml +++ b/.github/workflows/memcheck.yml @@ -5,517 +5,293 @@ on: # on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors # the merge run 
checks should show on master and enable this clear test/passing history merge_group: - branches: [ main, alpha*, beta*, rc* ] + branches: [main, alpha*, beta*, rc*] pull_request: - branches: [ "*" ] + branches: ["*"] env: SAFE_DATA_PATH: /home/runner/.local/share/safe - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi NODE_DATA_PATH: /home/runner/.local/share/safe/node - BOOTSTRAP_NODE_DATA_PATH: /home/runner/.local/share/safe/bootstrap_node RESTART_TEST_NODE_DATA_PATH: /home/runner/.local/share/safe/restart_node - FAUCET_LOG_PATH: /home/runner/.local/share/safe/test_faucet/logs - -# jobs: -# memory-check: -# runs-on: ubuntu-latest -# steps: -# - name: Checkout code -# uses: actions/checkout@v4 - -# - name: Check we're on the right commit -# run: git log -1 --oneline - -# - name: Install Rust -# uses: dtolnay/rust-toolchain@stable - -# - uses: Swatinem/rust-cache@v2 -# continue-on-error: true - -# - name: install ripgrep -# shell: bash -# run: sudo apt-get install -y ripgrep - -# - name: Build binaries -# run: cargo build --release --bin safe --bin safenode -# timeout-minutes: 30 - -# - name: Build faucet binary with gifting -# run: cargo build --release --bin faucet --features gifting -# timeout-minutes: 30 - -# - name: Build tests -# run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run -# timeout-minutes: 30 - -# - name: Start a node instance that does not undergo churn -# run: | -# mkdir -p $BOOTSTRAP_NODE_DATA_PATH -# ./target/release/safenode --first \ -# --root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap & -# sleep 10 -# env: -# SN_LOG: "all" - -# - name: Set SAFE_PEERS -# run: | -# safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \ -# rg '/ip4.*$' -m1 -o | rg '"' -r '') -# echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV - -# - name: Check SAFE_PEERS was set -# shell: bash -# run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" - -# - name: Start a node instance to be restarted -# run: | -# mkdir -p $RESTART_TEST_NODE_DATA_PATH -# ./target/release/safenode \ -# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart & -# sleep 10 -# env: -# SN_LOG: "all" - -# - name: Start a local network -# env: -# SN_LOG: "all" -# uses: maidsafe/sn-local-testnet-action@main -# with: -# action: start -# build: true -# faucet-path: target/release/faucet -# interval: 2000 -# join: true -# node-path: target/release/safenode -# owner-prefix: node -# platform: ubuntu-latest -# set-safe-peers: false - -# # In this case we did *not* want SAFE_PEERS to be set to another value by starting the testnet -# - name: Check SAFE_PEERS was not changed -# shell: bash -# run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}" - -# - name: Create and fund a wallet to pay for files storage -# run: | -# echo "Obtaining address for use with the faucet..." 
-# ./target/release/safe --log-output-dest=data-dir wallet create --no-password -# address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) -# echo "Sending tokens to the faucet at $address" -# ./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt -# cat initial_balance_from_faucet.txt -# cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex -# cat transfer_hex -# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex -# env: -# SN_LOG: "all" -# timeout-minutes: 15 - -# - name: Move faucet log to the working folder -# run: | -# echo "SAFE_DATA_PATH has: " -# ls -l $SAFE_DATA_PATH -# echo "test_faucet foder has: " -# ls -l $SAFE_DATA_PATH/test_faucet -# echo "logs folder has: " -# ls -l $SAFE_DATA_PATH/test_faucet/logs -# mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log -# continue-on-error: true -# if: always() -# timeout-minutes: 1 - -# - name: Download 95mb file to be uploaded with the safe client -# shell: bash -# run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - -# # The resources file we upload may change, and with it mem consumption. -# # Be aware! -# - name: Start a client to upload files -# # -p makes files public -# run: | -# ls -l -# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p -# env: -# SN_LOG: "all" -# timeout-minutes: 25 - -# # this check needs to be after some transfer activity -# - name: Check we're warned about using default genesis -# run: | -# git log -1 --oneline -# ls -la $RESTART_TEST_NODE_DATA_PATH -# cat $RESTART_TEST_NODE_DATA_PATH/safenode.log -# - name: Check we're warned about using default genesis -# run: | -# git log -1 --oneline -# ls -la $BOOTSTRAP_NODE_DATA_PATH -# cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log - -# - name: Check we're warned about using default genesis -# run: | -# git log -1 --oneline -# ls -la $NODE_DATA_PATH -# rg "USING DEFAULT" "$NODE_DATA_PATH" -u -# shell: bash - -# # Uploading same file using different client shall not incur any payment neither uploads -# # Note rg will throw an error directly in case of failed to find a matching pattern. 
-# - name: Start a different client to upload the same file -# run: | -# pwd -# mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first -# ls -l $SAFE_DATA_PATH -# ls -l $SAFE_DATA_PATH/client_first -# mkdir $SAFE_DATA_PATH/client -# ls -l $SAFE_DATA_PATH -# mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs -# ls -l $CLIENT_DATA_PATH -# cp ./the-test-data.zip ./the-test-data_1.zip -# ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password -# ./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt -# cat initial_balance_from_faucet_1.txt -# cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex -# cat transfer_hex -# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex -# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt -# cat second_upload.txt -# rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats -# env: -# SN_LOG: "all" -# timeout-minutes: 25 - -# - name: Stop the restart node -# run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) - -# - name: Start the restart node again -# run: | -# ./target/release/safenode \ -# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted & -# sleep 10 -# env: -# SN_LOG: "all" - -# - name: Assert we've reloaded some chunks -# run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH - -# - name: Chunks data integrity during nodes churn -# run: cargo test --release -p sn_node --test data_with_churn -- --nocapture -# env: -# TEST_DURATION_MINS: 5 -# TEST_TOTAL_CHURN_CYCLES: 15 -# SN_LOG: "all" -# timeout-minutes: 30 - -# - name: Check current files -# run: ls -la -# - name: Check safenode file -# run: ls /home/runner/work/safe_network/safe_network/target/release - -# - name: Check there was no restart issues -# run: | -# if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then -# echo "Restart issues detected" -# exit 1 -# else -# echo "No restart issues detected" -# fi - -# - name: Verify the routing tables of the nodes -# run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture -# env: -# SLEEP_BEFORE_VERIFICATION: 300 -# timeout-minutes: 10 - -# - name: Verify restart of nodes using rg -# shell: bash -# timeout-minutes: 1 -# # get the counts, then the specific line, and then the digit count only -# # then check we have an expected level of restarts -# # TODO: make this use an env var, or relate to testnet size -# run: | -# restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Restart $restart_count nodes" -# peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "PeerRemovedFromRoutingTable $peer_removed times" -# if [ $peer_removed -lt $restart_count ]; then -# echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" -# exit 1 -# fi -# node_count=$(ls $NODE_DATA_PATH | wc -l) -# echo "Node dir count is $node_count" -# # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here -# # if [ $restart_count -lt $node_count ]; then -# # echo "Restart count of: $restart_count is less than the node count of: $node_count" -# # exit 1 -# # fi - -# 
- name: Verify data replication using rg -# shell: bash -# timeout-minutes: 1 -# # get the counts, then the specific line, and then the digit count only -# # then check we have an expected level of replication -# # TODO: make this use an env var, or relate to testnet size -# # As the bootstrap_node using separate folder for logging, -# # hence the folder input to rg needs to cover that as well. -# run: | -# sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Sent $sending_list_count replication lists" -# received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Received $received_list_count replication lists" -# fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Carried out $fetching_attempt_count fetching attempts" -# if: always() - -# - name: Start a client to download files -# run: | -# ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick -# ls -l $CLIENT_DATA_PATH/safe_files -# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) -# if [ $downloaded_files -lt 1 ]; then -# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" -# exit 1 -# fi -# env: -# SN_LOG: "all" -# timeout-minutes: 10 - -# # Download the same files again to ensure files won't get corrupted. -# - name: Start a client to download the same files again -# run: | -# ./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick -# ls -l $CLIENT_DATA_PATH/safe_files -# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) -# if [ $downloaded_files -lt 1 ]; then -# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" -# exit 1 -# fi -# file_size1=$(stat -c "%s" ./the-test-data_1.zip) -# file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip) -# if [ $file_size1 != $file_size2 ]; then -# echo "The downloaded file has a different size $file_size2 to the original $file_size1." -# exit 1 -# fi -# env: -# SN_LOG: "all" -# timeout-minutes: 10 - -# - name: Audit from genesis to collect entire spend DAG and dump to a dot file -# run: | -# ./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt -# echo "==============================================================================" -# cat spend_dag_and_statistics.txt -# env: -# SN_LOG: "all" -# timeout-minutes: 5 -# if: always() - -# - name: Ensure discord_ids decrypted -# run: | -# rg 'node_' ./spend_dag_and_statistics.txt -o -# timeout-minutes: 1 -# if: always() - -# - name: Check nodes running -# shell: bash -# timeout-minutes: 1 -# continue-on-error: true -# run: pgrep safenode | wc -l -# if: always() - -# - name: Wait before verifying reward forwarding -# run: sleep 300 - -# - name: Stop the local network and upload logs -# if: always() -# uses: maidsafe/sn-local-testnet-action@main -# with: -# action: stop -# log_file_prefix: safe_test_logs_memcheck -# platform: ubuntu-latest -# build: true - -# - name: Check node memory usage -# shell: bash -# # The resources file and churning chunk_size we upload may change, and with it mem consumption. 
-# # This is set to a value high enough to allow for some variation depending on -# # resources and node location in the network, but hopefully low enough to catch -# # any wild memory issues -# # Any changes to this value should be carefully considered and tested! -# # As we have a bootstrap node acting as an access point for churning nodes and client, -# # The memory usage here will be significantly higher here than in the benchmark test, -# # where we don't have a bootstrap node. -# run: | -# node_peak_mem_limit_mb="300" # mb - -# peak_mem_usage=$( -# rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | -# awk -F':' '/"memory_used_mb":/{print $2}' | -# sort -n | -# tail -n 1 -# ) -# echo "Node memory usage: $peak_mem_usage MB" - -# if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then -# echo "Node memory usage exceeded threshold: $peak_mem_usage MB" -# exit 1 -# fi -# if: always() - -# - name: Check client memory usage -# shell: bash -# # limits here are lower that benchmark tests as there is less going on. -# run: | -# client_peak_mem_limit_mb="1024" # mb -# client_avg_mem_limit_mb="512" # mb - -# peak_mem_usage=$( -# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | -# awk -F':' '/"memory_used_mb":/{print $2}' | -# sort -n | -# tail -n 1 -# ) -# echo "Peak memory usage: $peak_mem_usage MB" -# if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then -# echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" -# exit 1 -# fi - -# total_mem=$( -# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | -# awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' -# ) -# num_of_times=$( -# rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | -# rg "(\d+) matches" | -# rg "\d+" -o -# ) -# echo "num_of_times: $num_of_times" -# echo "Total memory is: $total_mem" -# average_mem=$(($total_mem/$(($num_of_times)))) -# echo "Average memory is: $average_mem" - -# if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then -# echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" -# exit 1 -# fi - -# - name: Check node swarm_driver handling statistics -# shell: bash -# # With the latest improvements, swarm_driver will be in high chance -# # has no super long handling (longer than 1s). -# # As the `rg` cmd will fail the shell directly if no entry find, -# # hence not covering it. 
-# # Be aware that if do need to looking for handlings longer than second, it shall be: -# # rg "SwarmCmd handled in [^m,ยต,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats -# run: | -# num_of_times=$( -# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | -# rg "(\d+) matches" | -# rg "\d+" -o -# ) -# echo "Number of long cmd handling times: $num_of_times" -# total_long_handling_ms=$( -# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | -# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' -# ) -# echo "Total cmd long handling time is: $total_long_handling_ms ms" -# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) -# echo "Average cmd long handling time is: $average_handling_ms ms" -# total_long_handling=$(($total_long_handling_ms)) -# total_num_of_times=$(($num_of_times)) -# num_of_times=$( -# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | -# rg "(\d+) matches" | -# rg "\d+" -o -# ) -# echo "Number of long event handling times: $num_of_times" -# total_long_handling_ms=$( -# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | -# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' -# ) -# echo "Total event long handling time is: $total_long_handling_ms ms" -# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) -# echo "Average event long handling time is: $average_handling_ms ms" -# total_long_handling=$(($total_long_handling_ms+$total_long_handling)) -# total_num_of_times=$(($num_of_times+$total_num_of_times)) -# average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) -# echo "Total swarm_driver long handling times is: $total_num_of_times" -# echo "Total swarm_driver long handling duration is: $total_long_handling ms" -# echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" - -# - name: Verify reward forwarding using rg -# shell: bash -# timeout-minutes: 1 -# run: | -# min_reward_forwarding_times="100" -# reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Carried out $reward_forwarding_count reward forwardings" -# if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then -# echo "Reward forwarding times below the threshold: $min_reward_forwarding_times" -# exit 1 -# fi -# if: always() - -# - name: Upload payment wallet initialization log -# uses: actions/upload-artifact@main -# with: -# name: payment_wallet_initialization_log -# path: initial_balance_from_faucet.txt -# continue-on-error: true -# if: always() - -# - name: Move faucet log to the working folder -# run: | -# echo "current folder is:" -# pwd -# echo "SAFE_DATA_PATH has: " -# ls -l $SAFE_DATA_PATH -# echo "test_faucet foder has: " -# ls -l $SAFE_DATA_PATH/test_faucet -# echo "logs folder has: " -# ls -l $SAFE_DATA_PATH/test_faucet/logs -# mv $FAUCET_LOG_PATH/*.log ./faucet_log.log -# env: -# SN_LOG: "all" -# continue-on-error: true -# if: always() -# timeout-minutes: 1 - -# - name: Move bootstrap_node log to the working directory -# run: | -# ls -l $BOOTSTRAP_NODE_DATA_PATH -# mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log -# continue-on-error: true -# if: always() -# timeout-minutes: 1 - -# - name: Upload faucet log -# uses: actions/upload-artifact@main -# with: -# name: 
memory_check_faucet_log -#         path: faucet_log.log -#         continue-on-error: true -#         if: always() -# -#       - name: Upload bootstrap_node log -#         uses: actions/upload-artifact@main -#         with: -#           name: memory_check_bootstrap_node_log -#           path: bootstrap_node.log -#         continue-on-error: true -#         if: always() -# -#       - name: Upload spend DAG and statistics -#         uses: actions/upload-artifact@main -#         with: -#           name: memory_check_spend_dag_and_statistics -#           path: spend_dag_and_statistics.txt -#         continue-on-error: true -#         if: always() + +jobs: +  memory-check: +    runs-on: ubuntu-latest +    steps: +      - name: Checkout code +        uses: actions/checkout@v4 + +      - name: Check we're on the right commit +        run: git log -1 --oneline + +      - name: Install Rust +        uses: dtolnay/rust-toolchain@stable + +      - uses: Swatinem/rust-cache@v2 +        continue-on-error: true + +      - name: install ripgrep +        shell: bash +        run: sudo apt-get install -y ripgrep + +      - name: Build binaries +        run: cargo build --release --features local --bin safenode --bin autonomi +        timeout-minutes: 30 + +      - name: Start a local network +        uses: maidsafe/sn-local-testnet-action@main +        with: +          action: start +          enable-evm-testnet: true +          node-path: target/release/safenode +          platform: ubuntu-latest +          build: true + +      - name: Check SAFE_PEERS was set +        shell: bash +        run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" + +      - name: Start a node instance to be restarted +        run: | +          mkdir -p $RESTART_TEST_NODE_DATA_PATH +          ./target/release/safenode \ +            --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --rewards-address "0x03B770D9cD32077cC0bF330c13C114a87643B124" & +          sleep 10 +        env: +          SN_LOG: "all" + +      - name: Download 95mb file to be uploaded with the safe client +        shell: bash +        run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip + +      - name: export default secret key +        run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV +        shell: bash + +      - name: File upload +        run: ./target/release/autonomi --log-output-dest=data-dir file upload --public "./the-test-data.zip" > ./upload_output 2>&1 +        env: +          SN_LOG: "v" +        timeout-minutes: 5 + +      - name: showing the upload terminal output +        run: cat upload_output +        shell: bash +        if: always() + +      - name: parse address +        run: | +          UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) +          echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV +        shell: bash + +      # Uploading the same file using a different client shall not incur any payment, nor any uploads. +      # Note rg will throw an error directly if it fails to find a matching pattern.
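+      # (A small sketch of that rg behaviour, with an assumed pattern shown purely for illustration:
+      # rg exits non-zero when nothing matches, so a bare step command such as
+      #   rg "At address:" ./second_upload
+      # doubles as an assertion and fails the job if the line never appears.)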
+      - name: Start a different client to upload the same file +        run: | +          pwd +          ls -l $SAFE_DATA_PATH +          mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first +          ls -l $SAFE_DATA_PATH +          ls -l $SAFE_DATA_PATH/client_first +          ls -l $SAFE_DATA_PATH/client_first/logs +          mkdir $SAFE_DATA_PATH/client +          ls -l $SAFE_DATA_PATH +          cp ./the-test-data.zip ./the-test-data_1.zip +          ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data_1.zip" > ./second_upload 2>&1 +        env: +          SN_LOG: "all" +        timeout-minutes: 25 + +      - name: showing the second upload terminal output +        run: cat second_upload +        shell: bash +        if: always() + +      - name: Stop the restart node +        run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) + +      - name: Start the restart node again +        run: | +          ./target/release/safenode \ +            --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --rewards-address "0x03B770D9cD32077cC0bF330c13C114a87643B124" & +          sleep 10 +        env: +          SN_LOG: "all" + +      # Records are encrypted, and the seeds will change after a restart. +      # Currently there will be `Existing record found`, but NO `Existing record loaded`, +      # due to decryption failing (a different seed is used). +      - name: Assert we've reloaded some chunks +        run: rg "Existing record found" $RESTART_TEST_NODE_DATA_PATH + +      - name: Wait at least 1min for replication to happen # it is throttled to once/30s. +        run: sleep 60 + +      - name: Verify data replication using rg +        shell: bash +        timeout-minutes: 1 +        # get the counts, then the specific line, and then the digit count only +        # then check we have an expected level of replication +        run: | +          sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ +            rg "(\d+) matches" | rg "\d+" -o) +          echo "Sent $sending_list_count replication lists" +          received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ +            rg "(\d+) matches" | rg "\d+" -o) +          echo "Received $received_list_count replication lists" +          fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ +            rg "(\d+) matches" | rg "\d+" -o) +          echo "Carried out $fetching_attempt_count fetching attempts" +        if: always() + +      - name: File Download +        run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources +        env: +          SN_LOG: "v" +        timeout-minutes: 2 + +      - name: Check nodes running +        shell: bash +        timeout-minutes: 1 +        continue-on-error: true +        run: pgrep safenode | wc -l +        if: always() + +      - name: Stop the local network and upload logs +        if: always() +        uses: maidsafe/sn-local-testnet-action@main +        with: +          action: stop +          log_file_prefix: safe_test_logs_memcheck +          platform: ubuntu-latest +          build: true + +      - name: Check node memory usage +        shell: bash +        # The resources file and churning chunk_size we upload may change, and with it mem consumption. +        # This is set to a value high enough to allow for some variation depending on +        # resources and node location in the network, but hopefully low enough to catch +        # any wild memory issues +        # Any changes to this value should be carefully considered and tested! +        # As we have a bootstrap node acting as an access point for churning nodes and the client, +        # the memory usage here will be significantly higher than in the benchmark test, +        # where we don't have a bootstrap node.
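+        # (Sketch of the threshold idiom used below, added for clarity: bash arithmetic is
+        # integer-only, so float-capable comparisons are delegated to bc, e.g.
+        #   if (( $(echo "301.5 > 300" | bc -l) )); then echo "over limit"; fi
+        # bc -l prints 1 when the relation holds and 0 otherwise, which (( )) treats as true/false.)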
+        run: | +          node_peak_mem_limit_mb="300" # mb + +          peak_mem_usage=$( +            rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | +            awk -F':' '/"memory_used_mb":/{print $2}' | +            sort -n | +            tail -n 1 +          ) +          echo "Node memory usage: $peak_mem_usage MB" + +          if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then +            echo "Node memory usage exceeded threshold: $peak_mem_usage MB" +            exit 1 +          fi +        if: always() + +      - name: Check client memory usage +        shell: bash +        # limits here are lower than in the benchmark tests as there is less going on. +        run: | +          client_peak_mem_limit_mb="1024" # mb +          client_avg_mem_limit_mb="512" # mb + +          peak_mem_usage=$( +            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename | +            awk -F':' '/"memory_used_mb":/{print $2}' | +            sort -n | +            tail -n 1 +          ) +          echo "Peak memory usage: $peak_mem_usage MB" +          if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then +            echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" +            exit 1 +          fi + +          total_mem=$( +            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename | +            awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' +          ) +          num_of_times=$( +            rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob autonomi.* -c --stats | +            rg "(\d+) matches" | +            rg "\d+" -o +          ) +          echo "num_of_times: $num_of_times" +          echo "Total memory is: $total_mem" +          average_mem=$(($total_mem/$(($num_of_times)))) +          echo "Average memory is: $average_mem" + +          if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then +            echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" +            exit 1 +          fi + +      # Logging of handling time is at Trace level, +      # while the local_network startup tool sets the logging level to Debug. +      # +      # - name: Check node swarm_driver handling statistics +      #   shell: bash +      #   # With the latest improvements, swarm_driver will most likely +      #   # have no super long handling (longer than 1s). +      #   # As the `rg` cmd will fail the shell directly if no entry is found, +      #   # it is not covered here.
#   # Be aware that if you do need to look for handlings longer than a second, it shall be: +      #   # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats +      #   run: | +      #     num_of_times=$( +      #       rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | +      #       rg "(\d+) matches" | +      #       rg "\d+" -o +      #     ) +      #     echo "Number of long cmd handling times: $num_of_times" +      #     total_long_handling_ms=$( +      #       rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | +      #       awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' +      #     ) +      #     echo "Total cmd long handling time is: $total_long_handling_ms ms" +      #     average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) +      #     echo "Average cmd long handling time is: $average_handling_ms ms" +      #     total_long_handling=$(($total_long_handling_ms)) +      #     total_num_of_times=$(($num_of_times)) +      #     num_of_times=$( +      #       rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | +      #       rg "(\d+) matches" | +      #       rg "\d+" -o +      #     ) +      #     echo "Number of long event handling times: $num_of_times" +      #     total_long_handling_ms=$( +      #       rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | +      #       awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' +      #     ) +      #     echo "Total event long handling time is: $total_long_handling_ms ms" +      #     average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) +      #     echo "Average event long handling time is: $average_handling_ms ms" +      #     total_long_handling=$(($total_long_handling_ms+$total_long_handling)) +      #     total_num_of_times=$(($num_of_times+$total_num_of_times)) +      #     average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) +      #     echo "Total swarm_driver long handling times is: $total_num_of_times" +      #     echo "Total swarm_driver long handling duration is: $total_long_handling ms" +      #     echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + +      - name: Move restart_node log to the working directory +        run: | +          ls -l $RESTART_TEST_NODE_DATA_PATH +          mv $RESTART_TEST_NODE_DATA_PATH/safenode.log ./restart_node.log +        continue-on-error: true +        if: always() +        timeout-minutes: 1 + +      - name: Upload restart_node log +        uses: actions/upload-artifact@main +        with: +          name: memory_check_restart_node_log +          path: restart_node.log +        continue-on-error: true +        if: always() diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 98ee999b06..db89c867be 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -110,6 +110,10 @@ jobs: - uses: Swatinem/rust-cache@v2 +      - name: Run autonomi tests +        timeout-minutes: 25 +        run: cargo test --release --package autonomi --lib --features="full,fs" + - name: Run node tests timeout-minutes: 25 run: cargo test --release --package sn_node --lib @@ -186,7 +190,7 @@ jobs: echo "EVM_NETWORK has been set to $EVM_NETWORK" fi -      # only these unit tests require a network, the rest are run above +      # only these unit tests require a network, the rest are run above in the unit tests section - name: Run autonomi --tests run: cargo test --package autonomi --tests -- --nocapture env: @@ -313,7 +317,7 @@ jobs: - name: Delete current register signing key shell: bash -        run: rm -rf ${{ matrix.safe_path }}/client +        run: rm -rf ${{ matrix.safe_path }}/autonomi - name: Generate new register signing key run: ./target/release/autonomi --log-output-dest=data-dir register generate-key @@
@@ -531,15 +535,19 @@ jobs: # platform: ${{ matrix.os }} # build: true - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi + # # in case the faucet is not ready yet + # - name: 30s sleep for faucet completion + # run: sleep 30 + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi # - name: execute token_distribution tests # run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 @@ -623,6 +631,10 @@ jobs: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 30 + # Sleep for a while so that restarted nodes can be detected by others + - name: Sleep a while + run: sleep 300 + - name: Stop the local network and upload logs if: always() uses: maidsafe/sn-local-testnet-action@main @@ -631,7 +643,14 @@ jobs: log_file_prefix: safe_test_logs_churn platform: ${{ matrix.os }} - - name: Verify restart of nodes using rg + - name: Get total node count + shell: bash + timeout-minutes: 1 + run: | + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" + + - name: Get restart of nodes using rg shell: bash timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only @@ -640,16 +659,23 @@ jobs: run: | restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ rg "(\d+) matches" | rg "\d+" -o) - echo "Restart $restart_count nodes" + echo "Restarted $restart_count nodes" + + # `PeerRemovedFromRoutingTable` now only happens when a peer is reported as `BadNode`. + # Otherwise kad will remove a `dropped out node` directly from the RT. + # So, explicit detection of the removal is now much less likely, + # due to the removal of connection_issue tracking. + - name: Get peers removed from nodes using rg + shell: bash + timeout-minutes: 1 + run: | peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt $restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 0; } + if [ -z "$peer_removed" ]; then + echo "No peer removal count found" exit 1 fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" + echo "PeerRemovedFromRoutingTable $peer_removed times" # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here # if [ $restart_count -lt $node_count ]; then
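The two `Get ... using rg` steps in the hunk above (and their nightly.yml counterparts further down) share one counting idiom: ask ripgrep for per-file counts plus `--stats`, then pull the total out of the `N matches` stats line. A sketch of that idiom as a reusable helper, assuming the same log layout (the function name is illustrative, not part of the workflows):

    # Print the total number of matches of PATTERN under PATH, taken from rg's --stats output.
    count_matches() {
      rg "$1" "$2" -c --stats | rg "(\d+) matches" | rg "\d+" -o | head -n 1
    }
    restart_count=$(count_matches "Node is restarting in" "$NODE_DATA_PATH") || restart_count=0
    echo "Restarted $restart_count nodes"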
@@ -763,12 +789,16 @@ jobs: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 + # Sleep for a while so that restarted nodes can be detected by others + - name: Sleep a while + run: sleep 300 + - name: Stop the local network and upload logs if: always() uses: maidsafe/sn-local-testnet-action@main with: action: stop - log_file_prefix: safe_test_logs_data_location + log_file_prefix: safe_test_logs_data_location_routing_table platform: ${{ matrix.os }} - name: Verify restart of nodes using rg @@ -776,20 +806,29 @@ jobs: timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only # then check we have an expected level of restarts - # TODO: make this use an env var, or relate to testnet size + # + # `PeerRemovedFromRoutingTable` now only happens when a peer is reported as `BadNode`. + # Otherwise kad will remove a `dropped out node` directly from the RT. + # So, explicit detection of the removal is now much less likely, + # due to the removal of connection_issue tracking. + # + # With the further reduction of replication frequency, + # it now becomes harder to detect a `dropped out node` as a `failed to replicate` node. + # Hence the assertion check is now removed and replaced with a printout only. run: | + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ rg "(\d+) matches" | rg "\d+" -o) echo "Restart $restart_count nodes" + if ! rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats + then + echo "No peer removal count found" + exit 0 + fi peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ rg "(\d+) matches" | rg "\d+" -o) echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt $restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - exit 1 - fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" # Only error out after uploading the logs - name: Don't log raw data @@ -860,15 +899,15 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Create and fund a wallet first time - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt - # echo "----------" - # cat first.txt - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet first time + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt + # echo "----------" + # cat first.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Move faucet log to the working folder # run: | @@ -894,44 +933,64 @@ jobs: # continue-on-error: true # if: always() - # - name: Create and fund a wallet second time - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt - # echo
"----------" - # cat second.txt - # if grep "genesis is already spent" second.txt; then - # echo "Duplicated faucet rejected" - # else - # echo "Duplicated faucet not rejected!" - # exit 1 - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: Create and fund a wallet with different keypair - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then - # echo "Faucet with different genesis key not rejected!" - # exit 1 - # else - # echo "Faucet with different genesis key rejected" - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Cleanup prior faucet and cashnotes + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/autonomi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Create a new wallet + # run: ~/safe --log-output-dest=data-dir wallet create --no-password + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Attempt second faucet genesis disbursement + # run: ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) > second.txt 2>&1 || true + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: cat second.txt + # run: cat second.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Verify a second disbursement is rejected + # run: | + # if grep "Faucet disbursement has already occured" second.txt; then + # echo "Duplicated faucet rejected" + # else + # echo "Duplicated faucet not rejected!" + # exit 1 + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Create and fund a wallet with different keypair + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/autonomi + # ~/safe --log-output-dest=data-dir wallet create --no-password + # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then + # echo "Faucet with different genesis key not rejected!" 
+ # exit 1 + # else + # echo "Faucet with different genesis key rejected" + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Build faucet binary again without the gifting feature # run: cargo build --release --bin faucet @@ -943,7 +1002,7 @@ jobs: # ls -l /home/runner/.local/share/safe # rm -rf /home/runner/.local/share/safe/test_faucet # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client + # rm -rf /home/runner/.local/share/safe/autonomi # target/release/faucet server & # sleep 60 # env: @@ -970,152 +1029,135 @@ jobs: # platform: ubuntu-latest # log_file_prefix: safe_test_logs_faucet - # large_file_upload_test: - # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - # name: Large file upload - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v4 - - # - name: Install Rust - # uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 + large_file_upload_test: + if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + name: Large file upload + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 - # - name: install ripgrep - # shell: bash - # run: sudo apt-get install -y ripgrep + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 - # - name: Check the available space - # run: | - # df - # echo "Home dir:" - # du -sh /home/runner/ - # echo "Home subdirs:" - # du -sh /home/runner/*/ - # echo "PWD:" - # du -sh . - # echo "PWD subdirs:" - # du -sh */ - - # - name: Download material, 1.1G - # shell: bash - # run: | - # wget https://releases.ubuntu.com/14.04.6/ubuntu-14.04.6-desktop-i386.iso - # ls -l + - name: install ripgrep + shell: bash + run: sudo apt-get install -y ripgrep - # - name: Build binaries - # run: cargo build --release --bin safenode --bin safe - # timeout-minutes: 30 + - name: Check the available space + run: | + df + echo "Home dir:" + du -sh /home/runner/ + echo "Home subdirs:" + du -sh /home/runner/*/ + echo "PWD:" + du -sh . + echo "PWD subdirs:" + du -sh */ + + - name: Download material (135MB) + shell: bash + run: | + mkdir test_data_1 + cd test_data_1 + wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safe-qiWithListeners-x86_64.tar.gz + wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode-qiWithListeners-x86_64.tar.gz + ls -l + cd .. 
+ tar -cvzf test_data_1.tar.gz test_data_1 + ls -l - # - name: Build faucet binary - # run: cargo build --release --bin faucet --features gifting - # timeout-minutes: 30 + - name: Build binaries + run: cargo build --release --features local --bin safenode --bin autonomi + timeout-minutes: 30 - # - name: Start a local network - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: start - # interval: 2000 - # node-path: target/release/safenode - # faucet-path: target/release/faucet - # platform: ubuntu-latest - # build: true + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@main + with: + action: start + enable-evm-testnet: true + node-path: target/release/safenode + platform: ubuntu-latest + build: true - # - name: Check we're _not_ warned about using default genesis - # run: | - # if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then - # exit 1 - # fi - # shell: bash + - name: Check if SAFE_PEERS and EVM_NETWORK are set + shell: bash + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" + fi - # # The test currently fails because the GH runner runs out of disk space. So we clear out the target dir here. - # # Might be related to additional deps used in the codebase. - # - name: Move built binaries and clear out target dir - # shell: bash - # run: | - # mv target/release/faucet ~/faucet - # mv target/release/safe ~/safe - # rm -rf target + - name: Check the available space post download + run: | + df + echo "Home dir:" + du -sh /home/runner/ + echo "Home subdirs:" + du -sh /home/runner/*/ + echo "PWD:" + du -sh . + echo "PWD subdirs:" + du -sh */ - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi + - name: export default secret key + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash - # - name: Check the available space post download - # run: | - # df - # echo "Home dir:" - # du -sh /home/runner/ - # echo "Home subdirs:" - # du -sh /home/runner/*/ - # echo "PWD:" - # du -sh . 
- # echo "PWD subdirs:" - # du -sh */ - - # - name: Create and fund a wallet to pay for files storage - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + - name: File upload + run: ./target/release/autonomi --log-output-dest=data-dir file upload "./test_data_1.tar.gz" > ./upload_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 5 - # - name: Start a client to upload - # run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick - # env: - # SN_LOG: "all" - # timeout-minutes: 30 + - name: showing the upload terminal output + run: cat upload_output + shell: bash + if: always() - # - name: Stop the local network and upload logs - # if: always() - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: stop - # platform: ubuntu-latest - # log_file_prefix: safe_test_logs_large_file_upload - # build: true + - name: parse address + run: | + UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV + shell: bash - # - name: check there is no failed replication fetch - # shell: bash - # run: | - # if grep -r "failed to fetch" $NODE_DATA_PATH - # then - # echo "We find failed replication fetch" - # exit 1 - # fi - # env: - # NODE_DATA_PATH: /home/runner/.local/share/safe/node - # timeout-minutes: 1 + - name: File Download + run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources > ./download_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 5 - # - name: Check the home dir leftover space - # run: | - # df - # du -sh /home/runner/ + - name: showing the download terminal output + run: | + cat download_output + ls -l + cd downloaded_resources + ls -l + shell: bash + if: always() - # - name: Confirm the wallet files (cash_notes, confirmed_spends) - # run: | - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # ls $CLIENT_DATA_PATH/wallet/confirmed_spends -l - # ls $CLIENT_DATA_PATH/logs -l - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 1 + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + platform: ubuntu-latest + log_file_prefix: safe_test_logs_large_file_upload + build: true # replication_bench_with_heavy_upload: # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" # name: Replication bench with heavy upload # runs-on: ubuntu-latest + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi # steps: # - uses: actions/checkout@v4 @@ -1192,14 +1234,28 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Create and fund a wallet to pay for files storage - # run: | - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Sleep 15s 
+ # shell: bash + # run: sleep 15 + + # - name: Check faucet has been funded + # shell: bash + # run: | + # cash_note_count=$(ls -l /home/runner/.local/share/safe/test_faucet/wallet/cash_notes/ | wc -l) + # echo $cash_note_count + # if [ "$cash_note_count" -eq 0 ]; then + # echo "Error: Expected at least 1 cash note, but found $cash_note_count" + # exit 1 + # fi + + # - name: Create and fund a wallet to pay for files storage + # run: | + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Start a client to upload first file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick @@ -1207,29 +1263,32 @@ jobs: # SN_LOG: "all" # timeout-minutes: 5 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Check current directories + # run: | + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # timeout-minutes: 1 + + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 @@ -1241,52 +1300,49 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) - # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then - # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(find 
$CLIENT_DATA_PATH/wallet/payments -type f | wc -l) - # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) + # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then + # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) + # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 # timeout-minutes: 6 - # # Start a different client to avoid local wallet slow down with more payments handled. - # - name: Start a different client - # run: | - # pwd - # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - # ls -l $SAFE_DATA_PATH - # ls -l $SAFE_DATA_PATH/client_first - # mkdir $SAFE_DATA_PATH/client - # ls -l $SAFE_DATA_PATH - # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - # ls -l $CLIENT_DATA_PATH - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # SAFE_DATA_PATH: /home/runner/.local/share/safe - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 25 + # # Start a different client to avoid local wallet slow down with more payments handled. 
+ # - name: Start a different client + # run: | + # pwd + # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first + # ls -l $SAFE_DATA_PATH + # ls -l $SAFE_DATA_PATH/client_first + # mkdir $SAFE_DATA_PATH/client + # ls -l $SAFE_DATA_PATH + # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs + # ls -l $CLIENT_DATA_PATH + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # timeout-minutes: 25 # - name: Use second client to upload third file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick @@ -1294,29 +1350,27 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # timeout-minutes: 10 # - name: Stop the local network and upload logs # if: always() diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index aac0ac9ad4..843507abff 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -15,7 +15,13 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest, macos-latest] + include: + - os: ubuntu-latest + safe_path: /home/runner/.local/share/safe + - os: windows-latest + safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + - os: macos-latest + safe_path: /Users/runner/Library/Application\ Support/safe steps: - uses: actions/checkout@v4 @@ -26,77 +32,181 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --bin safenode --bin safe --bin faucet + run: cargo build --release --features local --bin safenode --bin autonomi timeout-minutes: 30 - 
name: Start a local network uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - - name: Check contact peer + - name: Check if SAFE_PEERS and EVM_NETWORK are set shell: bash - run: echo "Peer is $SAFE_PEERS" + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" + fi # only these unit tests require a network, the rest are run above in unit test section - - name: Run sn_client --tests - run: cargo test --package sn_client --release --tests + - name: Run autonomi --tests + run: cargo test --package autonomi --tests -- --nocapture env: - SN_LOG: "all" + SN_LOG: "v" # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 15 - - name: Create and fund a wallet to pay for files storage + + # FIXME: do this in a generic way for localtestnets + - name: export default secret key + if: matrix.os != 'windows-latest' + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + - name: Set secret key for Windows + if: matrix.os == 'windows-latest' + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: Get file cost + run: ./target/release/autonomi --log-output-dest=data-dir file cost "./resources" + env: + SN_LOG: "v" + timeout-minutes: 15 + + - name: File upload + run: ./target/release/autonomi --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 15 + + - name: parse address (unix) + if: matrix.os != 'windows-latest' run: | - cargo run --bin faucet --release -- --log-output-dest=data-dir send 1000000 $(cargo run --bin safe --release -- --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - cargo run --bin safe --release -- --log-output-dest=data-dir wallet receive --file transfer_hex + UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse address (win) + if: matrix.os == 'windows-latest' + run: | + $UPLOAD_ADDRESS = rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: File Download + run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 5 + + - name: Generate register signing key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key - - name: Start a client to carry out chunk actions - run: cargo run --bin safe --release -- --log-output-dest=data-dir files upload "./resources" --retry-strategy quick + - name: Create register (writeable by owner) + run: ./target/release/autonomi --log-output-dest=data-dir register create baobao 123 > ./register_create_output 
2>&1 env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 10 + + - name: parse register address (unix) + if: matrix.os != 'windows-latest' + run: | + REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output) + echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse register address (win) + if: matrix.os == 'windows-latest' + run: | + $REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output + echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - # Client FoldersApi tests against local network - - name: Client FoldersApi tests against local network - run: cargo test --release --package sn_client --test folders_api + - name: Get register + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" + timeout-minutes: 5 + + - name: Edit register + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456 + env: + SN_LOG: "v" timeout-minutes: 10 - # CLI Acc-Packet files and folders tests against local network - - name: CLI Acc-Packet files and folders tests - run: cargo test --release -p sn_cli test_acc_packet -- --nocapture + - name: Get register (after edit) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" + timeout-minutes: 5 + + - name: Create Public Register (writeable by anyone) + run: ./target/release/autonomi --log-output-dest=data-dir register create bao 111 --public > ./register_public_create_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 5 + + - name: parse public register address (unix) + if: matrix.os != 'windows-latest' + run: | + PUBLIC_REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output) + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse public register address (win) + if: matrix.os == 'windows-latest' + run: | + $PUBLIC_REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: Get Public Register (current key is the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + env: + SN_LOG: "v" + timeout-minutes: 5 + + - name: Edit Public Register (current key is the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 222 + env: + SN_LOG: "v" timeout-minutes: 10 - - name: Start a client to create a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register create -n baobao + - name: Delete current register signing key + shell: bash + run: rm -rf ${{ matrix.safe_path }}/autonomi + + - name: Generate new register signing key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key + + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 2 - - name: Start a client to get a register - run: cargo run --bin safe --release -- 
--log-output-dest=data-dir register get -n baobao + - name: Edit Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 333 env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 10 - - name: Start a client to edit a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register edit -n baobao wood + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 2 - name: Stop the local network and upload logs @@ -134,31 +244,17 @@ jobs: run: cargo test --release --lib --bins --no-run timeout-minutes: 30 - - name: Run CLI tests - timeout-minutes: 25 - run: cargo test --release --package sn_cli -- --skip test_acc_packet_ - - - name: Run client tests - timeout-minutes: 25 - # we do not run the `--tests` here are they are run in the e2e job - # as they rquire a network - run: | - cargo test --release --package sn_client --doc - cargo test --release --package sn_client --lib - cargo test --release --package sn_client --bins - cargo test --release --package sn_client --examples - - name: Run node tests timeout-minutes: 25 run: cargo test --release --package sn_node --lib - name: Run network tests timeout-minutes: 25 - run: cargo test --release -p sn_networking --features="open-metrics" + run: cargo test --release --package sn_networking --features="open-metrics" - name: Run protocol tests timeout-minutes: 25 - run: cargo test --release -p sn_protocol + run: cargo test --release --package sn_protocol - name: Run transfers tests timeout-minutes: 25 @@ -167,13 +263,12 @@ jobs: - name: Run logging tests timeout-minutes: 25 run: cargo test --release --package sn_logging - + - name: Run register tests - shell: bash timeout-minutes: 50 + run: cargo test --release --package sn_registers env: PROPTEST_CASES: 512 - run: cargo test --release -p sn_registers - name: post notification to slack on failure if: ${{ failure() }} @@ -183,210 +278,6 @@ jobs: SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" SLACK_TITLE: "Nightly Unit Test Run Failed" - spend_test: - name: spend tests against network - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build binaries - run: cargo build --release --features=local --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: execute the sequential transfers test - run: cargo test --release -p sn_node --features="local" --test sequential_transfers -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - SN_LOG: "all" - timeout-minutes: 10 - - - name: execute the storage payment tests - run: cargo test --release -p sn_node --features="local" --test storage_payments -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - SN_LOG: "all" - timeout-minutes: 10 - - - name: execute the double spend tests - run: cargo test --release -p sn_node --features="local" --test double_spend -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 - - - name: Small wait to allow reward receipt - run: sleep 30 - timeout-minutes: 1 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend - platform: ${{ matrix.os }} - - - name: post notification to slack on failure - if: ${{ failure() }} - uses: bryannice/gitactions-slack-notification@2.0.0 - env: - SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - SLACK_TITLE: "Nightly Spend Test Run Failed" - - # runs with increased node count - spend_simulation: - name: spend simulation - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build binaries - run: cargo build --release --features=local --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local --test spend_simulation --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-count: 50 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: execute the spend simulation test - run: cargo test --release -p sn_node --features="local" --test spend_simulation -- --nocapture - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 25 - - - name: Small wait to allow reward receipt - run: sleep 30 - timeout-minutes: 1 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend_simulation - platform: ${{ matrix.os }} - - - name: post notification to slack on failure - if: ${{ failure() }} - uses: bryannice/gitactions-slack-notification@2.0.0 - env: - SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - SLACK_TITLE: "Nightly Spend Test Run Failed" - - token_distribution_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: token distribution test - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --features=local,distribution --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release --features=local,distribution --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: execute token_distribution tests - run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_token_distribution - platform: ${{ matrix.os }} - churn: name: Network churning tests runs-on: ${{ matrix.os }} @@ -412,7 +303,7 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode timeout-minutes: 30 - name: Build churn tests @@ -427,14 +318,13 @@ jobs: uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - name: Chunks data integrity during nodes churn (during 10min) (in theory) - run: cargo test --release -p sn_node --features="local" --test data_with_churn -- --nocapture + run: cargo test --release -p sn_node --features=local --test data_with_churn -- --nocapture env: TEST_DURATION_MINS: 60 TEST_CHURN_CYCLES: 6 @@ -442,7 +332,46 @@ jobs: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 90 - - name: Verify restart of nodes using rg + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: safe_test_logs_churn + platform: ${{ matrix.os }} + + + - name: Get total node count + shell: bash + timeout-minutes: 1 + run: | + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" + + - name: Get restart of nodes using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of restarts + # TODO: make this use an env var, or relate to testnet size + run: | + restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Restarted $restart_count nodes" + + - name: Get peers removed from nodes using rg + shell: bash + timeout-minutes: 1 + run: | + peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 1; } + if [ -z "$peer_removed" ]; then + echo "No peer removal count found" + exit 1 + fi + echo "PeerRemovedFromRoutingTable $peer_removed times" + + - name: Verify peers removed exceed restarted node counts shell: bash timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only @@ -459,8 +388,6 @@ jobs: echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" exit 1 fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here # if [ $restart_count -lt $node_count ]; then @@ -484,14 +411,6 @@ jobs: exit 1 fi - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_churn - platform: ${{ matrix.os }} - - name: post notification to slack on failure if: ${{ failure() }} uses: bryannice/gitactions-slack-notification@2.0.0 @@ -537,7 +456,7 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode timeout-minutes: 30 - name: Build data location and routing table tests @@ -552,31 +471,38 @@ jobs: uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - name: Verify the Routing table of the nodes - run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features=local --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --features="local" --test verify_data_location -- --nocapture + run: cargo test --release -p sn_node --features=local --test verify_data_location -- --nocapture env: SN_LOG: "all" CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 90 - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features=local --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: safe_test_logs_data_location + platform: ${{ matrix.os }} + - name: Verify restart of nodes using rg shell: bash timeout-minutes: 1 @@ -597,14 +523,6 @@ jobs: node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) echo "Node dir count is $node_count" - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_data_location - platform: ${{ matrix.os }} - - name: post notification to slack on failure if: ${{ failure() }} uses: bryannice/gitactions-slack-notification@2.0.0 diff --git a/.github/workflows/nightly_wan.yml b/.github/workflows/nightly_wan.yml index 9c84f58488..e5f4a42511 100644 --- a/.github/workflows/nightly_wan.yml +++ b/.github/workflows/nightly_wan.yml @@ -129,7 +129,7 @@ jobs: path: | ~/.local/share/safe/node/*/logs/*.log* ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* + ~/.local/share/safe/autonomi/logs/*/*.log* - name: destroy network if: always() @@ -235,7 +235,7 @@ jobs: # path: | # ~/.local/share/safe/node/*/logs/*.log* # ~/.local/share/safe/*/*/*.log* - # ~/.local/share/safe/client/logs/*/*.log* + # ~/.local/share/safe/autonomi/logs/*/*.log* # - name: destroy network # uses: maidsafe/sn-testnet-control-action/destroy-network@main @@ -349,7 +349,7 @@ jobs: # path: | # ~/.local/share/safe/node/*/logs/*.log* # ~/.local/share/safe/*/*/*.log* - # ~/.local/share/safe/client/logs/*/*.log* + # ~/.local/share/safe/autonomi/logs/*/*.log* # # - name: Stop the WAN network # if: always() @@ -555,7 +555,7 @@ jobs: # path: | # ~/.local/share/safe/node/*/logs/*.log* # ~/.local/share/safe/*/*/*.log* - # ~/.local/share/safe/client/logs/*/*.log* + # ~/.local/share/safe/autonomi/logs/*/*.log* # # - name: Stop the WAN network # if: always() diff --git a/.github/workflows/nightly_wan_churn.yml b/.github/workflows/nightly_wan_churn.yml index 5101f8fdd6..2cbf72fd8c 100644 --- a/.github/workflows/nightly_wan_churn.yml +++ b/.github/workflows/nightly_wan_churn.yml @@ -127,7 +127,7 @@ jobs: path: | ~/.local/share/safe/node/*/logs/*.log* ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* + ~/.local/share/safe/autonomi/logs/*/*.log* - name: Stop the WAN network if: always() diff --git a/.github/workflows/node_man_tests.yml b/.github/workflows/node_man_tests.yml index 55cd701cbf..54d6d3d625 100644 --- a/.github/workflows/node_man_tests.yml +++ b/.github/workflows/node_man_tests.yml @@ -25,7 +25,7 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: cargo cache registry, index and build - uses: actions/cache@v4.0.2 + uses: actions/cache@v4.1.2 with: path: | ~/.cargo/registry diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f4d77a9c6..d68be75785 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,80 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* +## 2024-10-28 + +### Autonomi API/CLI + +#### Added + +- Private data support.
+- Local user data support. +- Network Vault containing encrypted user data. +- Archives with Metadata. +- Prepaid upload support for data_put using receipts. + +#### Changed + +- Contract token approval amount set to infinite before doing data payments. + +### Client + +#### Added + +- Expose APIs in WASM (e.g. archives, vault and user data within vault). +- Uploads are now run in parallel. +- Support for local wallets. +- Provide `wallet create` command. +- Provide `wallet balance` command. + +#### Changed + +- Take metadata from file system and add `uploaded` field for time of upload. + +#### Fixed + +- Make sure we use the new client path throughout the codebase. + +### Network + +#### Added + +- Get range used for store cost and register queries. +- Re-enabled large_file_upload, memcheck, benchmark CI tests. + +#### Changed + +- Scratchpad modifications to support multiple data encodings. +- Registers are now merged at the network level, preventing failures during update and during + replication. +- Libp2p config and get range tweaks reduce intensity of operations. Brings down CPU usage + considerably. +- Libp2p's native kad bootstrap interval introduced in 0.54.1 is intensive, and as we roll our own, + we significantly reduce the kad period to lighten the CPU load. +- Wipe node's storage dir when restarting for a new network. + +#### Fixed + +- Fixes in networking code for WASM compatibility (replacing `std::time` with a compatible + alternative). +- Event dropped errors should not happen if the event is not dropped. +- Reduce outdated connection pruning frequency. + +### Node Manager + +#### Fixed + +- Local node register is cleaned up when the --clean flag is applied (prevents some errors when the + register changes). + +### Launchpad + +#### Fixed + +- Status screen is updated after nodes have been reset. +- Rewards Address is required before starting nodes. User input is required. +- Spinner does not stop spinning after two minutes when nodes are running.
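Condensed into one place, the CLI entries in this release map onto the flow the reworked workflows above exercise. A sketch using the same commands (the `--log-output-dest=data-dir` flag is elided, `<REGISTER_ADDRESS>` stands for the parsed register address, and a funded `SECRET_KEY` is assumed, as in the workflow env):

    autonomi wallet create                      # new `wallet create` command
    autonomi wallet balance                     # new `wallet balance` command
    autonomi file cost "./resources"
    autonomi file upload "./resources" > upload_output
    UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output)
    autonomi file download "$UPLOAD_ADDRESS" ./downloaded_resources
    autonomi register generate-key
    autonomi register create baobao 123         # writeable by the owner
    autonomi register edit <REGISTER_ADDRESS> 456
    autonomi register get <REGISTER_ADDRESS>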
+ ## 2024-10-24 ### Network diff --git a/Cargo.lock b/Cargo.lock index a5c06f7ce9..dfcaa5e8c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,9 +118,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2" +checksum = "d8cbebb817e6ada1abb27e642592a39eebc963eb0b9e78f66c467549f3903770" dependencies = [ "alloy-consensus", "alloy-contract", @@ -151,9 +151,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" +checksum = "cdf02dfacfc815214f9b54ff50d54900ba527a68fd73e2c5637ced3460005045" dependencies = [ "alloy-eips", "alloy-primitives", @@ -167,9 +167,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad" +checksum = "d45354c6946d064827d3b85041876aad9490b634f1761139934f8b1f65686b09" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -228,20 +228,21 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "c15873ee28dfe5a1aeddd762483bc7f378b465ec49bdce8165c4c46b4f55cb0a" dependencies = [ "alloy-primitives", "alloy-rlp", + "derive_more", "serde", ] [[package]] name = "alloy-eips" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" +checksum = "769da342b6bcd945013925ef4c40763cc82f11e002c60702dba8b444bb60e5a7" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -257,9 +258,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" +checksum = "c698ce0ada980b17f0323e1a28c7da8a2e9abc6dff5be9ee33d1525b28ac46b6" dependencies = [ "alloy-primitives", "alloy-serde", @@ -280,9 +281,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +checksum = "c1050e1d65524c030b17442b6546b564da51fdab7f71bd534b001ba65f2ebb16" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -294,9 +295,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" +checksum = "da34a18446a27734473af3d77eb21c5ebbdf97ea8eb65c39c0b50916bc659023" dependencies = [ "alloy-consensus", "alloy-eips", @@ -315,9 +316,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" +checksum = 
"9a968c063fcfcb937736665c865a71fc2242b68916156f5ffa41fee7b44bb695" dependencies = [ "alloy-consensus", "alloy-eips", @@ -328,9 +329,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" +checksum = "439fc6a933b9f8e8b272a8cac35dbeabaf2b2eaf9590482bebedb5782153118e" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -372,9 +373,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" +checksum = "c45dbc0e3630becef9e988b69d43339f68d67e32a854e3c855bc28bd5031895b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -387,6 +388,7 @@ dependencies = [ "alloy-rpc-client", "alloy-rpc-types-anvil", "alloy-rpc-types-eth", + "alloy-signer", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -397,14 +399,17 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "parking_lot", "pin-project", "reqwest 0.12.7", + "schnellru", "serde", "serde_json", "thiserror", "tokio", "tracing", "url", + "wasmtimer", ] [[package]] @@ -431,9 +436,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +checksum = "917e5504e4f8f7e39bdc322ff81589ed54c1e462240adaeb58162c2d986a5a2b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -449,13 +454,14 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" +checksum = "07c7eb2dc6db1dd41e5e7bd2b98a38813854efc30e034afd90d1e420e7f3de2b" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -466,9 +472,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" +checksum = "2640928d9b1d43bb1cec7a0d615e10c2b407c5bd8ff1fcbe49e6318a2b62d731" dependencies = [ "alloy-primitives", "alloy-serde", @@ -477,9 +483,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" +checksum = "e855b0daccf2320ba415753c3fed422abe9d3ad5d77b2d6cafcc9bcf32fe387f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -496,9 +502,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" +checksum = "35c2661ca6785add8fc37aff8005439c806ffad58254c19939c6f59ac0d6596e" dependencies = [ "alloy-primitives", "serde", @@ -507,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" +checksum = "67eca011160d18a7dc6d8cdc1e8dc13e2e86c908f8e41b02aa76e429d6fe7085" dependencies = [ "alloy-primitives", "async-trait", @@ -521,9 +527,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" +checksum = "1c54b195a6ee5a83f32e7c697b4e6b565966737ed5a2ef9176bbbb39f720d023" dependencies = [ "alloy-consensus", "alloy-network", @@ -610,9 +616,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +checksum = "3e4a136e733f55fef0870b81e1f8f1db28e78973d1b1ae5a5df642ba39538a07" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -626,13 +632,14 @@ dependencies = [ "tracing", "url", "wasm-bindgen-futures", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +checksum = "1a6b358a89b6d107b92d09b61a61fbc04243942182709752c796f4b29402cead" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -1069,9 +1076,12 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" -version = "0.2.0" +version = "0.2.2" dependencies = [ + "alloy", "bip39", + "blst", + "blstrs 0.7.1", "blsttc", "bytes", "console_error_panic_hook", @@ -1113,23 +1123,32 @@ dependencies = [ [[package]] name = "autonomi-cli" -version = "0.1.2" +version = "0.1.3" dependencies = [ "autonomi", "clap", "color-eyre", + "const-hex", "criterion", "dirs-next", "eyre", + "hex 0.4.3", "indicatif", + "prettytable", "rand 0.8.5", "rayon", + "ring 0.17.8", + "rpassword", + "serde", + "serde_json", "sn_build_info", "sn_logging", "sn_peers_acquisition", "tempfile", + "thiserror", "tokio", "tracing", + "walkdir", ] [[package]] @@ -1412,7 +1431,23 @@ dependencies = [ "byte-slice-cast", "ff 0.12.1", "group 0.12.1", - "pairing", + "pairing 0.22.0", + "rand_core 0.6.4", + "serde", + "subtle", +] + +[[package]] +name = "blstrs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" +dependencies = [ + "blst", + "byte-slice-cast", + "ff 0.13.0", + "group 0.13.0", + "pairing 0.23.0", "rand_core 0.6.4", "serde", "subtle", @@ -1425,12 +1460,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1186a39763321a0b73d1a10aa4fc067c5d042308509e8f6cc31d2c2a7ac61ac2" dependencies = [ "blst", - "blstrs", + "blstrs 0.6.2", "ff 0.12.1", "group 0.12.1", "hex 0.4.3", "hex_fmt", - "pairing", + "pairing 0.22.0", "rand 0.8.5", "rand_chacha 0.3.1", "serde", @@ -1887,7 +1922,7 @@ version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ - "encode_unicode", + "encode_unicode 0.3.6", "lazy_static", "libc", "unicode-width", @@ -1906,9 +1941,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" dependencies = [ "cfg-if", "cpufeatures", @@ -2184,6 +2219,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + [[package]] name = "ctr" version = "0.9.2" @@ -2677,6 +2733,12 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "encoding_rs" version = "0.8.34" @@ -2747,7 +2809,7 @@ dependencies = [ [[package]] name = "evm_testnet" -version = "0.1.1" +version = "0.1.2" dependencies = [ "clap", "dirs-next", @@ -2758,7 +2820,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.1" +version = "0.1.2" dependencies = [ "alloy", "dirs-next", @@ -2838,6 +2900,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec", "rand_core 0.6.4", "subtle", ] @@ -3771,7 +3834,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", + "rand 0.8.5", "rand_core 0.6.4", + "rand_xorshift 0.3.0", "subtle", ] @@ -5565,7 +5630,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.8" +version = "0.2.9" dependencies = [ "clap", "clap-verbosity-flag", @@ -5682,7 +5747,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.4.1" +version = "0.4.2" dependencies = [ "atty", "better-panic", @@ -6074,6 +6139,15 @@ dependencies = [ "group 0.12.1", ] +[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group 0.13.0", +] + [[package]] name = "pairing-plus" version = "0.19.0" @@ -6483,6 +6557,20 @@ dependencies = [ "yansi", ] +[[package]] +name = "prettytable" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46480520d1b77c9a3482d39939fcf96831537a250ec62d4fd8fbdf8e0302e781" +dependencies = [ + "csv", + "encode_unicode 1.0.0", + "is-terminal", + "lazy_static", + "term", + "unicode-width", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -7325,6 +7413,17 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "rpassword" +version = "7.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" +dependencies = [ + "libc", + "rtoolbox", + "windows-sys 0.48.0", +] + [[package]] name = "rtnetlink" version = "0.10.1" @@ -7340,6 +7439,16 @@ dependencies = [ "tokio", ] +[[package]] +name = 
"rtoolbox" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "ruint" version = "1.12.3" @@ -7567,6 +7676,17 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schnellru" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" +dependencies = [ + "ahash", + "cfg-if", + "hashbrown 0.13.2", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -7763,9 +7883,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", @@ -8027,7 +8147,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.11.0" +version = "0.11.1" dependencies = [ "assert_cmd", "assert_fs", @@ -8103,7 +8223,7 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.16" +version = "0.1.17" dependencies = [ "chrono", "tracing", @@ -8145,7 +8265,7 @@ dependencies = [ [[package]] name = "sn_evm" -version = "0.1.1" +version = "0.1.2" dependencies = [ "custom_debug", "evmlib", @@ -8168,7 +8288,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.37" +version = "0.2.38" dependencies = [ "chrono", "color-eyre", @@ -8193,7 +8313,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.17" +version = "0.1.18" dependencies = [ "clap", "color-eyre", @@ -8207,7 +8327,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.19.0" +version = "0.19.1" dependencies = [ "aes-gcm-siv", "async-trait", @@ -8252,7 +8372,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.112.1" +version = "0.112.2" dependencies = [ "assert_fs", "async-trait", @@ -8309,7 +8429,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.32" +version = "0.6.33" dependencies = [ "assert_fs", "async-trait", @@ -8336,7 +8456,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.4" +version = "0.5.5" dependencies = [ "clap", "lazy_static", @@ -8352,7 +8472,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.12" +version = "0.17.13" dependencies = [ "blsttc", "bytes", @@ -8382,7 +8502,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.4.0" +version = "0.4.1" dependencies = [ "blsttc", "crdts", @@ -8399,7 +8519,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.4.0" +version = "0.4.1" dependencies = [ "async-trait", "dirs-next", @@ -8425,7 +8545,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.20.0" +version = "0.20.1" dependencies = [ "assert_fs", "blsttc", @@ -8740,6 +8860,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + [[package]] name = "terminal_size" version = "0.3.0" @@ -8758,7 +8889,7 @@ checksum = 
"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.8" +version = "0.4.9" dependencies = [ "bytes", "color-eyre", @@ -8902,7 +9033,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.55" +version = "0.1.56" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/README.md b/README.md index 48751adf0e..67ea01d426 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Libp2p.
### For Users -- [CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi_cli/README.md) The Command Line +- [CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi-cli/README.md) The Command Line Interface, allowing users to interact with the network from their terminal. - [Node](https://github.com/maidsafe/safe_network/blob/main/sn_node/README.md) The backbone of the safe network. Nodes can be run on commodity hardware and provide storage space and validation of @@ -32,10 +32,10 @@ You should build from the `stable` branch, as follows: ``` git checkout stable -export FOUNDATION_PK=88a82d718d16dccc839188eddc9a46cb216667c940cd46285199458c919a170a55490db09763ae216ed25e9db78c3576 -export GENESIS_PK=aa3526db2dbc43998e0b541b8455e2ce9dd4f1cad80090e671da16e3cd11cd5e3550f74c3cefd09ad253d93cacae2320 -export NETWORK_ROYALTIES_PK=8b5463a2c8142959a7b7cfd9295587812eb07ccbe13a85865503c8004eeeb6889ccace3588dcf9f7396784d9ee48f4d5 -export PAYMENT_FORWARD_PK=87d5b511a497183c945df63ab8790a4b94cfe452d00bfbdb39e41ee861384fe0de716a224da1c6fd11356de49877dfc2 +export FOUNDATION_PK=b20c916c7a28707018292f06dfdb66ab88ebcbad9c78d18135e843a91b1d66b875b24d2c27d8d1ad4637c2d5811896fe +export GENESIS_PK=93f7355906fa8c1a639bac80f4619dbb4cf5f71c47827d1ff2c30f0d133f6b841859662cbf7e0bbceca0eb0f521f6ebc +export NETWORK_ROYALTIES_PK=af451aa34a0d16c50eb217b91ab6b2ca75ef43b9c20449384ff1e90dbf8477351499cca985828e33b208805dadc80c63 +export PAYMENT_FORWARD_PK=adc6401588af49c60af6717a60546207abddb4e150014b4ab6c407ef6d7b3d3899b8892a91ab23042378b7b285e655fc cargo build --release --features=network-contacts --bin safenode ``` @@ -69,7 +69,7 @@ cargo build --release --features=network-contacts --bin safenode - [Autonomi API](https://github.com/maidsafe/safe_network/blob/main/autonomi/README.md) The client APIs allowing use of the Autonomi Network to users and developers. -- [Autonomi CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi_cli/README.md) The Command Line +- [Autonomi CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi-cli/README.md) The Command Line Interface, allowing users to interact with the network from their terminal. - [Node](https://github.com/maidsafe/safe_network/blob/main/sn_node/README.md) The backbone of the autonomi network. Nodes can be run on commodity hardware and run the Network.
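An editorial aside on the README hunk above: the `*_PK` values are exported before `cargo build`, which suggests they are captured at build time rather than read at run time. A minimal sketch of that compile-time pattern (illustrative only; `network_keys` is a hypothetical name, and the real key-loading code lives elsewhere in the workspace, outside this diff):

```rust
// Hypothetical sketch of consuming build-time env vars such as FOUNDATION_PK.
// `option_env!` is expanded when the binary is compiled, which is why the
// README exports the variables before running `cargo build`.
fn network_keys() -> Option<(&'static str, &'static str)> {
    let foundation_pk = option_env!("FOUNDATION_PK")?; // None if unset at compile time
    let genesis_pk = option_env!("GENESIS_PK")?;
    Some((foundation_pk, genesis_pk))
}
```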
diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 05208d3325..fb49e41f33 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers <dev@maidsafe.net>"] name = "autonomi-cli" description = "Autonomi CLI" license = "GPL-3.0" -version = "0.1.2" +version = "0.1.3" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -24,16 +24,22 @@ name = "files" harness = false [dependencies] -autonomi = { path = "../autonomi", version = "0.2.0", features = [ +autonomi = { path = "../autonomi", version = "0.2.2", features = [ "data", "fs", + "vault", "registers", "loud", ] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "~0.6" +const-hex = "1.13.1" dirs-next = "~2.0.0" +prettytable = "0.10.0" +thiserror = "1.0" indicatif = { version = "0.17.5", features = ["tokio"] } +rand = { version = "~0.8.5", features = ["small_rng"] } +rpassword = "7.0" tokio = { version = "1.32.0", features = [ "io-util", "macros", @@ -44,12 +50,17 @@ tokio = { version = "1.32.0", features = [ "fs", ] } tracing = { version = "~0.1.26" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +walkdir = "2.5.0" +serde_json = "1.0.132" +serde = "1.0.210" +hex = "0.4.3" +ring = "0.17.8" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.2.0", features = [ +autonomi = { path = "../autonomi", version = "0.2.2", features = [ "data", "fs", ] } diff --git a/autonomi-cli/README.md b/autonomi-cli/README.md index 8bc2277655..6da4930612 100644 --- a/autonomi-cli/README.md +++ b/autonomi-cli/README.md @@ -1,7 +1,7 @@ # A CLI for the Autonomi Network ``` -Usage: autonomi_cli [OPTIONS] <COMMAND> +Usage: autonomi [OPTIONS] <COMMAND> Commands: file Operations related to file handling diff --git a/autonomi-cli/benches/files.rs b/autonomi-cli/benches/files.rs index f545936334..4b4794c16e 100644 --- a/autonomi-cli/benches/files.rs +++ b/autonomi-cli/benches/files.rs @@ -99,7 +99,7 @@ fn get_cli_path() -> PathBuf { path.push("target"); } path.push("release"); - path.push("autonomi_cli"); + path.push("autonomi"); path } diff --git a/autonomi-cli/src/access/data_dir.rs b/autonomi-cli/src/access/data_dir.rs index af0db16c2c..9233507264 100644 --- a/autonomi-cli/src/access/data_dir.rs +++ b/autonomi-cli/src/access/data_dir.rs @@ -6,14 +6,23 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software.
-use color_eyre::eyre::{eyre, Context, Result}; +use color_eyre::{ + eyre::{eyre, Context, Result}, + Section, +}; use std::path::PathBuf; pub fn get_client_data_dir_path() -> Result<PathBuf> { let mut home_dirs = dirs_next::data_dir() .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?; home_dirs.push("safe"); - home_dirs.push("client"); - std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?; + home_dirs.push("autonomi"); + std::fs::create_dir_all(home_dirs.as_path()) + .wrap_err("Failed to create data dir") + .with_suggestion(|| { + format!( + "make sure you have the correct permissions to access the data dir: {home_dirs:?}" + ) + })?; Ok(home_dirs) } diff --git a/autonomi-cli/src/access/keys.rs b/autonomi-cli/src/access/keys.rs index 18310f4831..cfaa5284b7 100644 --- a/autonomi-cli/src/access/keys.rs +++ b/autonomi-cli/src/access/keys.rs @@ -6,9 +6,11 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use crate::wallet::load_wallet_private_key; use autonomi::client::registers::RegisterSecretKey; +use autonomi::client::vault::VaultSecretKey; use autonomi::{get_evm_network_from_env, Wallet}; -use color_eyre::eyre::{Context, Result}; +use color_eyre::eyre::{eyre, Context, Result}; use color_eyre::Section; use std::env; use std::fs; @@ -17,13 +19,12 @@ use std::path::PathBuf; const SECRET_KEY_ENV: &str = "SECRET_KEY"; const REGISTER_SIGNING_KEY_ENV: &str = "REGISTER_SIGNING_KEY"; -const SECRET_KEY_FILE: &str = "secret_key"; const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key"; /// EVM wallet -pub fn load_evm_wallet() -> Result<Wallet> { +pub fn load_evm_wallet_from_env() -> Result<Wallet> { let secret_key = - get_secret_key().wrap_err("The secret key is required to perform this action")?; + get_secret_key_from_env().wrap_err("The secret key is required to perform this action")?; let network = get_evm_network_from_env()?; let wallet = Wallet::new_from_private_key(network, &secret_key) .wrap_err("Failed to load EVM wallet from key")?; @@ -31,24 +32,16 @@ } /// EVM wallet private key -pub fn get_secret_key() -> Result<String> { - // try env var first - let why_env_failed = match env::var(SECRET_KEY_ENV) { - Ok(key) => return Ok(key), - Err(e) => e, - }; - - // try from data dir - let dir = super::data_dir::get_client_data_dir_path() - .wrap_err(format!("Failed to obtain secret key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir")) - .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var"))?; +pub fn get_secret_key_from_env() -> Result<String> { + env::var(SECRET_KEY_ENV).wrap_err(eyre!( + "make sure you've provided the {SECRET_KEY_ENV} env var" + )) +} - // load the key from file - let key_path = dir.join(SECRET_KEY_FILE); - fs::read_to_string(&key_path) - .wrap_err("Failed to read secret key from file") - .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var or have the key in a file at {key_path:?}")) - .with_suggestion(|| "the secret key should be a hex encoded string of your evm wallet private key") +pub fn get_vault_secret_key() -> Result<VaultSecretKey> { + let secret_key = load_wallet_private_key()?; + autonomi::client::vault::derive_vault_key(&secret_key) + .wrap_err("Failed to derive vault secret key from EVM secret key") } pub fn create_register_signing_key_file(key: RegisterSecretKey) -> Result<PathBuf> { diff
--git a/autonomi-cli/src/access/mod.rs b/autonomi-cli/src/access/mod.rs index ac80eeca88..327dc6db51 100644 --- a/autonomi-cli/src/access/mod.rs +++ b/autonomi-cli/src/access/mod.rs @@ -9,3 +9,4 @@ pub mod data_dir; pub mod keys; pub mod network; +pub mod user_data; diff --git a/autonomi-cli/src/access/user_data.rs b/autonomi-cli/src/access/user_data.rs new file mode 100644 index 0000000000..57deb85785 --- /dev/null +++ b/autonomi-cli/src/access/user_data.rs @@ -0,0 +1,177 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use std::collections::HashMap; + +use autonomi::client::{ + address::{addr_to_str, str_to_addr}, + archive::ArchiveAddr, + archive_private::PrivateArchiveAccess, + registers::{RegisterAddress, RegisterSecretKey}, + vault::UserData, +}; +use color_eyre::eyre::Result; + +use super::{ + data_dir::get_client_data_dir_path, + keys::{create_register_signing_key_file, get_register_signing_key}, +}; + +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +struct PrivateFileArchive { + name: String, + secret_access: String, +} + +pub fn get_local_user_data() -> Result<UserData> { + let register_sk = get_register_signing_key().map(|k| k.to_hex()).ok(); + let registers = get_local_registers()?; + let file_archives = get_local_public_file_archives()?; + let private_file_archives = get_local_private_file_archives()?; + + let user_data = UserData { + register_sk, + registers, + file_archives, + private_file_archives, + }; + Ok(user_data) +} + +pub fn get_local_private_file_archives() -> Result<HashMap<PrivateArchiveAccess, String>> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let private_file_archives_path = user_data_path.join("private_file_archives"); + std::fs::create_dir_all(&private_file_archives_path)?; + + let mut private_file_archives = HashMap::new(); + for entry in walkdir::WalkDir::new(private_file_archives_path) + .min_depth(1) + .max_depth(1) + { + let entry = entry?; + let file_content = std::fs::read_to_string(entry.path())?; + let private_file_archive: PrivateFileArchive = serde_json::from_str(&file_content)?; + let private_file_archive_access = + PrivateArchiveAccess::from_hex(&private_file_archive.secret_access)?; + private_file_archives.insert(private_file_archive_access, private_file_archive.name); + } + Ok(private_file_archives) +} + +pub fn get_local_private_archive_access(local_addr: &str) -> Result<PrivateArchiveAccess> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let private_file_archives_path = user_data_path.join("private_file_archives"); + let file_path = private_file_archives_path.join(local_addr); + let file_content = std::fs::read_to_string(file_path)?; + let private_file_archive: PrivateFileArchive = serde_json::from_str(&file_content)?; + let private_file_archive_access = + PrivateArchiveAccess::from_hex(&private_file_archive.secret_access)?; + Ok(private_file_archive_access) +} + +pub fn get_local_registers() -> Result<HashMap<RegisterAddress, String>> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data");
+ let registers_path = user_data_path.join("registers"); + std::fs::create_dir_all(&registers_path)?; + + let mut registers = HashMap::new(); + for entry in walkdir::WalkDir::new(registers_path) + .min_depth(1) + .max_depth(1) + { + let entry = entry?; + let file_name = entry.file_name().to_string_lossy(); + let register_address = RegisterAddress::from_hex(&file_name)?; + let file_content = std::fs::read_to_string(entry.path())?; + let register_name = file_content; + registers.insert(register_address, register_name); + } + Ok(registers) +} + +pub fn get_local_public_file_archives() -> Result<HashMap<ArchiveAddr, String>> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let file_archives_path = user_data_path.join("file_archives"); + std::fs::create_dir_all(&file_archives_path)?; + + let mut file_archives = HashMap::new(); + for entry in walkdir::WalkDir::new(file_archives_path) + .min_depth(1) + .max_depth(1) + { + let entry = entry?; + let file_name = entry.file_name().to_string_lossy(); + let file_archive_address = str_to_addr(&file_name)?; + let file_archive_name = std::fs::read_to_string(entry.path())?; + file_archives.insert(file_archive_address, file_archive_name); + } + Ok(file_archives) +} + +pub fn write_local_user_data(user_data: &UserData) -> Result<()> { + if let Some(register_key) = &user_data.register_sk { + let sk = RegisterSecretKey::from_hex(register_key)?; + create_register_signing_key_file(sk)?; + } + + for (register, name) in user_data.registers.iter() { + write_local_register(register, name)?; + } + + for (archive, name) in user_data.file_archives.iter() { + write_local_public_file_archive(addr_to_str(*archive), name)?; + } + + for (archive, name) in user_data.private_file_archives.iter() { + write_local_private_file_archive(archive.to_hex(), archive.address(), name)?; + } + + Ok(()) +} + +pub fn write_local_register(register: &RegisterAddress, name: &str) -> Result<()> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let registers_path = user_data_path.join("registers"); + std::fs::create_dir_all(&registers_path)?; + std::fs::write(registers_path.join(register.to_hex()), name)?; + Ok(()) +} + +pub fn write_local_public_file_archive(archive: String, name: &str) -> Result<()> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let file_archives_path = user_data_path.join("file_archives"); + std::fs::create_dir_all(&file_archives_path)?; + std::fs::write(file_archives_path.join(archive), name)?; + Ok(()) +} + +pub fn write_local_private_file_archive( + archive: String, + local_addr: String, + name: &str, +) -> Result<()> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let private_file_archives_path = user_data_path.join("private_file_archives"); + std::fs::create_dir_all(&private_file_archives_path)?; + let file_name = local_addr; + let content = serde_json::to_string(&PrivateFileArchive { + name: name.to_string(), + secret_access: archive, + })?; + std::fs::write(private_file_archives_path.join(file_name), content)?; + Ok(()) +} diff --git a/autonomi-cli/src/actions/connect.rs b/autonomi-cli/src/actions/connect.rs index 9eccb3bbfb..cfe971d14e 100644 --- a/autonomi-cli/src/actions/connect.rs +++ b/autonomi-cli/src/actions/connect.rs @@ -24,10 +24,12 @@ pub async fn connect_to_network(peers: Vec<Multiaddr>) -> Result<Client> { match Client::connect(&peers).await { Ok(client) => { + info!("Connected to the Network");
progress_bar.finish_with_message("Connected to the Network"); Ok(client) } Err(e) => { + error!("Failed to connect to the network: {e}"); progress_bar.finish_with_message("Failed to connect to the network"); bail!("Failed to connect to the network: {e}") } diff --git a/autonomi-cli/src/actions/download.rs b/autonomi-cli/src/actions/download.rs index ba004930e3..ff737ac2c1 100644 --- a/autonomi-cli/src/actions/download.rs +++ b/autonomi-cli/src/actions/download.rs @@ -7,22 +7,94 @@ // permissions and limitations relating to use of the SAFE Network Software. use super::get_progress_bar; -use autonomi::{client::address::str_to_addr, Client}; -use color_eyre::eyre::{eyre, Context, Result}; +use autonomi::{ + client::{address::str_to_addr, archive::ArchiveAddr, archive_private::PrivateArchiveAccess}, + Client, +}; +use color_eyre::{ + eyre::{eyre, Context, Result}, + Section, +}; use std::path::PathBuf; pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> { - let address = str_to_addr(addr).wrap_err("Failed to parse data address")?; + let public_address = str_to_addr(addr).ok(); + let private_address = crate::user_data::get_local_private_archive_access(addr) + .inspect_err(|e| error!("Failed to get private archive access: {e}")) + .ok(); + + match (public_address, private_address) { + (Some(public_address), _) => download_public(addr, public_address, dest_path, client).await, + (_, Some(private_address)) => download_private(addr, private_address, dest_path, client).await, + _ => Err(eyre!("Failed to parse data address {addr}")) + .with_suggestion(|| "Public addresses look like this: 0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7") + .with_suggestion(|| "Private addresses look like this: 1358645341480028172") + .with_suggestion(|| "Try the `file list` command to get addresses you have access to"), + } +} + +async fn download_private( + addr: &str, + private_address: PrivateArchiveAccess, + dest_path: &str, + client: &mut Client, +) -> Result<()> { + let archive = client + .private_archive_get(private_address) + .await + .wrap_err("Failed to fetch data from address")?; + + let progress_bar = get_progress_bar(archive.iter().count() as u64)?; + let mut all_errs = vec![]; + for (path, access, _meta) in archive.iter() { + progress_bar.println(format!("Fetching file: {path:?}...")); + let bytes = match client.private_data_get(access.clone()).await { + Ok(bytes) => bytes, + Err(e) => { + let err = format!("Failed to fetch file {path:?}: {e}"); + all_errs.push(err); + continue; + } + }; + + let path = PathBuf::from(dest_path).join(path); + let here = PathBuf::from("."); + let parent = path.parent().unwrap_or_else(|| &here); + std::fs::create_dir_all(parent)?; + std::fs::write(path, bytes)?; + progress_bar.clone().inc(1); + } + progress_bar.finish_and_clear(); + + if all_errs.is_empty() { + info!("Successfully downloaded private data with local address: {addr}"); + println!("Successfully downloaded private data with local address: {addr}"); + Ok(()) + } else { + let err_no = all_errs.len(); + eprintln!("{err_no} errors while downloading private data with local address: {addr}"); + eprintln!("{all_errs:#?}"); + error!("Errors while downloading private data with local address {addr}: {all_errs:#?}"); + Err(eyre!("Errors while downloading private data")) + } +} + +async fn download_public( + addr: &str, + address: ArchiveAddr, + dest_path: &str, + client: &mut Client, +) -> Result<()> { let archive = client .archive_get(address) .await .wrap_err("Failed to 
fetch data from address")?; - let progress_bar = get_progress_bar(archive.map.len() as u64)?; + let progress_bar = get_progress_bar(archive.iter().count() as u64)?; let mut all_errs = vec![]; - for (path, addr) in archive.map { + for (path, addr, _meta) in archive.iter() { progress_bar.println(format!("Fetching file: {path:?}...")); - let bytes = match client.data_get(addr).await { + let bytes = match client.data_get(*addr).await { Ok(bytes) => bytes, Err(e) => { let err = format!("Failed to fetch file {path:?}: {e}"); @@ -41,12 +113,14 @@ pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Resul progress_bar.finish_and_clear(); if all_errs.is_empty() { + info!("Successfully downloaded data at: {addr}"); println!("Successfully downloaded data at: {addr}"); Ok(()) } else { let err_no = all_errs.len(); eprintln!("{err_no} errors while downloading data at: {addr}"); eprintln!("{all_errs:#?}"); + error!("Errors while downloading data at {addr}: {all_errs:#?}"); Err(eyre!("Errors while downloading data")) } } diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs index bb718df43a..663898b6ea 100644 --- a/autonomi-cli/src/commands.rs +++ b/autonomi-cli/src/commands.rs @@ -9,6 +9,7 @@ mod file; mod register; mod vault; +mod wallet; use clap::Subcommand; use color_eyre::Result; @@ -34,6 +35,12 @@ pub enum SubCmd { #[command(subcommand)] command: VaultCmd, }, + + /// Operations related to wallet management. + Wallet { + #[command(subcommand)] + command: WalletCmd, + }, } #[derive(Subcommand, Debug)] @@ -44,10 +51,13 @@ pub enum FileCmd { file: String, }, - /// Upload a file and pay for it. + /// Upload a file and pay for it. Data on the Network is private by default. Upload { /// The file to upload. file: String, + /// Upload the file as public. Everyone can see public data on the Network. + #[arg(short, long)] + public: bool, }, /// Download a file from the given address. @@ -123,10 +133,42 @@ pub enum VaultCmd { Cost, /// Create a vault at a deterministic address based on your `SECRET_KEY`. + /// Pushes an encrypted backup of your local user data to the network. Create, + /// Load an existing vault from the network. + /// Use this when loading your user data to a new device. + /// You need to have your original `SECRET_KEY` to load the vault. + Load, + /// Sync vault with the network, including registers and files. - Sync, + /// Loads existing user data from the network and merges it with your local user data. + /// Pushes your local user data to the network. + Sync { + /// Force push your local user data to the network. + /// This will overwrite any existing data in your vault. + #[arg(short, long)] + force: bool, + }, +} + +#[derive(Subcommand, Debug)] +pub enum WalletCmd { + /// Create a wallet. + Create { + /// Optional flag to not add a password. + #[clap(long, action)] + no_password: bool, + /// Optional hex-encoded private key. + #[clap(long)] + private_key: Option<String>, + /// Optional password to encrypt the wallet with. + #[clap(long, short)] + password: Option<String>, + }, + + /// Check the balance of the wallet.
+ Balance, } pub async fn handle_subcommand(opt: Opt) -> Result<()> { @@ -136,11 +178,11 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { match cmd { SubCmd::File { command } => match command { FileCmd::Cost { file } => file::cost(&file, peers.await?).await, - FileCmd::Upload { file } => file::upload(&file, peers.await?).await, + FileCmd::Upload { file, public } => file::upload(&file, public, peers.await?).await, FileCmd::Download { addr, dest_file } => { file::download(&addr, &dest_file, peers.await?).await } - FileCmd::List => file::list(peers.await?), + FileCmd::List => file::list(), }, SubCmd::Register { command } => match command { RegisterCmd::GenerateKey { overwrite } => register::generate_key(overwrite), @@ -156,12 +198,21 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { value, } => register::edit(address, name, &value, peers.await?).await, RegisterCmd::Get { address, name } => register::get(address, name, peers.await?).await, - RegisterCmd::List => register::list(peers.await?), + RegisterCmd::List => register::list(), }, SubCmd::Vault { command } => match command { - VaultCmd::Cost => vault::cost(peers.await?), - VaultCmd::Create => vault::create(peers.await?), - VaultCmd::Sync => vault::sync(peers.await?), + VaultCmd::Cost => vault::cost(peers.await?).await, + VaultCmd::Create => vault::create(peers.await?).await, + VaultCmd::Load => vault::load(peers.await?).await, + VaultCmd::Sync { force } => vault::sync(peers.await?, force).await, + }, + SubCmd::Wallet { command } => match command { + WalletCmd::Create { + no_password, + private_key, + password, + } => wallet::create(no_password, private_key, password), + WalletCmd::Balance => Ok(wallet::balance().await?), }, } } diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs index d99a848214..6d3f051015 100644 --- a/autonomi-cli/src/commands/file.rs +++ b/autonomi-cli/src/commands/file.rs @@ -7,16 +7,19 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use crate::utils::collect_upload_summary; +use crate::wallet::load_wallet; use autonomi::client::address::addr_to_str; use autonomi::Multiaddr; use color_eyre::eyre::Context; use color_eyre::eyre::Result; +use color_eyre::Section; use std::path::PathBuf; pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> { let client = crate::actions::connect_to_network(peers).await?; println!("Getting upload cost..."); + info!("Calculating cost for file: {file}"); let cost = client .file_cost(&PathBuf::from(file)) .await @@ -24,42 +27,112 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> { println!("Estimate cost to upload file: {file}"); println!("Total cost: {cost}"); + info!("Total cost: {cost} for file: {file}"); Ok(()) } -pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> { - let wallet = crate::keys::load_evm_wallet()?; + +pub async fn upload(file: &str, public: bool, peers: Vec<Multiaddr>) -> Result<()> { + let wallet = load_wallet()?; let mut client = crate::actions::connect_to_network(peers).await?; let event_receiver = client.enable_client_events(); let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); println!("Uploading data to network..."); + info!( + "Uploading {} file: {file}", + if public { "public" } else { "private" } + ); - let xor_name = client - .dir_upload(PathBuf::from(file), &wallet) - .await - .wrap_err("Failed to upload file")?; - let addr = addr_to_str(xor_name); - - println!("Successfully uploaded: {file}"); - println!("At address: {addr}"); - if let Ok(()) = upload_completed_tx.send(()) { - let summary = upload_summary_thread.await?; - if summary.record_count == 0 { - println!("All chunks already exist on the network"); - } else { - println!("Number of chunks uploaded: {}", summary.record_count); - println!("Total cost: {} AttoTokens", summary.tokens_spent); - } + let dir_path = PathBuf::from(file); + let name = dir_path + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or(file.to_string()); + + // upload dir + let local_addr; + let archive = if public { + let xor_name = client + .dir_upload(dir_path, &wallet) + .await + .wrap_err("Failed to upload file")?; + local_addr = addr_to_str(xor_name); + local_addr.clone() + } else { + let private_data_access = client + .private_dir_upload(dir_path, &wallet) + .await + .wrap_err("Failed to upload file")?; + local_addr = private_data_access.address(); + private_data_access.to_hex() + }; + + // wait for upload to complete + if let Err(e) = upload_completed_tx.send(()) { + error!("Failed to send upload completed event: {e:?}"); + eprintln!("Failed to send upload completed event: {e:?}"); } + // get summary + let summary = upload_summary_thread.await?; + if summary.record_count == 0 { + println!("All chunks already exist on the network."); + } else { + println!("Successfully uploaded: {file}"); + println!("At address: {local_addr}"); + info!("Successfully uploaded: {file} at address: {local_addr}"); + println!("Number of chunks uploaded: {}", summary.record_count); + println!("Total cost: {} AttoTokens", summary.tokens_spent); + } + info!("Summary for upload of file {file} at {local_addr:?}: {summary:?}"); + + // save to local user data + let writer = if public { + crate::user_data::write_local_public_file_archive(archive, &name) + } else { + crate::user_data::write_local_private_file_archive(archive, local_addr, &name) + }; + writer + .wrap_err("Failed to save file to local user data") + .with_suggestion(|| "Local user data saves the file address above to disk, without it
you need to keep track of the address yourself")?; + info!("Saved file to local user data"); + Ok(()) } + pub async fn download(addr: &str, dest_path: &str, peers: Vec<Multiaddr>) -> Result<()> { let mut client = crate::actions::connect_to_network(peers).await?; crate::actions::download(addr, dest_path, &mut client).await } -pub fn list(_peers: Vec<Multiaddr>) -> Result<()> { - println!("The file list feature is coming soon!"); +pub fn list() -> Result<()> { + // get public file archives + println!("Retrieving local user data..."); + let file_archives = crate::user_data::get_local_public_file_archives() + .wrap_err("Failed to get local public file archives")?; + + println!( + "✅ You have {} public file archive(s):", + file_archives.len() + ); + for (addr, name) in file_archives { + println!("{}: {}", name, addr_to_str(addr)); + } + + // get private file archives + println!(); + let private_file_archives = crate::user_data::get_local_private_file_archives() + .wrap_err("Failed to get local private file archives")?; + + println!( + "✅ You have {} private file archive(s):", + private_file_archives.len() + ); + for (addr, name) in private_file_archives { + println!("{}: {}", name, addr.address()); + } + + println!(); + println!("> Note that private data addresses are not network addresses, they are only used for referring to private data client side."); Ok(()) } diff --git a/autonomi-cli/src/commands/register.rs b/autonomi-cli/src/commands/register.rs index d559e6cc55..0aad3ab844 100644 --- a/autonomi-cli/src/commands/register.rs +++ b/autonomi-cli/src/commands/register.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::utils::collect_upload_summary; +use crate::wallet::load_wallet; use autonomi::client::registers::RegisterAddress; use autonomi::client::registers::RegisterPermissions; use autonomi::client::registers::RegisterSecretKey; @@ -21,6 +22,7 @@ pub fn generate_key(overwrite: bool) -> Result<()> { // check if the key already exists let key_path = crate::keys::get_register_signing_key_path()?; if key_path.exists() && !overwrite { + error!("Register key already exists at: {key_path:?}"); return Err(eyre!("Register key already exists at: {}", key_path.display())) .with_suggestion(|| "if you want to overwrite the existing key, run the command with the --overwrite flag") .with_warning(|| "overwriting the existing key might result in loss of access to any existing registers created using that key"); @@ -30,6 +32,7 @@ let key = RegisterSecretKey::random(); let path = crate::keys::create_register_signing_key_file(key) .wrap_err("Failed to create new register key")?; + info!("Created new register key at: {path:?}"); println!("✅ Created new register key at: {}", path.display()); Ok(()) } @@ -43,12 +46,13 @@ pub async fn cost(name: &str, peers: Vec<Multiaddr>) -> Result<()> { .register_cost(name.to_string(), register_key) .await .wrap_err("Failed to get cost for register")?; + info!("Estimated cost to create a register with name {name}: {cost}"); println!("✅ The estimated cost to create a register with name {name} is: {cost}"); Ok(()) } pub async fn create(name: &str, value: &str, public: bool, peers: Vec<Multiaddr>) -> Result<()> { - let wallet = crate::keys::load_evm_wallet()?; + let wallet = load_wallet()?; let register_key = crate::keys::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; let mut client = crate::actions::connect_to_network(peers).await?; @@ -56,8 +60,10 @@ pub
async fn create(name: &str, value: &str, public: bool, peers: Vec let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); println!("Creating register with name: {name}"); + info!("Creating register with name: {name}"); let register = if public { println!("With public write access"); + info!("With public write access"); let permissions = RegisterPermissions::new_anyone_can_write(); client .register_create_with_permissions( @@ -71,6 +77,7 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec .wrap_err("Failed to create register")? } else { println!("With private write access"); + info!("With private write access"); client .register_create( value.as_bytes().to_vec().into(), @@ -84,18 +91,28 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec let address = register.address(); - println!("✅ Register created at address: {address}"); - println!("With name: {name}"); - println!("And initial value: [{value}]"); + if let Err(e) = upload_completed_tx.send(()) { + error!("Failed to send upload completed event: {e:?}"); + eprintln!("Failed to send upload completed event: {e:?}"); + } - if let Ok(()) = upload_completed_tx.send(()) { - let summary = upload_summary_thread.await?; - if summary.record_count == 0 { - println!("The register was already created on the network. No tokens were spent."); - } else { - println!("Total cost: {} AttoTokens", summary.tokens_spent); - } + let summary = upload_summary_thread.await?; + if summary.record_count == 0 { + println!("✅ The register already exists on the network at address: {address}."); + println!("No tokens were spent."); + } else { + println!("✅ Register created at address: {address}"); + println!("With name: {name}"); + println!("And initial value: [{value}]"); + info!("Register created at address: {address} with name: {name}"); + println!("Total cost: {} AttoTokens", summary.tokens_spent); } + info!("Summary of register creation: {summary:?}"); + + crate::user_data::write_local_register(address, name) + .wrap_err("Failed to save register to local user data") + .with_suggestion(|| "Local user data saves the register address above to disk, without it you need to keep track of the address yourself")?; + info!("Saved register to local user data"); Ok(()) } @@ -116,13 +133,16 @@ pub async fn edit(address: String, name: bool, value: &str, peers: Vec<Multiaddr>) -> Result<( }; println!("Getting register at address: {address}"); + info!("Getting register at address: {address}"); let register = client .register_get(address) .await @@ -157,6 +179,7 @@ pub async fn get(address: String, name: bool, peers: Vec<Multiaddr>) -> Result<( let values = register.values(); println!("✅ Register found at address: {address}"); + info!("Register found at address: {address}"); match values.as_slice() { [one] => println!("With value: [{:?}]", String::from_utf8_lossy(one)), _ => { @@ -169,7 +192,12 @@ pub async fn get(address: String, name: bool, peers: Vec<Multiaddr>) -> Result<( Ok(()) } -pub fn list(_peers: Vec<Multiaddr>) -> Result<()> { - println!("The register feature is coming soon!"); +pub fn list() -> Result<()> { + println!("Retrieving local user data..."); + let registers = crate::user_data::get_local_registers()?; + println!("✅ You have {} register(s):", registers.len()); + for (addr, name) in registers { + println!("{}: {}", name, addr.to_hex()); + } Ok(()) } diff --git a/autonomi-cli/src/commands/vault.rs b/autonomi-cli/src/commands/vault.rs index 9a8d708824..e7ce3f95c8 100644 --- a/autonomi-cli/src/commands/vault.rs +++
b/autonomi-cli/src/commands/vault.rs @@ -6,20 +6,108 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use crate::wallet::load_wallet; use autonomi::Multiaddr; +use color_eyre::eyre::Context; use color_eyre::eyre::Result; +use color_eyre::Section; -pub fn cost(_peers: Vec<Multiaddr>) -> Result<()> { - println!("The vault feature is coming soon!"); +pub async fn cost(peers: Vec<Multiaddr>) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let vault_sk = crate::keys::get_vault_secret_key()?; + + println!("Getting cost to create a new vault..."); + let total_cost = client.vault_cost(&vault_sk).await?; + + if total_cost.is_zero() { + println!("Vault already exists, modifying an existing vault is free"); + } else { + println!("Cost to create a new vault: {total_cost} AttoTokens"); + } Ok(()) } -pub fn create(_peers: Vec<Multiaddr>) -> Result<()> { - println!("The vault feature is coming soon!"); +pub async fn create(peers: Vec<Multiaddr>) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let wallet = load_wallet()?; + let vault_sk = crate::keys::get_vault_secret_key()?; + + println!("Retrieving local user data..."); + let local_user_data = crate::user_data::get_local_user_data()?; + let file_archives_len = local_user_data.file_archives.len(); + let private_file_archives_len = local_user_data.private_file_archives.len(); + let registers_len = local_user_data.registers.len(); + + println!("Pushing to network vault..."); + let total_cost = client + .put_user_data_to_vault(&vault_sk, &wallet, local_user_data) + .await?; + + if total_cost.is_zero() { + println!("✅ Successfully pushed user data to existing vault"); + } else { + println!("✅ Successfully created new vault containing local user data"); + } + + println!("Total cost: {total_cost} AttoTokens"); + println!("Vault contains:"); + println!("{file_archives_len} public file archive(s)"); + println!("{private_file_archives_len} private file archive(s)"); + println!("{registers_len} register(s)"); Ok(()) } -pub fn sync(_peers: Vec<Multiaddr>) -> Result<()> { - println!("The vault feature is coming soon!"); +pub async fn sync(peers: Vec<Multiaddr>, force: bool) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let vault_sk = crate::keys::get_vault_secret_key()?; + let wallet = load_wallet()?; + + println!("Fetching vault from network..."); + let net_user_data = client + .get_user_data_from_vault(&vault_sk) + .await + .wrap_err("Failed to fetch vault from network") + .with_suggestion(|| "Make sure you have already created a vault on the network")?; + + if force { + println!("The force flag was provided, overwriting user data in the vault with local user data..."); + } else { + println!("Syncing vault with local user data..."); + crate::user_data::write_local_user_data(&net_user_data)?; + } + + println!("Pushing local user data to network vault..."); + let local_user_data = crate::user_data::get_local_user_data()?; + let file_archives_len = local_user_data.file_archives.len(); + let private_file_archives_len = local_user_data.private_file_archives.len(); + let registers_len = local_user_data.registers.len(); + client + .put_user_data_to_vault(&vault_sk, &wallet, local_user_data) + .await?; + + println!("✅ Successfully synced vault"); + println!("Vault contains:"); + println!("{file_archives_len} public file archive(s)"); + println!("{private_file_archives_len} private file
archive(s)"); + println!("{registers_len} register(s)"); + Ok(()) +} + +pub async fn load(peers: Vec<Multiaddr>) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let vault_sk = crate::keys::get_vault_secret_key()?; + + println!("Retrieving vault from network..."); + let user_data = client.get_user_data_from_vault(&vault_sk).await?; + println!("Writing user data to disk..."); + crate::user_data::write_local_user_data(&user_data)?; + + println!("✅ Successfully loaded vault with:"); + println!("{} public file archive(s)", user_data.file_archives.len()); + println!( + "{} private file archive(s)", + user_data.private_file_archives.len() + ); + println!("{} register(s)", user_data.registers.len()); Ok(()) } diff --git a/autonomi-cli/src/commands/wallet.rs b/autonomi-cli/src/commands/wallet.rs new file mode 100644 index 0000000000..3b31a873b2 --- /dev/null +++ b/autonomi-cli/src/commands/wallet.rs @@ -0,0 +1,85 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::wallet::fs::{select_wallet, store_private_key}; +use crate::wallet::input::request_password; +use crate::wallet::DUMMY_NETWORK; +use autonomi::Wallet; +use color_eyre::eyre::eyre; +use color_eyre::Result; +use prettytable::{Cell, Row, Table}; + +const WALLET_PASSWORD_REQUIRED: bool = false; + +pub fn create( + no_password: bool, + private_key: Option<String>, + password: Option<String>, +) -> Result<()> { + if no_password && password.is_some() { + return Err(eyre!( + "Only one of `--no-password` or `--password` may be specified" + )); + } + + // Set a password for encryption or not + let encryption_password: Option<String> = match (no_password, password) { + (true, _) => None, + (false, Some(pass)) => Some(pass.to_owned()), + (false, None) => request_password(WALLET_PASSWORD_REQUIRED), + }; + + let wallet_private_key = if let Some(private_key) = private_key { + // Validate imported key + Wallet::new_from_private_key(DUMMY_NETWORK, &private_key) + .map_err(|_| eyre!("Please provide a valid secret key in hex format"))?; + + private_key + } else { + // Create a new key + Wallet::random_private_key() + }; + + let wallet_address = Wallet::new_from_private_key(DUMMY_NETWORK, &wallet_private_key) + .expect("Infallible") + .address() + .to_string(); + + // Save the private key file + let file_path = store_private_key(&wallet_private_key, encryption_password)?; + + println!("Wallet address: {wallet_address}"); + println!("Stored wallet in: {file_path:?}"); + + Ok(()) +} + +pub async fn balance() -> Result<()> { + let wallet = select_wallet()?; + + let token_balance = wallet.balance_of_tokens().await?; + let gas_balance = wallet.balance_of_gas_tokens().await?; + + println!("Wallet balances: {}", wallet.address()); + + let mut table = Table::new(); + + table.add_row(Row::new(vec![ + Cell::new("Token Balance"), + Cell::new(&token_balance.to_string()), + ])); + + table.add_row(Row::new(vec![ + Cell::new("Gas Balance"), + Cell::new(&gas_balance.to_string()), + ])); + + table.printstd(); + + Ok(()) +} diff --git a/autonomi-cli/src/main.rs b/autonomi-cli/src/main.rs
index de4cdcf4c4..f86d74f484 100644 --- a/autonomi-cli/src/main.rs +++ b/autonomi-cli/src/main.rs @@ -14,10 +14,12 @@ mod actions; mod commands; mod opt; mod utils; +mod wallet; pub use access::data_dir; pub use access::keys; pub use access::network; +pub use access::user_data; use clap::Parser; use color_eyre::Result; diff --git a/autonomi-cli/src/opt.rs b/autonomi-cli/src/opt.rs index 8f3fb20967..a49f6029b1 100644 --- a/autonomi-cli/src/opt.rs +++ b/autonomi-cli/src/opt.rs @@ -27,9 +27,9 @@ pub(crate) struct Opt { /// `data-dir` is the default value. /// /// The data directory location is platform specific: - /// - Linux: $HOME/.local/share/safe/client/logs - /// - macOS: $HOME/Library/Application Support/safe/client/logs - /// - Windows: C:\Users\<username>\AppData\Roaming\safe\client\logs + /// - Linux: $HOME/.local/share/safe/autonomi/logs + /// - macOS: $HOME/Library/Application Support/safe/autonomi/logs + /// - Windows: C:\Users\<username>\AppData\Roaming\safe\autonomi\logs #[allow(rustdoc::invalid_html_tags)] #[clap(long, value_parser = LogOutputDest::parse_from_str, verbatim_doc_comment, default_value = "data-dir")] pub log_output_dest: LogOutputDest, diff --git a/autonomi-cli/src/wallet/encryption.rs b/autonomi-cli/src/wallet/encryption.rs new file mode 100644 index 0000000000..bc673574ce --- /dev/null +++ b/autonomi-cli/src/wallet/encryption.rs @@ -0,0 +1,171 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::wallet::error::Error; +use rand::Rng; +use ring::aead::{BoundKey, Nonce, NonceSequence}; +use ring::error::Unspecified; +use std::num::NonZeroU32; +use std::sync::LazyLock; + +const SALT_LENGTH: usize = 8; +const NONCE_LENGTH: usize = 12; + +/// Number of iterations for pbkdf2.
+static ITERATIONS: LazyLock<NonZeroU32> = + LazyLock::new(|| NonZeroU32::new(100_000).expect("Infallible")); + +struct NonceSeq([u8; 12]); + +impl NonceSequence for NonceSeq { + fn advance(&mut self) -> Result<Nonce, Unspecified> { + Nonce::try_assume_unique_for_key(&self.0) + } +} + +pub fn encrypt_private_key(private_key: &str, password: &str) -> Result<String, Error> { + // Generate a random salt + // Salt is used to ensure unique derived keys even for identical passwords + let mut salt = [0u8; SALT_LENGTH]; + rand::thread_rng().fill(&mut salt); + + // Generate a random nonce + // Nonce is used to ensure unique encryption outputs even for identical inputs + let mut nonce = [0u8; NONCE_LENGTH]; + rand::thread_rng().fill(&mut nonce); + + let mut key = [0; 32]; + + // Derive a key from the password using PBKDF2 with HMAC + // PBKDF2 is used for key derivation to mitigate brute-force attacks by making key derivation computationally expensive + // HMAC is used as the pseudorandom function for its security properties + ring::pbkdf2::derive( + ring::pbkdf2::PBKDF2_HMAC_SHA512, + *ITERATIONS, + &salt, + password.as_bytes(), + &mut key, + ); + + // Create an unbound key using CHACHA20_POLY1305 algorithm + // CHACHA20_POLY1305 is a fast and secure AEAD (Authenticated Encryption with Associated Data) algorithm + let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key) + .map_err(|_| Error::FailedToEncryptKey(String::from("Could not create unbound key")))?; + + // Create a sealing key with the unbound key and nonce + let mut sealing_key = ring::aead::SealingKey::new(unbound_key, NonceSeq(nonce)); + let aad = ring::aead::Aad::from(&[]); + + // Convert the secret key to bytes + let private_key_bytes = String::from(private_key).into_bytes(); + let mut encrypted_private_key = private_key_bytes; + + // seal_in_place_append_tag encrypts the data and appends an authentication tag to ensure data integrity + sealing_key + .seal_in_place_append_tag(aad, &mut encrypted_private_key) + .map_err(|_| Error::FailedToEncryptKey(String::from("Could not seal sealing key")))?; + + let mut encrypted_data = Vec::new(); + encrypted_data.extend_from_slice(&salt); + encrypted_data.extend_from_slice(&nonce); + encrypted_data.extend_from_slice(&encrypted_private_key); + + // Return the encrypted secret key along with salt and nonce encoded as hex strings + Ok(hex::encode(encrypted_data)) +} + +pub fn decrypt_private_key(encrypted_data: &str, password: &str) -> Result<String, Error> { + let encrypted_data = hex::decode(encrypted_data) + .map_err(|_| Error::FailedToDecryptKey(String::from("Encrypted data is invalid")))?; + + let salt: [u8; SALT_LENGTH] = encrypted_data[..SALT_LENGTH] + .try_into() + .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find salt")))?; + + let nonce: [u8; NONCE_LENGTH] = encrypted_data[SALT_LENGTH..SALT_LENGTH + NONCE_LENGTH] + .try_into() + .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find nonce")))?; + + let encrypted_private_key = &encrypted_data[SALT_LENGTH + NONCE_LENGTH..]; + + let mut key = [0; 32]; + + // Reconstruct the key from salt and password + ring::pbkdf2::derive( + ring::pbkdf2::PBKDF2_HMAC_SHA512, + *ITERATIONS, + &salt, + password.as_bytes(), + &mut key, + ); + + // Create an unbound key from the previously reconstructed key + let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key) + .map_err(|_| Error::FailedToDecryptKey(String::from("Could not create unbound key")))?; + + // Create an opening key using the unbound key and original nonce + let mut
+pub fn decrypt_private_key(encrypted_data: &str, password: &str) -> Result<String, Error> {
+    let encrypted_data = hex::decode(encrypted_data)
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Encrypted data is invalid")))?;
+
+    let salt: [u8; SALT_LENGTH] = encrypted_data[..SALT_LENGTH]
+        .try_into()
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find salt")))?;
+
+    let nonce: [u8; NONCE_LENGTH] = encrypted_data[SALT_LENGTH..SALT_LENGTH + NONCE_LENGTH]
+        .try_into()
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find nonce")))?;
+
+    let encrypted_private_key = &encrypted_data[SALT_LENGTH + NONCE_LENGTH..];
+
+    let mut key = [0; 32];
+
+    // Reconstruct the key from salt and password
+    ring::pbkdf2::derive(
+        ring::pbkdf2::PBKDF2_HMAC_SHA512,
+        *ITERATIONS,
+        &salt,
+        password.as_bytes(),
+        &mut key,
+    );
+
+    // Create an unbound key from the previously reconstructed key
+    let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key)
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Could not create unbound key")))?;
+
+    // Create an opening key using the unbound key and original nonce
+    let mut opening_key = ring::aead::OpeningKey::new(unbound_key, NonceSeq(nonce));
+    let aad = ring::aead::Aad::from(&[]);
+
+    let mut encrypted_private_key = encrypted_private_key.to_vec();
+
+    // Decrypt the encrypted secret key bytes
+    let decrypted_data = opening_key
+        .open_in_place(aad, &mut encrypted_private_key)
+        .map_err(|_| {
+            Error::FailedToDecryptKey(String::from(
+                "Could not open encrypted key, please check the password",
+            ))
+        })?;
+
+    let mut private_key_bytes = [0u8; 66];
+    private_key_bytes.copy_from_slice(&decrypted_data[0..66]);
+
+    // Create the secret key from the decrypted bytes
+    Ok(String::from_utf8(private_key_bytes.to_vec()).expect("not able to convert private key"))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use autonomi::Wallet;
+
+    #[test]
+    fn test_encrypt_decrypt_private_key() {
+        let key = Wallet::random_private_key();
+        let password = "password123".to_string();
+
+        let encrypted_key =
+            encrypt_private_key(&key, &password).expect("Failed to encrypt the private key");
+
+        let decrypted_key = decrypt_private_key(&encrypted_key, &password)
+            .expect("Failed to decrypt the private key");
+
+        assert_eq!(
+            decrypted_key, key,
+            "Decrypted key does not match the original private key"
+        );
+    }
+
+    #[test]
+    fn test_wrong_password() {
+        let key = Wallet::random_private_key();
+        let password = "password123".to_string();
+
+        let encrypted_key =
+            encrypt_private_key(&key, &password).expect("Failed to encrypt the private key");
+
+        let wrong_password = "password456".to_string();
+        let result = decrypt_private_key(&encrypted_key, &wrong_password);
+
+        assert!(
+            result.is_err(),
+            "Decryption should not succeed with a wrong password"
+        );
+    }
+}
diff --git a/autonomi-cli/src/wallet/error.rs b/autonomi-cli/src/wallet/error.rs
new file mode 100644
index 0000000000..b32455566d
--- /dev/null
+++ b/autonomi-cli/src/wallet/error.rs
@@ -0,0 +1,31 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Private key is invalid")]
+    InvalidPrivateKey,
+    #[error("Private key file is invalid")]
+    InvalidPrivateKeyFile,
+    #[error("Failed to encrypt private key: {0}")]
+    FailedToEncryptKey(String),
+    #[error("Failed to decrypt private key: {0}")]
+    FailedToDecryptKey(String),
+    #[error("Failed to write private key to disk: {0}")]
+    FailedToStorePrivateKey(String),
+    #[error("Failed to find wallets folder")]
+    WalletsFolderNotFound,
+    #[error("Failed to create wallets folder")]
+    FailedToCreateWalletsFolder,
+    #[error("Could not find private key file")]
+    PrivateKeyFileNotFound,
+    #[error("No wallets found. Create one using `wallet create`")]
+    NoWalletsFound,
+    #[error("Invalid wallet selection input")]
+    InvalidSelection,
+}
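Reviewer note: the two functions above write and read a single hex-encoded payload laid out as salt || nonce || ciphertext+tag. A minimal sketch of that framing, reusing the same lengths as the constants above (the 16-byte floor is the ChaCha20-Poly1305 tag size); this is illustrative only, not part of the PR:

```rust
// Sketch only: split the hex payload produced by encrypt_private_key back
// into salt || nonce || ciphertext+tag, mirroring SALT_LENGTH/NONCE_LENGTH.
fn split_encrypted_payload(payload_hex: &str) -> Option<(Vec<u8>, Vec<u8>, Vec<u8>)> {
    let bytes = hex::decode(payload_hex).ok()?;
    if bytes.len() < 8 + 12 + 16 {
        return None; // too short to contain salt, nonce and a Poly1305 tag
    }
    let (salt, rest) = bytes.split_at(8);
    let (nonce, ciphertext) = rest.split_at(12);
    Some((salt.to_vec(), nonce.to_vec(), ciphertext.to_vec()))
}
```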
diff --git a/autonomi-cli/src/wallet/fs.rs b/autonomi-cli/src/wallet/fs.rs
new file mode 100644
index 0000000000..a467961016
--- /dev/null
+++ b/autonomi-cli/src/wallet/fs.rs
@@ -0,0 +1,202 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::wallet::encryption::{decrypt_private_key, encrypt_private_key};
+use crate::wallet::error::Error;
+use crate::wallet::input::{get_password_input, get_wallet_selection_input};
+use crate::wallet::DUMMY_NETWORK;
+use autonomi::{get_evm_network_from_env, RewardsAddress, Wallet};
+use const_hex::traits::FromHex;
+use prettytable::{Cell, Row, Table};
+use std::ffi::OsString;
+use std::io::Read;
+use std::path::PathBuf;
+use std::sync::OnceLock;
+
+const ENCRYPTED_PRIVATE_KEY_EXT: &str = ".encrypted";
+
+pub static SELECTED_WALLET_ADDRESS: OnceLock<String> = OnceLock::new();
+
+/// Creates the wallets folder if it is missing and returns the folder path.
+pub(crate) fn get_client_wallet_dir_path() -> Result<PathBuf, Error> {
+    let mut home_dirs = dirs_next::data_dir().ok_or(Error::WalletsFolderNotFound)?;
+    home_dirs.push("safe");
+    home_dirs.push("autonomi");
+    home_dirs.push("wallets");
+
+    std::fs::create_dir_all(home_dirs.as_path()).map_err(|_| Error::FailedToCreateWalletsFolder)?;
+
+    Ok(home_dirs)
+}
+
+/// Writes the private key (hex-encoded) to disk.
+///
+/// When a password is set, the private key file will be encrypted.
+pub(crate) fn store_private_key(
+    private_key: &str,
+    encryption_password: Option<String>,
+) -> Result<OsString, Error> {
+    let wallet = Wallet::new_from_private_key(DUMMY_NETWORK, private_key)
+        .map_err(|_| Error::InvalidPrivateKey)?;
+
+    // Wallet address
+    let wallet_address = wallet.address().to_string();
+    let wallets_folder = get_client_wallet_dir_path()?;
+
+    // If `encryption_password` is provided, the private key will be encrypted with the password.
+    // Else it will be saved as plain text.
+    if let Some(password) = encryption_password.as_ref() {
+        let encrypted_key = encrypt_private_key(private_key, password)?;
+        let file_name = format!("{wallet_address}{ENCRYPTED_PRIVATE_KEY_EXT}");
+        let file_path = wallets_folder.join(file_name);
+
+        std::fs::write(file_path.clone(), encrypted_key)
+            .map_err(|err| Error::FailedToStorePrivateKey(err.to_string()))?;
+
+        Ok(file_path.into_os_string())
+    } else {
+        let file_path = wallets_folder.join(wallet_address);
+
+        std::fs::write(file_path.clone(), private_key)
+            .map_err(|err| Error::FailedToStorePrivateKey(err.to_string()))?;
+
+        Ok(file_path.into_os_string())
+    }
+}
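A hypothetical call site for `store_private_key` (the password is a placeholder, and `Wallet::random_private_key` is borrowed from the tests earlier in this diff):

```rust
use autonomi::Wallet;

// Sketch: store the same key twice, once plain and once encrypted. With a
// password, the file name gains the `.encrypted` extension.
fn store_both_ways() -> Result<(), Error> {
    let private_key = Wallet::random_private_key(); // placeholder key for the example
    let _plain_path = store_private_key(&private_key, None)?;
    let _encrypted_path = store_private_key(&private_key, Some("hunter2".to_string()))?;
    Ok(())
}
```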
+
+/// Loads the private key (hex-encoded) from disk.
+///
+/// If the private key file is encrypted, the function will prompt for the decryption password in the CLI.
+pub(crate) fn load_private_key(wallet_address: &str) -> Result<String, Error> {
+    let wallets_folder = get_client_wallet_dir_path()?;
+
+    let mut file_name = wallet_address.to_string();
+
+    // Check if a file with the encrypted extension exists
+    let encrypted_file_path =
+        wallets_folder.join(format!("{wallet_address}{ENCRYPTED_PRIVATE_KEY_EXT}"));
+
+    let is_encrypted = encrypted_file_path.exists();
+
+    if is_encrypted {
+        file_name.push_str(ENCRYPTED_PRIVATE_KEY_EXT);
+    }
+
+    let file_path = wallets_folder.join(file_name);
+
+    let mut file = std::fs::File::open(&file_path).map_err(|_| Error::PrivateKeyFileNotFound)?;
+
+    let mut buffer = String::new();
+    file.read_to_string(&mut buffer)
+        .map_err(|_| Error::InvalidPrivateKeyFile)?;
+
+    // If the file is encrypted, prompt for the password and decrypt the key.
+    if is_encrypted {
+        let password = get_password_input("Enter password to decrypt wallet:");
+
+        decrypt_private_key(&buffer, &password)
+    } else {
+        Ok(buffer)
+    }
+}
+
+pub(crate) fn load_wallet_from_address(wallet_address: &str) -> Result<Wallet, Error> {
+    let network = get_evm_network_from_env().expect("Could not load EVM network from environment");
+    let private_key = load_private_key(wallet_address)?;
+    let wallet =
+        Wallet::new_from_private_key(network, &private_key).expect("Could not initialize wallet");
+    Ok(wallet)
+}
+
+pub(crate) fn select_wallet() -> Result<Wallet, Error> {
+    let wallet_address = select_wallet_address()?;
+    load_wallet_from_address(&wallet_address)
+}
+
+pub(crate) fn select_wallet_private_key() -> Result<String, Error> {
+    let wallet_address = select_wallet_address()?;
+    load_private_key(&wallet_address)
+}
+
+pub(crate) fn select_wallet_address() -> Result<String, Error> {
+    // Try if a wallet address was already selected this session
+    if let Some(wallet_address) = SELECTED_WALLET_ADDRESS.get() {
+        return Ok(wallet_address.clone());
+    }
+
+    let wallets_folder = get_client_wallet_dir_path()?;
+    let wallet_files = get_wallet_files(&wallets_folder)?;
+
+    let wallet_address = match wallet_files.len() {
+        0 => Err(Error::NoWalletsFound),
+        1 => Ok(filter_wallet_file_extension(&wallet_files[0])),
+        _ => get_wallet_selection(wallet_files),
+    }?;
+
+    Ok(SELECTED_WALLET_ADDRESS
+        .get_or_init(|| wallet_address)
+        .to_string())
+}
+
+fn get_wallet_selection(wallet_files: Vec<String>) -> Result<String, Error> {
+    list_wallets(&wallet_files);
+
+    let selected_index = get_wallet_selection_input("Select by index:")
+        .parse::<usize>()
+        .map_err(|_| Error::InvalidSelection)?;
+
+    if selected_index < 1 || selected_index > wallet_files.len() {
+        return Err(Error::InvalidSelection);
+    }
+
+    Ok(filter_wallet_file_extension(
+        &wallet_files[selected_index - 1],
+    ))
+}
+
+fn list_wallets(wallet_files: &[String]) {
+    println!("Wallets:");
+
+    let mut table = Table::new();
+
+    table.add_row(Row::new(vec![
+        Cell::new("Index"),
+        Cell::new("Address"),
+        Cell::new("Encrypted"),
+    ]));
+
+    for (index, wallet_file) in wallet_files.iter().enumerate() {
+        let encrypted = wallet_file.contains(ENCRYPTED_PRIVATE_KEY_EXT);
+
+        table.add_row(Row::new(vec![
+            Cell::new(&(index + 1).to_string()),
+            Cell::new(&filter_wallet_file_extension(wallet_file)),
+            Cell::new(&encrypted.to_string()),
+        ]));
+    }
+
+    table.printstd();
+}
+
+fn get_wallet_files(wallets_folder: &PathBuf) -> Result<Vec<String>, Error> {
+    let wallet_files = std::fs::read_dir(wallets_folder)
+        .map_err(|_| Error::WalletsFolderNotFound)?
+        .filter_map(Result::ok)
+        .filter_map(|dir_entry| dir_entry.file_name().into_string().ok())
+        .filter(|file_name| {
+            let cleaned_file_name = filter_wallet_file_extension(file_name);
+            RewardsAddress::from_hex(cleaned_file_name).is_ok()
+        })
+        .collect::<Vec<String>>();
+
+    Ok(wallet_files)
+}
+
+fn filter_wallet_file_extension(wallet_file: &str) -> String {
+    wallet_file.replace(ENCRYPTED_PRIVATE_KEY_EXT, "")
+}
diff --git a/autonomi-cli/src/wallet/input.rs b/autonomi-cli/src/wallet/input.rs
new file mode 100644
index 0000000000..94e3223cd8
--- /dev/null
+++ b/autonomi-cli/src/wallet/input.rs
@@ -0,0 +1,68 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+pub(crate) fn get_wallet_selection_input(prompt: &str) -> String {
+    println!("{prompt}");
+
+    let mut buffer = String::new();
+    let stdin = std::io::stdin();
+
+    if stdin.read_line(&mut buffer).is_err() {
+        // consider if error should process::exit(1) here
+        return "".to_string();
+    };
+
+    // Remove leading and trailing whitespace
+    buffer.trim().to_owned()
+}
+
+pub(crate) fn get_password_input(prompt: &str) -> String {
+    rpassword::prompt_password(prompt)
+        .map(|str| str.trim().into())
+        .unwrap_or_default()
+}
+
+pub(crate) fn confirm_password(password: &str) -> bool {
+    const MAX_RETRIES: u8 = 2;
+
+    for _ in 0..MAX_RETRIES {
+        if get_password_input("Repeat password: ") == password {
+            return true;
+        }
+        println!("Passwords do not match.");
+    }
+
+    false
+}
+
+pub(crate) fn request_password(required: bool) -> Option<String> {
+    let prompt = if required {
+        "Enter password: "
+    } else {
+        "Enter password (leave empty for none): "
+    };
+
+    loop {
+        let password = get_password_input(prompt);
+
+        if password.is_empty() {
+            if required {
+                println!("Password is required.");
+                continue;
+            }
+
+            return None;
+        }
+
+        if confirm_password(&password) {
+            return Some(password);
+        }
+
+        println!("Please set a new password.");
+    }
+}
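Worth noting for reviewers: `select_wallet_address` above memoises the user's choice in the `SELECTED_WALLET_ADDRESS` `OnceLock`, so one process never prompts twice. A stripped-down illustration of that pattern:

```rust
use std::sync::OnceLock;

static SELECTED: OnceLock<String> = OnceLock::new();

fn remember(addr: &str) -> String {
    // The first call stores the value; every later call returns the stored one.
    SELECTED.get_or_init(|| addr.to_string()).clone()
}

fn main() {
    assert_eq!(remember("0xAAA"), "0xAAA");
    assert_eq!(remember("0xBBB"), "0xAAA"); // the first selection is sticky
}
```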
diff --git a/autonomi-cli/src/wallet/mod.rs b/autonomi-cli/src/wallet/mod.rs
new file mode 100644
index 0000000000..b0dddfb889
--- /dev/null
+++ b/autonomi-cli/src/wallet/mod.rs
@@ -0,0 +1,42 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::keys::{get_secret_key_from_env, load_evm_wallet_from_env};
+use crate::wallet::fs::{select_wallet, select_wallet_private_key};
+use autonomi::{EvmNetwork, Wallet};
+
+pub(crate) mod encryption;
+pub(crate) mod error;
+pub(crate) mod fs;
+pub(crate) mod input;
+
+pub const DUMMY_NETWORK: EvmNetwork = EvmNetwork::ArbitrumSepolia;
+
+/// Load wallet from ENV or disk
+pub(crate) fn load_wallet() -> color_eyre::Result<Wallet> {
+    // First try wallet from ENV
+    if let Ok(wallet) = load_evm_wallet_from_env() {
+        return Ok(wallet);
+    }
+
+    let wallet = select_wallet()?;
+
+    Ok(wallet)
+}
+
+/// Load wallet private key from ENV or disk
+pub(crate) fn load_wallet_private_key() -> color_eyre::Result<String> {
+    // First try wallet private key from ENV
+    if let Ok(private_key) = get_secret_key_from_env() {
+        return Ok(private_key);
+    }
+
+    let private_key = select_wallet_private_key()?;
+
+    Ok(private_key)
+}
diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml
index b44ca2233c..6f5491a4f3 100644
--- a/autonomi/Cargo.toml
+++ b/autonomi/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["MaidSafe Developers <dev@maidsafe.net>"]
 description = "Autonomi client API"
 name = "autonomi"
 license = "GPL-3.0"
-version = "0.2.0"
+version = "0.2.2"
 edition = "2021"
 homepage = "https://maidsafe.net"
 readme = "README.md"
@@ -13,14 +13,15 @@ repository = "https://github.com/maidsafe/safe_network"
 crate-type = ["cdylib", "rlib"]
 
 [features]
-default = ["data"]
+default = ["data", "vault"]
 full = ["data", "registers", "vault"]
 data = []
-vault = ["data"]
+vault = ["data", "registers"]
 fs = ["tokio/fs", "data"]
 local = ["sn_networking/local", "sn_evm/local"]
 registers = ["data"]
 loud = []
+external-signer = ["sn_evm/external-signer", "data"]
 
 [dependencies]
 bip39 = "2.0.0"
 rand = "0.8.5"
 rmp-serde = "1.1.1"
 self_encryption = "~0.30.0"
 serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_networking = { path = "../sn_networking", version = "0.19.0" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
-sn_protocol = { version = "0.17.12", path = "../sn_protocol" }
-sn_registers = { path = "../sn_registers", version = "0.4.0" }
-sn_evm = { path = "../sn_evm", version = "0.1.1" }
+sn_networking = { path = "../sn_networking", version = "0.19.1" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" }
+sn_protocol = { version = "0.17.13", path = "../sn_protocol" }
+sn_registers = { path = "../sn_registers", version = "0.4.1" }
+sn_evm = { path = "../sn_evm", version = "0.1.2" }
 thiserror = "1.0.23"
 tokio = { version = "1.35.0", features = ["sync"] }
 tracing = { version = "~0.1.26" }
@@ -51,12 +52,16 @@ futures = "0.3.30"
 wasm-bindgen = "0.2.93"
 wasm-bindgen-futures = "0.4.43"
 serde-wasm-bindgen = "0.6.5"
+sha2 = "0.10.6"
+blst = "0.3.13"
+blstrs = "0.7.1"
 
 [dev-dependencies]
+alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] }
 eyre = "0.6.5"
 sha2 = "0.10.6"
-sn_logging = { path = "../sn_logging", version = "0.2.37" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
+sn_logging = { path = "../sn_logging", version = "0.2.38" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" }
 # Do not specify the version field. Release process expects even the local dev deps to be published.
 # Removing the version field is a workaround.
test_utils = { path = "../test_utils" } @@ -66,12 +71,13 @@ wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = "0.1.7" -evmlib = { path = "../evmlib", version = "0.1.1", features = ["wasm-bindgen"] } +evmlib = { path = "../evmlib", version = "0.1.2", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-web = "0.1.3" +xor_name = { version = "5.0.0", features = ["serialize-hex"] } [lints] workspace = true diff --git a/autonomi/README.md b/autonomi/README.md index 3b27c6b0f0..5b95af38e4 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -26,7 +26,7 @@ autonomi = { path = "../autonomi", version = "0.1.0" } cargo run --bin evm_testnet ``` -3. Run a local network with the `local` feature and use the local evm node. +3. Run a local network with the `local` feature and use the local evm node. ```sh cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-local @@ -35,9 +35,9 @@ cargo run --bin=safenode-manager --features=local -- local run --build --clean - 4. Then run the tests with the `local` feature and pass the EVM params again: ```sh -$ EVM_NETWORK=local cargo test --package=autonomi --features=local +EVM_NETWORK=local cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture +RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture ``` ### Using a live testnet or mainnet @@ -55,25 +55,64 @@ cargo run --bin=safenode-manager --features=local -- local run --build --clean - payment tokens on the network (in this case Arbitrum One): ```sh -$ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local +EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture +RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture ``` ### WebAssembly To run a WASM test + - Install `wasm-pack` -- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you have `rustup`: `rustup target add wasm32-unknown-unknown`.) -- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address, e.g. `/ip4//tcp//ws/p2p/`. +- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you + have `rustup`: `rustup target add wasm32-unknown-unknown`.) +- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address, + e.g. `/ip4//tcp//ws/p2p/`. - As well as the other environment variables needed for EVM payments (e.g. `RPC_URL`). - Optionally specify the specific test, e.g. `-- put` to run `put()` in `wasm.rs` only. Example: -````sh + +```sh SAFE_PEERS=/ip4//tcp//ws/p2p/ wasm-pack test --release --firefox autonomi --features=data,files --test wasm -- put ``` +#### Test from JS in the browser + +`wasm-pack test` does not execute JavaScript, but runs mostly WebAssembly. 
Again make sure the environment variables are +set and build the JS package: + +```sh +wasm-pack build --dev --target=web autonomi --features=vault +``` + +Then cd into `autonomi/tests-js`, and use `npm` to install and serve the test html file. + +``` +cd autonomi/tests-js +npm install +npm run serve +``` + +Then go to `http://127.0.0.1:8080/tests-js` in the browser. Here, enter a `ws` multiaddr of a local node and press ' +run'. + +#### MetaMask example + +There is a MetaMask example for doing a simple put operation. + +Build the package with the `external-signer` feature (and again with the env variables) and run a webserver, e.g. with +Python: + +```sh +wasm-pack build --dev --target=web autonomi --features=external-signer +python -m http.server --directory=autonomi 8000 +``` + +Then visit `http://127.0.0.1:8000/examples/metamask` in your (modern) browser. + +Here, enter a `ws` multiaddr of a local node and press 'run'. ## Faucet (local) diff --git a/autonomi/WASM_docs.md b/autonomi/WASM_docs.md index 995809b8bd..6cf080113f 100644 --- a/autonomi/WASM_docs.md +++ b/autonomi/WASM_docs.md @@ -1,160 +1,24 @@ -## JavaScript Autonomi API Documentation +# JavaScript Autonomi API Documentation Note that this is a first version and will be subject to change. -### **Client** +The entry point for connecting to the network is {@link Client.connect}. -The `Client` object allows interaction with the network to store and retrieve data. Below are the available methods for the `Client` class. +This API is a wrapper around the Rust API, found here: https://docs.rs/autonomi/latest/autonomi. The Rust API contains more detailed documentation on concepts and some types. -#### **Constructor** +## Addresses -```javascript -let client = await new Client([multiaddress]); -``` - -- **multiaddress** (Array of Strings): A list of network addresses for the client to connect to. - -Example: -```javascript -let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); -``` - -#### **Methods** - -##### **put(data, wallet)** - -Uploads a piece of encrypted data to the network. - -```javascript -let result = await client.put(data, wallet); -``` - -- **data** (Uint8Array): The data to be stored. -- **wallet** (Wallet): The wallet used to pay for the storage. - -Returns: -- **result** (XorName): The XOR address of the stored data. - -Example: -```javascript -let wallet = getFundedWallet(); -let data = new Uint8Array([1, 2, 3]); -let result = await client.put(data, wallet); -``` - -##### **get(data_map_addr)** - -Fetches encrypted data from the network using its XOR address. - -```javascript -let data = await client.get(data_map_addr); -``` - -- **data_map_addr** (XorName): The XOR address of the data to fetch. - -Returns: -- **data** (Uint8Array): The fetched data. - -Example: -```javascript -let data = await client.get(result); -``` - -##### **cost(data)** - -Gets the cost of storing the provided data on the network. - -```javascript -let cost = await client.cost(data); -``` - -- **data** (Uint8Array): The data whose storage cost you want to calculate. - -Returns: -- **cost** (AttoTokens): The calculated cost for storing the data. - -Example: -```javascript -let cost = await client.cost(new Uint8Array([1, 2, 3])); -``` - ---- - -### **Wallet** - -The `Wallet` object represents an Ethereum wallet used for data payments. - -#### **Methods** - -##### **new_from_private_key(network, private_key)** - -Creates a new wallet using the given private key. 
- -```javascript -let wallet = Wallet.new_from_private_key(network, private_key); -``` +For addresses (chunk, data, archives, etc) we're using hex-encoded strings containing a 256-bit XOR addresse. For example: `abcdefg012345678900000000000000000000000000000000000000000000000`. -- **network** (EvmNetwork): The network to which the wallet connects. -- **private_key** (String): The private key of the wallet. - -Returns: -- **wallet** (Wallet): The created wallet. - -Example: -```javascript -let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); -``` - -##### **address()** - -Gets the walletโ€™s address. +## Example ```javascript -let address = wallet.address(); -``` - -Returns: -- **address** (Address): The wallet's address. +import init, { Client, Wallet, getEvmNetwork } from 'autonomi'; -Example: -```javascript -let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); -let address = wallet.address(); -``` - ---- - -### **EvmNetwork** - -The `EvmNetwork` object represents the blockchain network. - -#### **Methods** - -##### **default()** - -Connects to the default network. - -```javascript -let network = EvmNetwork.default(); -``` - -Returns: -- **network** (EvmNetwork): The default network. - -Example: -```javascript -let network = EvmNetwork.default(); -``` - ---- - -### Example Usage: - -```javascript let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); console.log("connected"); -let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); +let wallet = Wallet.new_from_private_key(getEvmNetwork, "your_private_key_here"); console.log("wallet retrieved"); let data = new Uint8Array([1, 2, 3]); @@ -164,7 +28,3 @@ console.log("Data stored at:", result); let fetchedData = await client.get(result); console.log("Data retrieved:", fetchedData); ``` - ---- - -This documentation covers the basic usage of `Client`, `Wallet`, and `EvmNetwork` types in the JavaScript API. 
\ No newline at end of file diff --git a/autonomi/examples/metamask/index.html b/autonomi/examples/metamask/index.html new file mode 100644 index 0000000000..50844bd7f9 --- /dev/null +++ b/autonomi/examples/metamask/index.html @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/autonomi/examples/metamask/index.js b/autonomi/examples/metamask/index.js new file mode 100644 index 0000000000..633eb36317 --- /dev/null +++ b/autonomi/examples/metamask/index.js @@ -0,0 +1,149 @@ +import init, * as autonomi from '../../pkg/autonomi.js'; + +export async function externalSignerPut(peerAddr) { + try { + // Check if MetaMask (window.ethereum) is available + if (typeof window.ethereum === 'undefined') { + throw new Error('MetaMask is not installed'); + } + + // Request account access from MetaMask + const accounts = await window.ethereum.request({method: 'eth_requestAccounts'}); + const sender = accounts[0]; // Get the first account + + // Setup API client + await init(); + + autonomi.logInit("autonomi=trace"); + + const client = await autonomi.Client.connect([peerAddr]); + + // Generate 1MB of random bytes in a Uint8Array + const data = new Uint8Array(1024 * 1024).map(() => Math.floor(Math.random() * 256)); + + // Get quotes and payment information (this would need actual implementation) + const [quotes, quotePayments, free_chunks] = await client.getQuotes(data); + + // Get the EVM network + let evmNetwork = autonomi.getEvmNetwork(); + + // Form quotes payment calldata + const payForQuotesCalldata = autonomi.getPayForQuotesCalldata( + evmNetwork, + quotePayments + ); + + // Form approve to spend tokens calldata + const approveCalldata = autonomi.getApproveToSpendTokensCalldata( + evmNetwork, + payForQuotesCalldata.approve_spender, + payForQuotesCalldata.approve_amount + ); + + console.log("Sending approve transaction.."); + + // Approve to spend tokens + let txHash = await sendTransaction({ + from: sender, + to: approveCalldata[1], + data: approveCalldata[0] + }); + + await waitForTransactionConfirmation(txHash); + + let payments = {}; + + // Execute batched quote payment transactions + for (const [calldata, quoteHashes] of payForQuotesCalldata.batched_calldata_map) { + console.log("Sending batched data payment transaction.."); + + let txHash = await sendTransaction({ + from: sender, + to: payForQuotesCalldata.to, + data: calldata + }); + + await waitForTransactionConfirmation(txHash); + + // Record the transaction hashes for each quote + quoteHashes.forEach(quoteHash => { + payments[quoteHash] = txHash; + }); + } + + // Generate payment proof + const proof = autonomi.getPaymentProofFromQuotesAndPayments(quotes, payments); + + // Submit the data with proof of payment + const addr = await client.dataPutWithProof(data, proof); + + // Wait for a few seconds to allow data to propagate + await new Promise(resolve => setTimeout(resolve, 10000)); + + // Fetch the data back + const fetchedData = await client.dataGet(addr); + + if (fetchedData.toString() === data.toString()) { + console.log("Fetched data matches the original data!"); + } else { + throw new Error("Fetched data does not match original data!") + } + + console.log("Data successfully put and verified!"); + + } catch (error) { + console.error("An error occurred:", error); + } +} + +// Helper function to send a transaction through MetaMask using Ethereum JSON-RPC +async function sendTransaction({from, to, data}) { + const transactionParams = { + from: from, // Sender address + to: to, // Destination address + 
+        data: data, // Calldata (transaction input)
+    };
+
+    try {
+        // Send the transaction via MetaMask and get the transaction hash
+        const txHash = await window.ethereum.request({
+            method: 'eth_sendTransaction',
+            params: [transactionParams]
+        });
+
+        console.log(`Transaction sent with hash: ${txHash}`);
+        return txHash; // Return the transaction hash
+
+    } catch (error) {
+        console.error("Failed to send transaction:", error);
+        throw error;
+    }
+}
+
+async function waitForTransactionConfirmation(txHash) {
+    const delay = (ms) => new Promise(resolve => setTimeout(resolve, ms));
+
+    // Poll for the transaction receipt
+    while (true) {
+        // Query the transaction receipt
+        const receipt = await window.ethereum.request({
+            method: 'eth_getTransactionReceipt',
+            params: [txHash],
+        });
+
+        // If the receipt is found, the transaction has been mined
+        if (receipt !== null) {
+            // Check if the transaction was successful (status is '0x1')
+            if (receipt.status === '0x1') {
+                console.log('Transaction successful!', receipt);
+                return receipt; // Return the transaction receipt
+            } else {
+                console.log('Transaction failed!', receipt);
+                throw new Error('Transaction failed');
+            }
+        }
+
+        // Wait for 1 second before checking again
+        await delay(1000);
+    }
+}
\ No newline at end of file
diff --git a/autonomi/index.html b/autonomi/index.html
deleted file mode 100644
index bd806016ca..0000000000
--- a/autonomi/index.html
+++ /dev/null
@@ -1,48 +0,0 @@
[48 deleted lines of HTML markup not recoverable from the extraction]
diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs
index d3cf9714ec..04ad120b19 100644
--- a/autonomi/src/client/archive.rs
+++ b/autonomi/src/client/archive.rs
@@ -6,29 +6,136 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
-use std::{collections::HashMap, path::PathBuf};
+use std::{
+    collections::HashMap,
+    path::{Path, PathBuf},
+};
+
+use sn_networking::target_arch::{Duration, SystemTime, UNIX_EPOCH};
 
 use super::{
-    data::DataAddr,
-    data::{GetError, PutError},
+    data::{CostError, DataAddr, GetError, PutError},
     Client,
 };
 use bytes::Bytes;
 use serde::{Deserialize, Serialize};
-use sn_evm::EvmWallet;
+use sn_evm::{AttoTokens, EvmWallet};
 use xor_name::XorName;
 
 /// The address of an archive on the network. Points to an [`Archive`].
 pub type ArchiveAddr = XorName;
 
+use thiserror::Error;
+
+#[derive(Error, Debug, PartialEq, Eq)]
+pub enum RenameError {
+    #[error("File not found in archive: {0}")]
+    FileNotFound(PathBuf),
+}
+
 /// An archive of files that contains file paths, their metadata and the files' data addresses
 /// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+/// Archives are public meaning anyone can read the data in the archive. For private archives use [`crate::client::archive_private::PrivateArchive`].
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
 pub struct Archive {
-    pub map: HashMap<PathBuf, DataAddr>,
+    map: HashMap<PathBuf, (DataAddr, Metadata)>,
+}
+
+/// Metadata for a file in an archive. Time values are UNIX timestamps.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct Metadata {
+    /// When the file was (last) uploaded to the network.
+    pub uploaded: u64,
+    /// File creation time on local file system. See [`std::fs::Metadata::created`] for details per OS.
+    pub created: u64,
+    /// Last file modification time taken from local file system. See [`std::fs::Metadata::modified`] for details per OS.
+    pub modified: u64,
+}
+
+impl Metadata {
+    /// Create a new metadata struct
+    pub fn new() -> Self {
+        let now = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap_or(Duration::from_secs(0))
+            .as_secs();
+        Self {
+            uploaded: now,
+            created: now,
+            modified: now,
+        }
+    }
+}
+
+impl Default for Metadata {
+    fn default() -> Self {
+        Self::new()
+    }
+}
 
 impl Archive {
+    /// Create a new empty local archive
+    /// Note that this does not upload the archive to the network
+    pub fn new() -> Self {
+        Self {
+            map: HashMap::new(),
+        }
+    }
+
+    /// Rename a file in an archive
+    /// Note that this does not upload the archive to the network
+    pub fn rename_file(&mut self, old_path: &Path, new_path: &Path) -> Result<(), RenameError> {
+        let (data_addr, mut meta) = self
+            .map
+            .remove(old_path)
+            .ok_or(RenameError::FileNotFound(old_path.to_path_buf()))?;
+        let now = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap_or(Duration::from_secs(0))
+            .as_secs();
+        meta.modified = now;
+        self.map.insert(new_path.to_path_buf(), (data_addr, meta));
+        Ok(())
+    }
+
+    /// Add a file to a local archive
+    /// Note that this does not upload the archive to the network
+    pub fn add_file(&mut self, path: PathBuf, data_addr: DataAddr, meta: Metadata) {
+        self.map.insert(path, (data_addr, meta));
+    }
+
+    /// Add a file to a local archive, with default metadata
+    /// Note that this does not upload the archive to the network
+    pub fn add_new_file(&mut self, path: PathBuf, data_addr: DataAddr) {
+        self.map.insert(path, (data_addr, Metadata::new()));
+    }
+
+    /// List all files in the archive
+    pub fn files(&self) -> Vec<(PathBuf, Metadata)> {
+        self.map
+            .iter()
+            .map(|(path, (_, meta))| (path.clone(), meta.clone()))
+            .collect()
+    }
+
+    /// List all data addresses of the files in the archive
+    pub fn addresses(&self) -> Vec<DataAddr> {
+        self.map.values().map(|(addr, _)| *addr).collect()
+    }
+
+    /// Iterate over the archive items
+    /// Returns an iterator over (PathBuf, DataAddr, Metadata)
+    pub fn iter(&self) -> impl Iterator<Item = (&PathBuf, &DataAddr, &Metadata)> {
+        self.map
+            .iter()
+            .map(|(path, (addr, meta))| (path, addr, meta))
+    }
+
+    /// Get the underlying map
+    pub fn map(&self) -> &HashMap<PathBuf, (DataAddr, Metadata)> {
+        &self.map
+    }
+
     /// Deserialize from bytes.
     pub fn from_bytes(data: Bytes) -> Result<Archive, rmp_serde::decode::Error> {
         let root: Archive = rmp_serde::from_slice(&data[..])?;
@@ -63,4 +170,12 @@ impl Client {
             .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?;
         self.data_put(bytes, wallet).await
     }
+
+    /// Get the cost to upload an archive
+    pub async fn archive_cost(&self, archive: Archive) -> Result<AttoTokens, CostError> {
+        let bytes = archive
+            .into_bytes()
+            .map_err(|e| CostError::Serialization(format!("Failed to serialize archive: {e:?}")))?;
+        self.data_cost(bytes).await
+    }
 }
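A sketch of how the new `Archive` API composes locally before any network call; the address is fabricated with `XorName::from_content` purely for illustration (real addresses come from `data_put`):

```rust
use std::path::Path;
use xor_name::XorName;

// Sketch: assemble and adjust an Archive entirely client-side.
fn build_archive() -> Archive {
    let mut archive = Archive::new();
    let addr = XorName::from_content(b"example bytes"); // stand-in address
    archive.add_new_file("docs/readme.md".into(), addr);
    archive
        .rename_file(Path::new("docs/readme.md"), Path::new("docs/README.md"))
        .expect("the file was just added");
    archive
}
```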
Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +use sn_networking::target_arch::{Duration, SystemTime, UNIX_EPOCH}; + +use super::{ + archive::{Metadata, RenameError}, + data::{GetError, PutError}, + data_private::PrivateDataAccess, + Client, +}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; +use sn_evm::EvmWallet; + +/// The address of a private archive +/// Contains the [`PrivateDataAccess`] leading to the [`PrivateArchive`] data +pub type PrivateArchiveAccess = PrivateDataAccess; + +/// A private archive of files that containing file paths, their metadata and the files data maps +/// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] +pub struct PrivateArchive { + map: HashMap, +} + +impl PrivateArchive { + /// Create a new emtpy local archive + /// Note that this does not upload the archive to the network + pub fn new() -> Self { + Self { + map: HashMap::new(), + } + } + + /// Rename a file in an archive + /// Note that this does not upload the archive to the network + pub fn rename_file(&mut self, old_path: &Path, new_path: &Path) -> Result<(), RenameError> { + let (data_addr, mut meta) = self + .map + .remove(old_path) + .ok_or(RenameError::FileNotFound(old_path.to_path_buf()))?; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::from_secs(0)) + .as_secs(); + meta.modified = now; + self.map.insert(new_path.to_path_buf(), (data_addr, meta)); + Ok(()) + } + + /// Add a file to a local archive + /// Note that this does not upload the archive to the network + pub fn add_file(&mut self, path: PathBuf, data_map: PrivateDataAccess, meta: Metadata) { + self.map.insert(path, (data_map, meta)); + } + + /// Add a file to a local archive, with default metadata + /// Note that this does not upload the archive to the network + pub fn add_new_file(&mut self, path: PathBuf, data_map: PrivateDataAccess) { + self.map.insert(path, (data_map, Metadata::new())); + } + + /// List all files in the archive + pub fn files(&self) -> Vec<(PathBuf, Metadata)> { + self.map + .iter() + .map(|(path, (_, meta))| (path.clone(), meta.clone())) + .collect() + } + + /// List all data addresses of the files in the archive + pub fn addresses(&self) -> Vec { + self.map + .values() + .map(|(data_map, _)| data_map.clone()) + .collect() + } + + /// Iterate over the archive items + /// Returns an iterator over (PathBuf, SecretDataMap, Metadata) + pub fn iter(&self) -> impl Iterator { + self.map + .iter() + .map(|(path, (data_map, meta))| (path, data_map, meta)) + } + + /// Get the underlying map + pub fn map(&self) -> &HashMap { + &self.map + } + + /// Deserialize from bytes. + pub fn from_bytes(data: Bytes) -> Result { + let root: PrivateArchive = rmp_serde::from_slice(&data[..])?; + + Ok(root) + } + + /// Serialize to bytes. + pub fn into_bytes(&self) -> Result { + let root_serialized = rmp_serde::to_vec(&self)?; + let root_serialized = Bytes::from(root_serialized); + + Ok(root_serialized) + } +} + +impl Client { + /// Fetch a private archive from the network + pub async fn private_archive_get( + &self, + addr: PrivateArchiveAccess, + ) -> Result { + let data = self.private_data_get(addr).await?; + Ok(PrivateArchive::from_bytes(data)?) 
+
+impl Client {
+    /// Fetch a private archive from the network
+    pub async fn private_archive_get(
+        &self,
+        addr: PrivateArchiveAccess,
+    ) -> Result<PrivateArchive, GetError> {
+        let data = self.private_data_get(addr).await?;
+        Ok(PrivateArchive::from_bytes(data)?)
+    }
+
+    /// Upload a private archive to the network
+    pub async fn private_archive_put(
+        &self,
+        archive: PrivateArchive,
+        wallet: &EvmWallet,
+    ) -> Result<PrivateArchiveAccess, PutError> {
+        let bytes = archive
+            .into_bytes()
+            .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?;
+        self.private_data_put(bytes, wallet).await
+    }
+}
diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index 055016f291..0a6be8598a 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -8,11 +8,12 @@
 
 use bytes::Bytes;
 use libp2p::kad::Quorum;
-use tokio::task::JoinError;
 
 use std::collections::HashSet;
+use std::sync::LazyLock;
 use xor_name::XorName;
 
+use crate::client::utils::process_tasks_with_max_concurrency;
 use crate::client::{ClientEvent, UploadSummary};
 use crate::{self_encryption::encrypt, Client};
 use sn_evm::{Amount, AttoTokens};
@@ -23,6 +24,22 @@ use sn_protocol::{
     NetworkAddress,
 };
 
+/// Number of chunks to upload in parallel.
+/// Can be overridden by the `CHUNK_UPLOAD_BATCH_SIZE` environment variable.
+pub static CHUNK_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
+    let batch_size = std::env::var("CHUNK_UPLOAD_BATCH_SIZE")
+        .ok()
+        .and_then(|s| s.parse().ok())
+        .unwrap_or(
+            std::thread::available_parallelism()
+                .map(|n| n.get())
+                .unwrap_or(1)
+                * 8,
+        );
+    info!("Chunk upload batch size: {}", batch_size);
+    batch_size
+});
+
 /// Raw Data Address (points to a DataMap)
 pub type DataAddr = XorName;
 /// Raw Chunk Address (points to a [`Chunk`])
@@ -33,31 +50,31 @@ pub type ChunkAddr = XorName;
 pub enum PutError {
     #[error("Failed to self-encrypt data.")]
     SelfEncryption(#[from] crate::self_encryption::Error),
-    #[error("Error getting Vault XorName data.")]
-    VaultXorName,
     #[error("A network error occurred.")]
     Network(#[from] NetworkError),
+    #[error("Error occurred during cost estimation.")]
+    CostError(#[from] CostError),
     #[error("Error occurred during payment.")]
     PayError(#[from] PayError),
-    #[error("Failed to serialize {0}")]
+    #[error("Serialization error: {0}")]
     Serialization(String),
     #[error("A wallet error occurred.")]
     Wallet(#[from] sn_evm::EvmError),
+    #[error("The vault owner key does not match the client's public key")]
+    VaultBadOwner,
+    #[error("Payment unexpectedly invalid for {0:?}")]
+    PaymentUnexpectedlyInvalid(NetworkAddress),
 }
 
 /// Errors that can occur during the pay operation.
 #[derive(Debug, thiserror::Error)]
 pub enum PayError {
-    #[error("Could not get store quote for: {0:?} after several retries")]
-    CouldNotGetStoreQuote(XorName),
-    #[error("Could not get store costs: {0:?}")]
-    CouldNotGetStoreCosts(NetworkError),
-    #[error("Could not simultaneously fetch store costs: {0:?}")]
-    JoinError(JoinError),
     #[error("Wallet error: {0:?}")]
     EvmWalletError(#[from] EvmWalletError),
     #[error("Failed to self-encrypt data.")]
     SelfEncryption(#[from] crate::self_encryption::Error),
+    #[error("Cost error: {0:?}")]
+    Cost(#[from] CostError),
 }
 
 /// Errors that can occur during the get operation.
@@ -75,6 +92,19 @@ pub enum GetError {
     Protocol(#[from] sn_protocol::Error),
 }
 
+/// Errors that can occur during the cost calculation.
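An aside before the remaining data.rs hunks: the parallel upload paths in this PR rely on `process_tasks_with_max_concurrency` from `client/utils.rs`, which this diff does not include. A hypothetical shape for such a helper, assuming the `futures` crate's `buffer_unordered` (the real implementation may differ):

```rust
use futures::stream::{self, StreamExt};

// Hypothetical sketch, not the code from client/utils.rs: drive at most
// `batch_size` futures at a time and collect their outputs.
async fn process_tasks_with_max_concurrency<F, T>(tasks: Vec<F>, batch_size: usize) -> Vec<T>
where
    F: std::future::Future<Output = T>,
{
    stream::iter(tasks)
        .buffer_unordered(batch_size)
        .collect::<Vec<T>>()
        .await
}
```

With this shape, results arrive in completion order, which would explain why the call sites below carry the path and metadata inside each task's output tuple rather than relying on ordering.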
+#[derive(Debug, thiserror::Error)] +pub enum CostError { + #[error("Failed to self-encrypt data.")] + SelfEncryption(#[from] crate::self_encryption::Error), + #[error("Could not get store quote for: {0:?} after several retries")] + CouldNotGetStoreQuote(XorName), + #[error("Could not get store costs: {0:?}")] + CouldNotGetStoreCosts(NetworkError), + #[error("Failed to serialize {0}")] + Serialization(String), +} + impl Client { /// Fetch a blob of data from the network pub async fn data_get(&self, addr: DataAddr) -> Result { @@ -87,17 +117,15 @@ impl Client { Ok(data) } - /// Upload a piece of data to the network. This data will be self-encrypted. + /// Upload a piece of data to the network. /// Returns the Data Address at which the data was stored. + /// This data is publicly accessible. pub async fn data_put(&self, data: Bytes, wallet: &EvmWallet) -> Result { let now = sn_networking::target_arch::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; - info!( - "Uploading datamap chunk to the network at: {:?}", - data_map_chunk.address() - ); - + let data_map_addr = data_map_chunk.address(); debug!("Encryption took: {:.2?}", now.elapsed()); + info!("Uploading datamap chunk to the network at: {data_map_addr:?}"); let map_xor_name = *data_map_chunk.address().xorname(); let mut xor_names = vec![map_xor_name]; @@ -113,29 +141,39 @@ impl Client { .await .inspect_err(|err| error!("Error paying for data: {err:?}"))?; - let mut record_count = 0; - - // Upload data map - if let Some(proof) = payment_proofs.get(&map_xor_name) { - debug!("Uploading data map chunk: {map_xor_name:?}"); - self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone()) - .await - .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?; - record_count += 1; - } - - // Upload the rest of the chunks + // Upload all the chunks in parallel including the data map chunk debug!("Uploading {} chunks", chunks.len()); - for chunk in chunks { + let mut upload_tasks = vec![]; + for chunk in chunks.into_iter().chain(std::iter::once(data_map_chunk)) { + let self_clone = self.clone(); + let address = *chunk.address(); if let Some(proof) = payment_proofs.get(chunk.name()) { - let address = *chunk.address(); - self.chunk_upload_with_payment(chunk, proof.clone()) - .await - .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?; - record_count += 1; + let proof_clone = proof.clone(); + upload_tasks.push(async move { + self_clone + .chunk_upload_with_payment(chunk, proof_clone) + .await + .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) + }); + } else { + debug!("Chunk at {address:?} was already paid for so skipping"); } } - + let uploads = + process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE).await; + + // Check for errors + let total_uploads = uploads.len(); + let ok_uploads = uploads + .iter() + .filter_map(|up| up.is_ok().then_some(())) + .count(); + info!("Uploaded {} chunks out of {}", ok_uploads, total_uploads); + let uploads: Result, _> = uploads.into_iter().collect(); + uploads.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; + let record_count = ok_uploads; + + // Reporting if let Some(channel) = self.client_event_sender.as_ref() { let tokens_spent = payment_proofs .values() @@ -184,7 +222,7 @@ impl Client { } /// Get the estimated cost of storing a piece of data. 
-    pub async fn data_cost(&self, data: Bytes) -> Result<AttoTokens, PayError> {
+    pub async fn data_cost(&self, data: Bytes) -> Result<AttoTokens, CostError> {
         let now = sn_networking::target_arch::Instant::now();
         let (data_map_chunk, chunks) = encrypt(data)?;
diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs
new file mode 100644
index 0000000000..d2ecaf0a2b
--- /dev/null
+++ b/autonomi/src/client/data_private.rs
@@ -0,0 +1,144 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::hash::{DefaultHasher, Hash, Hasher};
+
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+use sn_evm::{Amount, EvmWallet};
+use sn_protocol::storage::Chunk;
+
+use super::data::CHUNK_UPLOAD_BATCH_SIZE;
+use super::data::{GetError, PutError};
+use crate::client::utils::process_tasks_with_max_concurrency;
+use crate::client::{ClientEvent, UploadSummary};
+use crate::{self_encryption::encrypt, Client};
+
+/// Private data on the network can be accessed with this
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct PrivateDataAccess(Chunk);
+
+impl PrivateDataAccess {
+    pub fn to_hex(&self) -> String {
+        hex::encode(self.0.value())
+    }
+
+    pub fn from_hex(hex: &str) -> Result<Self, hex::FromHexError> {
+        let data = hex::decode(hex)?;
+        Ok(Self(Chunk::new(Bytes::from(data))))
+    }
+
+    /// Get a private address for [`PrivateDataAccess`]. Note that this is not a network address, it is only used for referring to private data client side.
+    pub fn address(&self) -> String {
+        hash_to_short_string(&self.to_hex())
+    }
+}
+
+fn hash_to_short_string(input: &str) -> String {
+    let mut hasher = DefaultHasher::new();
+    input.hash(&mut hasher);
+    let hash_value = hasher.finish();
+    hash_value.to_string()
+}
+
+impl Client {
+    /// Fetch a blob of private data from the network
+    pub async fn private_data_get(&self, data_map: PrivateDataAccess) -> Result<Bytes, GetError> {
+        info!(
+            "Fetching private data from Data Map {:?}",
+            data_map.0.address()
+        );
+        let data = self.fetch_from_data_map_chunk(data_map.0.value()).await?;
+
+        Ok(data)
+    }
+
+    /// Upload a piece of private data to the network. This data will be self-encrypted.
+    /// Returns the [`PrivateDataAccess`] containing the map to the encrypted chunks.
+    /// This data is private and only accessible with the [`PrivateDataAccess`].
+ pub async fn private_data_put( + &self, + data: Bytes, + wallet: &EvmWallet, + ) -> Result { + let now = sn_networking::target_arch::Instant::now(); + let (data_map_chunk, chunks) = encrypt(data)?; + debug!("Encryption took: {:.2?}", now.elapsed()); + + // Pay for all chunks + let xor_names: Vec<_> = chunks.iter().map(|chunk| *chunk.name()).collect(); + info!("Paying for {} addresses", xor_names.len()); + let (payment_proofs, _free_chunks) = self + .pay(xor_names.into_iter(), wallet) + .await + .inspect_err(|err| error!("Error paying for data: {err:?}"))?; + + // Upload the chunks with the payments + debug!("Uploading {} chunks", chunks.len()); + let mut upload_tasks = vec![]; + for chunk in chunks { + let self_clone = self.clone(); + let address = *chunk.address(); + if let Some(proof) = payment_proofs.get(chunk.name()) { + let proof_clone = proof.clone(); + upload_tasks.push(async move { + self_clone + .chunk_upload_with_payment(chunk, proof_clone) + .await + .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) + }); + } else { + debug!("Chunk at {address:?} was already paid for so skipping"); + } + } + let uploads = + process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE).await; + + // Check for errors + let total_uploads = uploads.len(); + let ok_uploads = uploads + .iter() + .filter_map(|up| up.is_ok().then_some(())) + .count(); + info!("Uploaded {} chunks out of {}", ok_uploads, total_uploads); + let uploads: Result, _> = uploads.into_iter().collect(); + uploads.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; + let record_count = ok_uploads; + + // Reporting + if let Some(channel) = self.client_event_sender.as_ref() { + let tokens_spent = payment_proofs + .values() + .map(|proof| proof.quote.cost.as_atto()) + .sum::(); + + let summary = UploadSummary { + record_count, + tokens_spent, + }; + if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { + error!("Failed to send client event: {err:?}"); + } + } + + Ok(PrivateDataAccess(data_map_chunk)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_hex() { + let data_map = PrivateDataAccess(Chunk::new(Bytes::from_static(b"hello"))); + let hex = data_map.to_hex(); + let data_map2 = PrivateDataAccess::from_hex(&hex).expect("Failed to decode hex"); + assert_eq!(data_map, data_map2); + } +} diff --git a/autonomi/src/client/external_signer.rs b/autonomi/src/client/external_signer.rs new file mode 100644 index 0000000000..b17002bd9c --- /dev/null +++ b/autonomi/src/client/external_signer.rs @@ -0,0 +1,108 @@ +use crate::client::data::{DataAddr, PutError}; +use crate::client::utils::extract_quote_payments; +use crate::self_encryption::encrypt; +use crate::Client; +use bytes::Bytes; +use sn_evm::{PaymentQuote, ProofOfPayment, QuotePayment}; +use sn_protocol::storage::Chunk; +use std::collections::HashMap; +use xor_name::XorName; + +#[allow(unused_imports)] +pub use sn_evm::external_signer::*; + +impl Client { + /// Upload a piece of data to the network. This data will be self-encrypted. + /// Payment will not be done automatically as opposed to the regular `data_put`, so the proof of payment has to be provided. + /// Returns the Data Address at which the data was stored. 
+    pub async fn data_put_with_proof_of_payment(
+        &self,
+        data: Bytes,
+        proof: HashMap<XorName, ProofOfPayment>,
+    ) -> Result<DataAddr, PutError> {
+        let (data_map_chunk, chunks, _) = encrypt_data(data)?;
+        self.upload_data_map(&proof, &data_map_chunk).await?;
+        self.upload_chunks(&chunks, &proof).await?;
+        Ok(*data_map_chunk.address().xorname())
+    }
+
+    /// Get quotes for data.
+    /// Returns a cost map, data payments to be executed and a list of free (already paid for) chunks.
+    pub async fn get_quotes_for_data(
+        &self,
+        data: Bytes,
+    ) -> Result<
+        (
+            HashMap<XorName, PaymentQuote>,
+            Vec<QuotePayment>,
+            Vec<XorName>,
+        ),
+        PutError,
+    > {
+        // Encrypt the data as chunks
+        let (_data_map_chunk, _chunks, xor_names) = encrypt_data(data)?;
+
+        let cost_map: HashMap<XorName, PaymentQuote> = self
+            .get_store_quotes(xor_names.into_iter())
+            .await?
+            .into_iter()
+            .map(|(name, (_, _, q))| (name, q))
+            .collect();
+
+        let (quote_payments, free_chunks) = extract_quote_payments(&cost_map);
+        Ok((cost_map, quote_payments, free_chunks))
+    }
+
+    async fn upload_data_map(
+        &self,
+        payment_proofs: &HashMap<XorName, ProofOfPayment>,
+        data_map_chunk: &Chunk,
+    ) -> Result<(), PutError> {
+        let map_xor_name = data_map_chunk.name();
+
+        if let Some(proof) = payment_proofs.get(map_xor_name) {
+            debug!("Uploading data map chunk: {map_xor_name:?}");
+            self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone())
+                .await
+                .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))
+        } else {
+            Ok(())
+        }
+    }
+
+    async fn upload_chunks(
+        &self,
+        chunks: &[Chunk],
+        payment_proofs: &HashMap<XorName, ProofOfPayment>,
+    ) -> Result<(), PutError> {
+        debug!("Uploading {} chunks", chunks.len());
+        for chunk in chunks {
+            if let Some(proof) = payment_proofs.get(chunk.name()) {
+                let address = *chunk.address();
+                self.chunk_upload_with_payment(chunk.clone(), proof.clone())
+                    .await
+                    .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?;
+            }
+        }
+        Ok(())
+    }
+}
+
+/// Encrypts data as chunks.
+///
+/// Returns the data map chunk, file chunks and a list of all content addresses including the data map.
+fn encrypt_data(data: Bytes) -> Result<(Chunk, Vec<Chunk>, Vec<XorName>), PutError> {
+    let now = sn_networking::target_arch::Instant::now();
+    let result = encrypt(data)?;
+
+    debug!("Encryption took: {:.2?}", now.elapsed());
+
+    let map_xor_name = *result.0.address().xorname();
+    let mut xor_names = vec![map_xor_name];
+
+    for chunk in &result.1 {
+        xor_names.push(*chunk.name());
+    }
+
+    Ok((result.0, result.1, xor_names))
+}
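Mirroring the MetaMask example earlier in this diff from the Rust side: quotes first, payment out of band, then upload with the proofs. A sketch only; how the `proofs` map is assembled is signer-specific and left to the caller:

```rust
use std::collections::HashMap;

use bytes::Bytes;
use sn_evm::ProofOfPayment;
use xor_name::XorName;

// Sketch: the two client calls an external-signer integration needs.
// `proofs` would be built from the quotes plus the signer's tx hashes.
async fn put_with_external_signer(
    client: &Client,
    data: Bytes,
    proofs: HashMap<XorName, ProofOfPayment>,
) -> Result<DataAddr, PutError> {
    // Normally quotes are fetched first so the signer knows what to pay:
    let (_quotes, _quote_payments, _free_chunks) =
        client.get_quotes_for_data(data.clone()).await?;
    client.data_put_with_proof_of_payment(data, proofs).await
}
```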
diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index 8fff06324c..c1505224bc 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -6,15 +6,35 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+use crate::client::archive::Metadata;
+use crate::client::data::CostError;
+use crate::client::utils::process_tasks_with_max_concurrency;
 use crate::client::Client;
 use bytes::Bytes;
 use sn_evm::EvmWallet;
-use std::collections::HashMap;
+use sn_networking::target_arch::{Duration, SystemTime};
 use std::path::PathBuf;
+use std::sync::LazyLock;
 
 use super::archive::{Archive, ArchiveAddr};
 use super::data::{DataAddr, GetError, PutError};
 
+/// Number of files to upload in parallel.
+/// Can be overridden by the `FILE_UPLOAD_BATCH_SIZE` environment variable.
+pub static FILE_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
+    let batch_size = std::env::var("FILE_UPLOAD_BATCH_SIZE")
+        .ok()
+        .and_then(|s| s.parse().ok())
+        .unwrap_or(
+            std::thread::available_parallelism()
+                .map(|n| n.get())
+                .unwrap_or(1)
+                * 8,
+        );
+    info!("File upload batch size: {}", batch_size);
+    batch_size
+});
+
 /// Errors that can occur during the file upload operation.
 #[cfg(feature = "fs")]
 #[derive(Debug, thiserror::Error)]
@@ -43,6 +63,22 @@ pub enum DownloadError {
     IoError(#[from] std::io::Error),
 }
 
+#[cfg(feature = "fs")]
+/// Errors that can occur during the file cost calculation.
+#[derive(Debug, thiserror::Error)]
+pub enum FileCostError {
+    #[error("Cost error: {0}")]
+    Cost(#[from] CostError),
+    #[error("IO failure")]
+    IoError(#[from] std::io::Error),
+    #[error("Serialization error")]
+    Serialization(#[from] rmp_serde::encode::Error),
+    #[error("Self encryption error")]
+    SelfEncryption(#[from] crate::self_encryption::Error),
+    #[error("Walkdir error")]
+    WalkDir(#[from] walkdir::Error),
+}
+
 impl Client {
     /// Download file from network to local file system
     pub async fn file_download(
@@ -65,8 +101,8 @@ impl Client {
         to_dest: PathBuf,
     ) -> Result<(), DownloadError> {
         let archive = self.archive_get(archive_addr).await?;
-        for (path, addr) in archive.map {
-            self.file_download(addr, to_dest.join(path)).await?;
+        for (path, addr, _meta) in archive.iter() {
+            self.file_download(*addr, to_dest.join(path)).await?;
         }
         Ok(())
     }
@@ -78,29 +114,51 @@
         dir_path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<ArchiveAddr, UploadError> {
-        let mut map = HashMap::new();
+        info!("Uploading directory: {dir_path:?}");
+        let start = tokio::time::Instant::now();
 
+        // start upload of files in parallel
+        let mut upload_tasks = Vec::new();
         for entry in walkdir::WalkDir::new(dir_path) {
             let entry = entry?;
-
             if !entry.file_type().is_file() {
                 continue;
             }
+
+            let metadata = metadata_from_entry(&entry);
             let path = entry.path().to_path_buf();
-            tracing::info!("Uploading file: {path:?}");
-            #[cfg(feature = "loud")]
-            println!("Uploading file: {path:?}");
-            let file = self.file_upload(path.clone(), wallet).await?;
+            upload_tasks.push(async move {
+                let file = self.file_upload(path.clone(), wallet).await;
+                (path, metadata, file)
+            });
+        }
 
-            map.insert(path, file);
+        // wait for all files to be uploaded
+        let uploads =
+            process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await;
+        info!(
+            "Upload of {} files completed in {:?}",
+            uploads.len(),
+            start.elapsed()
+        );
+        let mut archive = Archive::new();
+        for (path, metadata, maybe_file) in uploads.into_iter() {
+            match maybe_file {
+                Ok(file) => archive.add_file(path, file, metadata),
+                Err(err) => {
+                    error!("Failed to upload file: {path:?}: {err:?}");
+                    return Err(err);
+                }
+            }
         }
 
-        let archive = Archive { map };
+        // upload archive
         let archive_serialized = archive.into_bytes()?;
-
         let arch_addr = self.data_put(archive_serialized, wallet).await?;
 
+        info!("Complete archive upload completed in {:?}", start.elapsed());
+        #[cfg(feature = "loud")]
+        println!("Upload completed in {:?}", start.elapsed());
         Ok(arch_addr)
     }
 
@@ -111,6 +169,10 @@
         path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<DataAddr, UploadError> {
+        info!("Uploading file: {path:?}");
+        #[cfg(feature = "loud")]
+        println!("Uploading file: {path:?}");
+
         let data = tokio::fs::read(path).await?;
         let data = Bytes::from(data);
         let addr = self.data_put(data, wallet).await?;
@@ -119,8 +181,8 @@
     /// Get the cost to upload a file/dir to the network.
/// quick and dirty implementation, please refactor once files are cleanly implemented - pub async fn file_cost(&self, path: &PathBuf) -> Result { + pub async fn file_cost(&self, path: &PathBuf) -> Result<sn_evm::AttoTokens, FileCostError> { - let mut map = HashMap::new(); + let mut archive = Archive::new(); let mut total_cost = sn_evm::Amount::ZERO; for entry in walkdir::WalkDir::new(path) { @@ -135,29 +197,74 @@ let data = tokio::fs::read(&path).await?; let file_bytes = Bytes::from(data); - let file_cost = self.data_cost(file_bytes.clone()).await.expect("TODO"); + let file_cost = self.data_cost(file_bytes.clone()).await?; total_cost += file_cost.as_atto(); // re-do encryption to get the correct map xorname here // this code needs refactor let now = sn_networking::target_arch::Instant::now(); - let (data_map_chunk, _) = crate::self_encryption::encrypt(file_bytes).expect("TODO"); + let (data_map_chunk, _) = crate::self_encryption::encrypt(file_bytes)?; tracing::debug!("Encryption took: {:.2?}", now.elapsed()); let map_xor_name = *data_map_chunk.address().xorname(); - map.insert(path, map_xor_name); + archive.add_file(path, map_xor_name, Metadata::new()); } - let root = Archive { map }; - let root_serialized = rmp_serde::to_vec(&root).expect("TODO"); + let root_serialized = rmp_serde::to_vec(&archive)?; - let archive_cost = self - .data_cost(Bytes::from(root_serialized)) - .await - .expect("TODO"); + let archive_cost = self.data_cost(Bytes::from(root_serialized)).await?; total_cost += archive_cost.as_atto(); Ok(total_cost.into()) } } + +// Get metadata from a directory entry. Defaults to `0` for the creation and modification times if +// any error is encountered. Logs a warning when a timestamp cannot be read. +pub(crate) fn metadata_from_entry(entry: &walkdir::DirEntry) -> Metadata { + let fs_metadata = match entry.metadata() { + Ok(metadata) => metadata, + Err(err) => { + tracing::warn!( + "Failed to get metadata for `{}`: {err}", + entry.path().display() + ); + return Metadata { + uploaded: 0, + created: 0, + modified: 0, + }; + } + }; + + let unix_time = |property: &'static str, time: std::io::Result<SystemTime>| { + time.inspect_err(|err| { + tracing::warn!( + "Failed to get '{property}' metadata for `{}`: {err}", + entry.path().display() + ); + }) + .unwrap_or(SystemTime::UNIX_EPOCH) + .duration_since(SystemTime::UNIX_EPOCH) + .inspect_err(|err| { + tracing::warn!( + "'{property}' metadata of `{}` is before UNIX epoch: {err}", + entry.path().display() + ); + }) + .unwrap_or(Duration::from_secs(0)) + .as_secs() + }; + let created = unix_time("created", fs_metadata.created()); + let modified = unix_time("modified", fs_metadata.modified()); + + Metadata { + uploaded: SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or(Duration::from_secs(0)) + .as_secs(), + created, + modified, + } +} diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs new file mode 100644 index 0000000000..08d453ae37 --- /dev/null +++ b/autonomi/src/client/fs_private.rs @@ -0,0 +1,132 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software.
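Before the private variants below, a usage sketch of the public fs API above. This is a hedged sketch, not part of the diff: `peers` and `wallet` construction is assumed, the paths are illustrative, and an async context is required.

```rust
// Estimate the cost of a directory, then upload it as one archive.
let client = Client::connect(&peers).await?;
let dir = PathBuf::from("./my-site");

// file_cost walks the tree and re-encrypts each file to size the quote (see above).
let estimate = client.file_cost(&dir).await?;
println!("estimated upload cost: {estimate}");

// dir_upload walks the same tree, uploads files in parallel batches of
// *FILE_UPLOAD_BATCH_SIZE, then uploads the archive mapping paths to addresses.
let archive_addr = client.dir_upload(dir, &wallet).await?;
println!("archive at: {archive_addr:?}");
```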
+ +use crate::client::utils::process_tasks_with_max_concurrency; +use crate::client::Client; +use bytes::Bytes; +use sn_evm::EvmWallet; +use std::path::PathBuf; + +use super::archive_private::{PrivateArchive, PrivateArchiveAccess}; +use super::data_private::PrivateDataAccess; +use super::fs::{DownloadError, UploadError}; + +use super::fs::FILE_UPLOAD_BATCH_SIZE; + +impl Client { + /// Download a private file from network to local file system + pub async fn private_file_download( + &self, + data_access: PrivateDataAccess, + to_dest: PathBuf, + ) -> Result<(), DownloadError> { + let data = self.private_data_get(data_access).await?; + if let Some(parent) = to_dest.parent() { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(to_dest, data).await?; + Ok(()) + } + + /// Download a private directory from network to local file system + pub async fn private_dir_download( + &self, + archive_access: PrivateArchiveAccess, + to_dest: PathBuf, + ) -> Result<(), DownloadError> { + let archive = self.private_archive_get(archive_access).await?; + for (path, addr, _meta) in archive.iter() { + self.private_file_download(addr.clone(), to_dest.join(path)) + .await?; + } + Ok(()) + } + + /// Upload a private directory to the network. The directory is recursively walked. + /// Reads all files, splits them into chunks, uploads the chunks, then uploads the private archive; returns a [`PrivateArchiveAccess`] pointing to that archive. + pub async fn private_dir_upload( + &self, + dir_path: PathBuf, + wallet: &EvmWallet, + ) -> Result<PrivateArchiveAccess, UploadError> { + info!("Uploading directory as private: {dir_path:?}"); + let start = tokio::time::Instant::now(); + + // start upload of files in parallel + let mut upload_tasks = Vec::new(); + for entry in walkdir::WalkDir::new(dir_path) { + let entry = entry?; + if !entry.file_type().is_file() { + continue; + } + + let metadata = super::fs::metadata_from_entry(&entry); + let path = entry.path().to_path_buf(); + upload_tasks.push(async move { + let file = self.private_file_upload(path.clone(), wallet).await; + (path, metadata, file) + }); + } + + // wait for all files to be uploaded + let uploads = + process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await; + info!( + "Upload of {} files completed in {:?}", + uploads.len(), + start.elapsed() + ); + let mut archive = PrivateArchive::new(); + for (path, metadata, maybe_file) in uploads.into_iter() { + match maybe_file { + Ok(file) => archive.add_file(path, file, metadata), + Err(err) => { + error!("Failed to upload file: {path:?}: {err:?}"); + return Err(err); + } + } + } + + // upload archive + let archive_serialized = archive.into_bytes()?; + let arch_addr = self.private_data_put(archive_serialized, wallet).await?; + + info!( + "Complete private archive upload completed in {:?}", + start.elapsed() + ); + #[cfg(feature = "loud")] + println!("Upload completed in {:?}", start.elapsed()); + Ok(arch_addr) + } + + /// Upload a private file to the network.
+ /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns [`PrivateDataAccess`] (pointing to the datamap) + async fn private_file_upload( + &self, + path: PathBuf, + wallet: &EvmWallet, + ) -> Result { + info!("Uploading file: {path:?}"); + #[cfg(feature = "loud")] + println!("Uploading file: {path:?}"); + + let data = tokio::fs::read(path).await?; + let data = Bytes::from(data); + let addr = self.private_data_put(data, wallet).await?; + Ok(addr) + } +} diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index f19216fe84..0938dcbf9d 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -11,9 +11,17 @@ pub mod address; #[cfg(feature = "data")] pub mod archive; #[cfg(feature = "data")] +pub mod archive_private; +#[cfg(feature = "data")] pub mod data; +#[cfg(feature = "data")] +pub mod data_private; +#[cfg(feature = "external-signer")] +pub mod external_signer; #[cfg(feature = "fs")] pub mod fs; +#[cfg(feature = "fs")] +pub mod fs_private; #[cfg(feature = "registers")] pub mod registers; #[cfg(feature = "vault")] @@ -34,7 +42,7 @@ use std::{collections::HashSet, sync::Arc, time::Duration}; use tokio::sync::mpsc; /// Time before considering the connection timed out. -pub const CONNECT_TIMEOUT_SECS: u64 = 20; +pub const CONNECT_TIMEOUT_SECS: u64 = 10; const CLIENT_EVENT_CHANNEL_SIZE: usize = 100; @@ -197,11 +205,13 @@ async fn handle_event_receiver( } /// Events that can be broadcasted by the client. +#[derive(Debug, Clone)] pub enum ClientEvent { UploadComplete(UploadSummary), } /// Summary of an upload operation. +#[derive(Debug, Clone)] pub struct UploadSummary { pub record_count: usize, pub tokens_spent: Amount, diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index fb3c55fa6c..52f8944e1e 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -32,8 +32,12 @@ use sn_registers::{Permissions, RegisterCrdt, RegisterOp, SignedRegister}; use std::collections::BTreeSet; use xor_name::XorName; +use super::data::CostError; + #[derive(Debug, thiserror::Error)] pub enum RegisterError { + #[error("Cost error: {0}")] + Cost(#[from] CostError), #[error("Network error")] Network(#[from] NetworkError), #[error("Serialization error")] @@ -143,23 +147,11 @@ impl Client { try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; signed_reg } - // manage forked register case Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { - debug!("Forked register detected for {address:?} merging forks"); - let mut registers: Vec = vec![]; - for (_, (record, _)) in result_map { - registers.push( - try_deserialize_record(&record) - .map_err(|_| RegisterError::Serialization)?, - ); - } - let register = registers.iter().fold(registers[0].clone(), |mut acc, x| { - if let Err(e) = acc.merge(x) { - warn!("Ignoring forked register as we failed to merge conflicting registers at {}: {e}", x.address()); - } - acc - }); - register + error!("Got split record error for register at address: {address}. This should've been handled at the network layer"); + Err(RegisterError::Network(NetworkError::GetRecordError( + GetRecordError::SplitRecord { result_map }, + )))? 
} Err(e) => { error!("Failed to get register {address:?} from network: {e}"); diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs index 0714f60d9d..95d70b6e4d 100644 --- a/autonomi/src/client/utils.rs +++ b/autonomi/src/client/utils.rs @@ -6,16 +6,12 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use std::{ - collections::{BTreeMap, HashMap}, - num::NonZero, -}; - use bytes::Bytes; +use futures::stream::{FuturesUnordered, StreamExt}; use libp2p::kad::{Quorum, Record}; use rand::{thread_rng, Rng}; use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; -use sn_evm::{EvmWallet, ProofOfPayment, QuoteHash, QuotePayment, TxHash}; +use sn_evm::{EvmWallet, PaymentQuote, ProofOfPayment, QuotePayment}; use sn_networking::{ GetRecordCfg, Network, NetworkError, PayeeQuote, PutRecordCfg, VerificationKind, }; @@ -24,14 +20,15 @@ use sn_protocol::{ storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy}, NetworkAddress, }; +use std::{collections::HashMap, future::Future, num::NonZero}; use xor_name::XorName; -use crate::self_encryption::DataMapLevel; - use super::{ - data::{GetError, PayError, PutError}, + data::{CostError, GetError, PayError, PutError}, Client, }; +use crate::self_encryption::DataMapLevel; +use crate::utils::payment_proof_from_quotes_and_payments; impl Client { /// Fetch and decrypt all chunks in the data map. @@ -152,9 +149,20 @@ impl Client { content_addrs: impl Iterator, wallet: &EvmWallet, ) -> Result<(HashMap, Vec), PayError> { - let cost_map = self.get_store_quotes(content_addrs).await?; + let cost_map = self + .get_store_quotes(content_addrs) + .await? + .into_iter() + .map(|(name, (_, _, q))| (name, q)) + .collect(); + let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map); + // Make sure nobody else can use the wallet while we are paying + debug!("Waiting for wallet lock"); + let lock_guard = wallet.lock().await; + debug!("Locked wallet"); + // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying. // TODO: retry when it fails? // Execute chunk payments @@ -163,7 +171,11 @@ impl Client { .await .map_err(|err| PayError::from(err.0))?; - let proofs = construct_proofs(&cost_map, &payments); + // payment is done, unlock the wallet for other threads + drop(lock_guard); + debug!("Unlocked wallet"); + + let proofs = payment_proof_from_quotes_and_payments(&cost_map, &payments); trace!( "Chunk payments of {} chunks completed. 
{} chunks were free / already paid for", @@ -177,7 +189,7 @@ pub(crate) async fn get_store_quotes( &self, content_addrs: impl Iterator<Item = XorName>, - ) -> Result<HashMap<XorName, PayeeQuote>, PayError> { + ) -> Result<HashMap<XorName, PayeeQuote>, CostError> { let futures: Vec<_> = content_addrs .into_iter() .map(|content_addr| fetch_store_quote_with_retries(&self.network, content_addr)) @@ -193,7 +205,7 @@ async fn fetch_store_quote_with_retries( network: &Network, content_addr: XorName, -) -> Result<(XorName, PayeeQuote), PayError> { +) -> Result<(XorName, PayeeQuote), CostError> { let mut retries = 0; loop { @@ -209,7 +221,7 @@ error!( "Error while fetching store quote: {err:?}, stopping after {retries} retries" ); - break Err(PayError::CouldNotGetStoreQuote(content_addr)); + break Err(CostError::CouldNotGetStoreQuote(content_addr)); } } } @@ -229,44 +241,46 @@ } /// Split a cost map into payments that still need to be executed and chunks that are already paid for. -fn extract_quote_payments( - cost_map: &HashMap<XorName, PayeeQuote>, +pub(crate) fn extract_quote_payments( + cost_map: &HashMap<XorName, PaymentQuote>, ) -> (Vec<QuotePayment>, Vec<XorName>) { let mut to_be_paid = vec![]; let mut already_paid = vec![]; for (chunk_address, quote) in cost_map.iter() { - if quote.2.cost.is_zero() { + if quote.cost.is_zero() { already_paid.push(*chunk_address); } else { - to_be_paid.push(( - quote.2.hash(), - quote.2.rewards_address, - quote.2.cost.as_atto(), - )); + to_be_paid.push((quote.hash(), quote.rewards_address, quote.cost.as_atto())); } } (to_be_paid, already_paid) } -/// Construct payment proofs from cost map and payments map. -fn construct_proofs( - cost_map: &HashMap<XorName, PayeeQuote>, - payments: &BTreeMap<QuoteHash, TxHash>, -) -> HashMap<XorName, ProofOfPayment> { - cost_map - .iter() - .filter_map(|(xor_name, (_, _, quote))| { - payments.get(&quote.hash()).map(|tx_hash| { - ( - *xor_name, - ProofOfPayment { - quote: quote.clone(), - tx_hash: *tx_hash, - }, - ) - }) - }) - .collect() +pub(crate) async fn process_tasks_with_max_concurrency<I, R>(tasks: I, batch_size: usize) -> Vec<R> +where + I: IntoIterator, + I::Item: Future<Output = R> + Send, + R: Send, +{ + let mut futures = FuturesUnordered::new(); + let mut results = Vec::new(); + + for task in tasks.into_iter() { + futures.push(task); + + if futures.len() >= batch_size { + if let Some(result) = futures.next().await { + results.push(result); + } + } + } + + // Process remaining tasks + while let Some(result) = futures.next().await { + results.push(result); + } + + results } diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index af40f61cf6..55103b0578 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -6,21 +6,29 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software.
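A toy illustration of the concurrency helper just added in utils.rs (it is `pub(crate)`, so callable only inside autonomi; the numbers are arbitrary and a tokio runtime is assumed):

```rust
// At most 8 futures are in flight at any time: once the window is full,
// one completion is awaited before the next task is pushed.
let tasks = (0..100u64).map(|i| async move { i * 2 });
let results = process_tasks_with_max_concurrency(tasks, 8).await;

// Note: FuturesUnordered yields results in completion order, not submission
// order, so the output ordering is not guaranteed.
assert_eq!(results.len(), 100);
```

That completion-order behaviour is why `dir_upload` has each task return a `(path, metadata, result)` tuple rather than relying on positional order when assembling the archive.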
-use std::collections::HashSet; +pub mod key; +pub mod user_data; + +pub use key::{derive_vault_key, VaultSecretKey}; +pub use user_data::UserData; +use xor_name::XorName; use crate::client::data::PutError; use crate::client::Client; -use bls::SecretKey; -use bytes::Bytes; use libp2p::kad::{Quorum, Record}; -use sn_evm::EvmWallet; -use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind}; +use sn_evm::{Amount, AttoTokens, EvmWallet}; +use sn_networking::{GetRecordCfg, GetRecordError, NetworkError, PutRecordCfg, VerificationKind}; use sn_protocol::storage::{ try_serialize_record, RecordKind, RetryStrategy, Scratchpad, ScratchpadAddress, }; +use sn_protocol::Bytes; use sn_protocol::{storage::try_deserialize_record, NetworkAddress}; +use std::collections::HashSet; +use std::hash::{DefaultHasher, Hash, Hasher}; use tracing::info; +use super::data::CostError; + #[derive(Debug, thiserror::Error)] pub enum VaultError { #[error("Could not generate Vault secret key from entropy: {0:?}")] @@ -31,24 +39,42 @@ pub enum VaultError { Protocol(#[from] sn_protocol::Error), #[error("Network: {0}")] Network(#[from] NetworkError), + #[error("Vault not found")] + Missing, +} + +/// The content type of the vault data +/// The number is used to determine the type of the contents of the bytes contained in a vault +/// Custom apps can use this to store their own custom types of data in vaults +/// It is recommended to use the hash of the app name or an unique identifier as the content type using [`app_name_to_vault_content_type`] +/// The value 0 is reserved for tests +pub type VaultContentType = u64; + +/// For custom apps using Scratchpad, this function converts an app identifier or name to a [`VaultContentType`] +pub fn app_name_to_vault_content_type(s: T) -> VaultContentType { + let mut hasher = DefaultHasher::new(); + s.hash(&mut hasher); + hasher.finish() } impl Client { /// Retrieves and returns a decrypted vault if one exists. + /// Returns the content type of the bytes in the vault pub async fn fetch_and_decrypt_vault( &self, - secret_key: &SecretKey, - ) -> Result, VaultError> { + secret_key: &VaultSecretKey, + ) -> Result<(Bytes, VaultContentType), VaultError> { info!("Fetching and decrypting vault"); let pad = self.get_vault_from_network(secret_key).await?; - Ok(pad.decrypt_data(secret_key)?) + let data = pad.decrypt_data(secret_key)?; + Ok((data, pad.data_encoding())) } /// Gets the vault Scratchpad from a provided client public key async fn get_vault_from_network( &self, - secret_key: &SecretKey, + secret_key: &VaultSecretKey, ) -> Result { let client_pk = secret_key.public_key(); @@ -65,30 +91,91 @@ impl Client { is_register: false, }; - let record = self + let pad = match self .network - .get_record_from_network(scratch_key, &get_cfg) + .get_record_from_network(scratch_key.clone(), &get_cfg) .await - .inspect_err(|err| { - debug!("Failed to fetch vault {network_address:?} from network: {err}"); - })?; + { + Ok(record) => { + debug!("Got scratchpad for {scratch_key:?}"); + try_deserialize_record::(&record) + .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))? 
+ } + Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { + debug!("Got multiple scratchpads for {scratch_key:?}"); + let mut pads = result_map + .values() + .map(|(record, _)| try_deserialize_record::(record)) + .collect::, _>>() + .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?; - let pad = try_deserialize_record::(&record) - .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?; + // take the latest versions + pads.sort_by_key(|s| s.count()); + let max_version = pads.last().map(|p| p.count()).unwrap_or_else(|| { + error!("Got empty scratchpad vector for {scratch_key:?}"); + u64::MAX + }); + let latest_pads: Vec<_> = pads + .into_iter() + .filter(|s| s.count() == max_version) + .collect(); + + // make sure we only have one of latest version + let pad = match &latest_pads[..] { + [one] => one, + [multi, ..] => { + error!("Got multiple conflicting scratchpads for {scratch_key:?} with the latest version, returning the first one"); + multi + } + [] => { + error!("Got empty scratchpad vector for {scratch_key:?}"); + return Err(VaultError::Missing); + } + }; + pad.to_owned() + } + Err(e) => { + warn!("Failed to fetch vault {network_address:?} from network: {e}"); + return Err(e)?; + } + }; Ok(pad) } + /// Get the cost of creating a new vault + pub async fn vault_cost(&self, owner: &VaultSecretKey) -> Result { + info!("Getting cost for vault"); + let client_pk = owner.public_key(); + let content_type = Default::default(); + let scratch = Scratchpad::new(client_pk, content_type); + let vault_xor = scratch.network_address().as_xorname().unwrap_or_default(); + + // NB TODO: vault should be priced differently from other data + let cost_map = self.get_store_quotes(std::iter::once(vault_xor)).await?; + let total_cost = AttoTokens::from_atto( + cost_map + .values() + .map(|quote| quote.2.cost.as_atto()) + .sum::(), + ); + + Ok(total_cost) + } + /// Put data into the client's VaultPacket /// - /// Pays for a new VaultPacket if none yet created for the client. Returns the current version - /// of the data on success. + /// Pays for a new VaultPacket if none yet created for the client. + /// Provide the bytes to be written to the vault and the content type of those bytes. + /// It is recommended to use the hash of the app name or unique identifier as the content type. pub async fn write_bytes_to_vault( - &mut self, + &self, data: Bytes, - wallet: &mut EvmWallet, - secret_key: &SecretKey, - ) -> Result { + wallet: &EvmWallet, + secret_key: &VaultSecretKey, + content_type: VaultContentType, + ) -> Result { + let mut total_cost = AttoTokens::zero(); let client_pk = secret_key.public_key(); let pad_res = self.get_vault_from_network(secret_key).await; @@ -103,32 +190,42 @@ impl Client { ); is_new = false; + + if existing_data.owner() != &client_pk { + return Err(PutError::VaultBadOwner); + } + existing_data } else { trace!("new scratchpad creation"); - Scratchpad::new(client_pk) + Scratchpad::new(client_pk, content_type) }; - let next_count = scratch.update_and_sign(data, secret_key); + let _ = scratch.update_and_sign(data, secret_key); + debug_assert!(scratch.is_valid(), "Must be valid after being signed. 
This is a bug, please report it by opening an issue on our github"); + let scratch_address = scratch.network_address(); let scratch_key = scratch_address.to_record_key(); info!("Writing to vault at {scratch_address:?}",); let record = if is_new { - self.pay( - [&scratch_address].iter().filter_map(|f| f.as_xorname()), - wallet, - ) - .await - .inspect_err(|err| { - error!("Failed to pay for new vault at addr: {scratch_address:?} : {err}"); - })?; + let scratch_xor = [&scratch_address] + .iter() + .filter_map(|f| f.as_xorname()) + .collect::>(); + let (payment_proofs, _) = self + .pay(scratch_xor.iter().cloned(), wallet) + .await + .inspect_err(|err| { + error!("Failed to pay for new vault at addr: {scratch_address:?} : {err}"); + })?; - let scratch_xor = scratch_address.as_xorname().ok_or(PutError::VaultXorName)?; - let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?; - // Should always be there, else it would have failed on the payment step. - let proof = payment_proofs.get(&scratch_xor).expect("Missing proof"); + let proof = match payment_proofs.values().next() { + Some(proof) => proof, + None => return Err(PutError::PaymentUnexpectedlyInvalid(scratch_address)), + }; + total_cost = proof.quote.cost; Record { key: scratch_key, @@ -181,6 +278,6 @@ impl Client { ) })?; - Ok(next_count) + Ok(total_cost) } } diff --git a/autonomi/src/client/vault/key.rs b/autonomi/src/client/vault/key.rs new file mode 100644 index 0000000000..e88fd12ef7 --- /dev/null +++ b/autonomi/src/client/vault/key.rs @@ -0,0 +1,56 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
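A hedged sketch of the vault flow above: the client, wallet and `evm_sk_hex` (an EVM secret key hex string) are assumed, and `derive_vault_key` is defined in key.rs just below.

```rust
// Derive one vault key from the EVM key, estimate the creation cost, write
// app bytes tagged with a content type, then read them back.
let vault_key = derive_vault_key(evm_sk_hex)?;
let content_type = app_name_to_vault_content_type("MyApp");

let estimate = client.vault_cost(&vault_key).await?;
println!("creating this vault costs about {estimate}");

let paid = client
    .write_bytes_to_vault(Bytes::from("app state"), &wallet, &vault_key, content_type)
    .await?;
println!("paid {paid} (stays zero when updating an existing vault)");

let (bytes, fetched_type) = client.fetch_and_decrypt_vault(&vault_key).await?;
assert_eq!(fetched_type, content_type);
```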
+ +use blst::min_pk::SecretKey as BlstSecretKey; +use sha2::{Digest, Sha256}; + +/// Secret key to decrypt vault content +pub type VaultSecretKey = bls::SecretKey; + +#[derive(Debug, thiserror::Error)] +pub enum VaultKeyError { + #[error("Failed to sign message: {0}")] + FailedToSignMessage(#[from] sn_evm::cryptography::SignError), + #[error("Failed to generate vault secret key: {0}")] + FailedToGenerateVaultSecretKey(String), + #[error("Failed to convert blst secret key to blsttc secret key: {0}")] + BlsConversionError(#[from] bls::Error), + #[error("Failed to generate blst secret key")] + KeyGenerationError, +} + +/// Message used to generate the vault secret key from the EVM secret key +const VAULT_SECRET_KEY_SEED: &[u8] = b"Massive Array of Internet Disks Secure Access For Everyone"; + +/// Derives the vault secret key from the EVM secret key hex string +/// The EVM secret key is used to sign a message and the signature is hashed to derive the vault secret key +/// Being able to derive the vault secret key from the EVM secret key allows users to only keep track of one key: the EVM secret key +pub fn derive_vault_key(evm_sk_hex: &str) -> Result { + let signature = sn_evm::cryptography::sign_message(evm_sk_hex, VAULT_SECRET_KEY_SEED) + .map_err(VaultKeyError::FailedToSignMessage)?; + + let blst_key = derive_secret_key_from_seed(&signature)?; + let vault_sk = blst_to_blsttc(&blst_key)?; + Ok(vault_sk) +} + +/// Convert a blst secret key to a blsttc secret key and pray that endianness is the same +fn blst_to_blsttc(sk: &BlstSecretKey) -> Result { + let sk_bytes = sk.to_bytes(); + let sk = bls::SecretKey::from_bytes(sk_bytes).map_err(VaultKeyError::BlsConversionError)?; + Ok(sk) +} + +fn derive_secret_key_from_seed(seed: &[u8]) -> Result { + let mut hasher = Sha256::new(); + hasher.update(seed); + let hashed_seed = hasher.finalize(); + let sk = + BlstSecretKey::key_gen(&hashed_seed, &[]).map_err(|_| VaultKeyError::KeyGenerationError)?; + Ok(sk) +} diff --git a/autonomi/src/client/vault/user_data.rs b/autonomi/src/client/vault/user_data.rs new file mode 100644 index 0000000000..1f91b547bb --- /dev/null +++ b/autonomi/src/client/vault/user_data.rs @@ -0,0 +1,140 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
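The derivation above is deterministic by construction (sign a fixed seed message, hash the signature, run key generation), which is the property that lets users keep track of only their EVM key. A sketch of that property, using a well-known published Anvil/Hardhat dev key purely for illustration:

```rust
// Same EVM secret key in, same vault key out. Never use this published
// development key for anything real.
let sk_hex = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
let a = derive_vault_key(sk_hex)?;
let b = derive_vault_key(sk_hex)?;
assert_eq!(a.public_key(), b.public_key());
```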
+ +use std::collections::HashMap; + +use crate::client::archive::ArchiveAddr; +use crate::client::archive_private::PrivateArchiveAccess; +use crate::client::data::GetError; +use crate::client::data::PutError; +use crate::client::registers::RegisterAddress; +use crate::client::vault::VaultError; +use crate::client::vault::{app_name_to_vault_content_type, VaultContentType, VaultSecretKey}; +use crate::client::Client; +use serde::{Deserialize, Serialize}; +use sn_evm::AttoTokens; +use sn_evm::EvmWallet; +use sn_protocol::Bytes; +use std::sync::LazyLock; + +/// Vault content type for UserDataVault +pub static USER_DATA_VAULT_CONTENT_IDENTIFIER: LazyLock = + LazyLock::new(|| app_name_to_vault_content_type("UserData")); + +/// UserData is stored in Vaults and contains most of a user's private data: +/// It allows users to keep track of only the key to their User Data Vault +/// while having the rest kept on the Network encrypted in a Vault for them +/// Using User Data Vault is optional, one can decide to keep all their data locally instead. +#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)] +pub struct UserData { + /// The register secret key hex encoded + pub register_sk: Option, + /// Owned register addresses, along with their names (can be empty) + pub registers: HashMap, + /// Owned file archive addresses, along with their names (can be empty) + pub file_archives: HashMap, + /// Owned private file archives, along with their names (can be empty) + pub private_file_archives: HashMap, +} + +/// Errors that can occur during the get operation. +#[derive(Debug, thiserror::Error)] +pub enum UserDataVaultGetError { + #[error("Vault error: {0}")] + Vault(#[from] VaultError), + #[error("Unsupported vault content type: {0}")] + UnsupportedVaultContentType(VaultContentType), + #[error("Serialization error: {0}")] + Serialization(String), + #[error("Get error: {0}")] + GetError(#[from] GetError), +} + +impl UserData { + /// Create a new empty UserData + pub fn new() -> Self { + Self::default() + } + + /// Add an archive. Returning `Option::Some` with the old name if the archive was already in the set. + pub fn add_file_archive(&mut self, archive: ArchiveAddr) -> Option { + self.file_archives.insert(archive, "".into()) + } + + /// Add an archive. Returning `Option::Some` with the old name if the archive was already in the set. + pub fn add_file_archive_with_name( + &mut self, + archive: ArchiveAddr, + name: String, + ) -> Option { + self.file_archives.insert(archive, name) + } + + /// Remove an archive. Returning `Option::Some` with the old name if the archive was already in the set. 
+ pub fn remove_file_archive(&mut self, archive: ArchiveAddr) -> Option { + self.file_archives.remove(&archive) + } + + /// To bytes + pub fn to_bytes(&self) -> Result { + let bytes = rmp_serde::to_vec(&self)?; + Ok(Bytes::from(bytes)) + } + + /// From bytes + pub fn from_bytes(bytes: Bytes) -> Result { + let vault_content = rmp_serde::from_slice(&bytes)?; + Ok(vault_content) + } +} + +impl Client { + /// Get the user data from the vault + pub async fn get_user_data_from_vault( + &self, + secret_key: &VaultSecretKey, + ) -> Result { + let (bytes, content_type) = self.fetch_and_decrypt_vault(secret_key).await?; + + if content_type != *USER_DATA_VAULT_CONTENT_IDENTIFIER { + return Err(UserDataVaultGetError::UnsupportedVaultContentType( + content_type, + )); + } + + let vault = UserData::from_bytes(bytes).map_err(|e| { + UserDataVaultGetError::Serialization(format!( + "Failed to deserialize vault content: {e}" + )) + })?; + + Ok(vault) + } + + /// Put the user data to the vault + /// Returns the total cost of the put operation + pub async fn put_user_data_to_vault( + &self, + secret_key: &VaultSecretKey, + wallet: &EvmWallet, + user_data: UserData, + ) -> Result { + let bytes = user_data + .to_bytes() + .map_err(|e| PutError::Serialization(format!("Failed to serialize user data: {e}")))?; + let total_cost = self + .write_bytes_to_vault( + bytes, + wallet, + secret_key, + *USER_DATA_VAULT_CONTENT_IDENTIFIER, + ) + .await?; + Ok(total_cost) + } +} diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 4607949ee2..a102626ea3 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -1,21 +1,29 @@ use libp2p::Multiaddr; use wasm_bindgen::prelude::*; -#[wasm_bindgen] -pub struct Client(super::Client); +use super::address::{addr_to_str, str_to_addr}; -#[wasm_bindgen] -pub struct ChunkAddr(xor_name::XorName); +#[cfg(feature = "vault")] +use super::vault::UserData; -#[wasm_bindgen] -pub struct DataAddr(xor_name::XorName); -#[wasm_bindgen] -impl DataAddr { - #[wasm_bindgen(js_name = toString)] - pub fn to_string(&self) -> String { - crate::client::address::addr_to_str(self.0) - } -} +/// The `Client` object allows interaction with the network to store and retrieve data. +/// +/// To connect to the network, see {@link Client.connect}. +/// +/// # Example +/// +/// ```js +/// let client = await Client.connect(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); +/// const dataAddr = await client.dataPut(new Uint8Array([0, 1, 2, 3]), wallet); +/// +/// const archive = new Archive(); +/// archive.addNewFile("foo", dataAddr); +/// +/// const archiveAddr = await client.archivePut(archive, wallet); +/// const archiveFetched = await client.archiveGet(archiveAddr); +/// ``` +#[wasm_bindgen(js_name = Client)] +pub struct JsClient(super::Client); #[wasm_bindgen] pub struct AttoTokens(sn_evm::AttoTokens); @@ -27,10 +35,17 @@ impl AttoTokens { } } -#[wasm_bindgen] -impl Client { - #[wasm_bindgen(constructor)] - pub async fn connect(peers: Vec) -> Result { +#[wasm_bindgen(js_class = Client)] +impl JsClient { + /// Connect to the network via the given peers. 
+ /// + /// # Example + /// + /// ```js + /// let client = await Client.connect(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); + /// ``` + #[wasm_bindgen] + pub async fn connect(peers: Vec) -> Result { let peers = peers .into_iter() .map(|peer| peer.parse()) @@ -38,33 +53,49 @@ impl Client { let client = super::Client::connect(&peers).await?; - Ok(Client(client)) + Ok(JsClient(client)) } + /// Upload a chunk to the network. + /// + /// Returns the hex encoded address of the chunk. + /// + /// This is not yet implemented. #[wasm_bindgen(js_name = chunkPut)] - pub async fn chunk_put(&self, _data: Vec, _wallet: Wallet) -> Result { + pub async fn chunk_put(&self, _data: Vec, _wallet: &JsWallet) -> Result { async { unimplemented!() }.await } + /// Fetch the chunk from the network. #[wasm_bindgen(js_name = chunkGet)] - pub async fn chunk_get(&self, addr: ChunkAddr) -> Result, JsError> { - let chunk = self.0.chunk_get(addr.0).await?; + pub async fn chunk_get(&self, addr: String) -> Result, JsError> { + let addr = str_to_addr(&addr)?; + let chunk = self.0.chunk_get(addr).await?; + Ok(chunk.value().to_vec()) } + /// Upload data to the network. + /// + /// Returns the hex encoded address of the data. #[wasm_bindgen(js_name = dataPut)] - pub async fn data_put(&self, data: Vec, wallet: Wallet) -> Result { + pub async fn data_put(&self, data: Vec, wallet: &JsWallet) -> Result { let data = crate::Bytes::from(data); let xorname = self.0.data_put(data, &wallet.0).await?; - Ok(DataAddr(xorname)) + + Ok(addr_to_str(xorname)) } + /// Fetch the data from the network. #[wasm_bindgen(js_name = dataGet)] - pub async fn data_get(&self, addr: DataAddr) -> Result, JsError> { - let data = self.0.data_get(addr.0).await?; + pub async fn data_get(&self, addr: String) -> Result, JsError> { + let addr = str_to_addr(&addr)?; + let data = self.0.data_get(addr).await?; + Ok(data.to_vec()) } + /// Get the cost of uploading data to the network. #[wasm_bindgen(js_name = dataCost)] pub async fn data_cost(&self, data: Vec) -> Result { let data = crate::Bytes::from(data); @@ -74,13 +105,315 @@ impl Client { } } -#[wasm_bindgen] -pub struct Wallet(evmlib::wallet::Wallet); +mod archive { + use super::*; + use crate::client::{address::str_to_addr, archive::Archive}; + use std::path::PathBuf; + + /// Structure mapping paths to data addresses. + #[wasm_bindgen(js_name = Archive)] + pub struct JsArchive(Archive); + + #[wasm_bindgen(js_class = Archive)] + impl JsArchive { + /// Create a new archive. + #[wasm_bindgen(constructor)] + pub fn new() -> Self { + Self(Archive::new()) + } + + /// Add a new file to the archive. + #[wasm_bindgen(js_name = addNewFile)] + pub fn add_new_file(&mut self, path: String, data_addr: String) -> Result<(), JsError> { + let path = PathBuf::from(path); + let data_addr = str_to_addr(&data_addr)?; + self.0.add_new_file(path, data_addr); + + Ok(()) + } + + #[wasm_bindgen(js_name = renameFile)] + pub fn rename_file(&mut self, old_path: String, new_path: String) -> Result<(), JsError> { + let old_path = PathBuf::from(old_path); + let new_path = PathBuf::from(new_path); + self.0.rename_file(&old_path, &new_path)?; + + Ok(()) + } + + #[wasm_bindgen] + pub fn map(&self) -> Result { + let files = serde_wasm_bindgen::to_value(self.0.map())?; + Ok(files) + } + } + + #[wasm_bindgen(js_class = Client)] + impl JsClient { + /// Fetch an archive from the network. 
+ #[wasm_bindgen(js_name = archiveGet)] + pub async fn archive_get(&self, addr: String) -> Result { + let addr = str_to_addr(&addr)?; + let archive = self.0.archive_get(addr).await?; + let archive = JsArchive(archive); + + Ok(archive) + } + + /// Upload an archive to the network. + /// + /// Returns the hex encoded address of the archive. + #[wasm_bindgen(js_name = archivePut)] + pub async fn archive_put( + &self, + archive: &JsArchive, + wallet: &JsWallet, + ) -> Result { + let addr = self.0.archive_put(archive.0.clone(), &wallet.0).await?; + + Ok(addr_to_str(addr)) + } + } +} + +#[cfg(feature = "vault")] +mod vault { + use super::*; + + /// Structure to keep track of uploaded archives, registers and other data. + #[wasm_bindgen(js_name = UserData)] + pub struct JsUserData(UserData); + + #[wasm_bindgen(js_class = UserData)] + impl JsUserData { + /// Create a new user data structure. + #[wasm_bindgen(constructor)] + pub fn new() -> Self { + Self(UserData::new()) + } + + /// Store an archive address in the user data with an optional name. + /// + /// # Example + /// + /// ```js + /// userData.addFileArchive(archiveAddr, "foo"); + /// ``` + #[wasm_bindgen(js_name = addFileArchive)] + pub fn add_file_archive( + &mut self, + archive: String, + name: Option, + ) -> Result<(), JsError> { + let archive = str_to_addr(&archive)?; + + let old_name = if let Some(ref name) = name { + self.0.add_file_archive_with_name(archive, name.clone()) + } else { + self.0.add_file_archive(archive) + }; + + if let Some(old_name) = old_name { + tracing::warn!( + "Changing name of archive `{archive}` from `{old_name:?}` to `{name:?}`" + ); + } + + Ok(()) + } + + #[wasm_bindgen(js_name = removeFileArchive)] + pub fn remove_file_archive(&mut self, archive: String) -> Result<(), JsError> { + let archive = str_to_addr(&archive)?; + self.0.remove_file_archive(archive); + + Ok(()) + } + + #[wasm_bindgen(js_name = fileArchives)] + pub fn file_archives(&self) -> Result { + let archives = serde_wasm_bindgen::to_value(&self.0.file_archives)?; + Ok(archives) + } + } + + #[wasm_bindgen(js_class = Client)] + impl JsClient { + /// Fetch the user data from the vault. + /// + /// # Example + /// + /// ```js + /// const secretKey = genSecretKey(); + /// const userData = await client.getUserDataFromVault(secretKey); + /// ``` + #[wasm_bindgen(js_name = getUserDataFromVault)] + pub async fn get_user_data_from_vault( + &self, + secret_key: &SecretKeyJs, + ) -> Result { + let user_data = self.0.get_user_data_from_vault(&secret_key.0).await?; + + Ok(JsUserData(user_data)) + } + + /// Put the user data to the vault. 
+ /// + /// # Example + /// + /// ```js + /// const secretKey = genSecretKey(); + /// await client.putUserDataToVault(userData, wallet, secretKey); + /// ``` + #[wasm_bindgen(js_name = putUserDataToVault)] + pub async fn put_user_data_to_vault( + &self, + user_data: &JsUserData, + wallet: &JsWallet, + secret_key: &SecretKeyJs, + ) -> Result<(), JsError> { + self.0 + .put_user_data_to_vault(&secret_key.0, &wallet.0, user_data.0.clone()) + .await?; + + Ok(()) + } + } +} + +#[cfg(feature = "external-signer")] +mod external_signer { + use super::*; + use crate::payment_proof_from_quotes_and_payments; + use sn_evm::external_signer::{approve_to_spend_tokens_calldata, pay_for_quotes_calldata}; + use sn_evm::EvmNetwork; + use sn_evm::ProofOfPayment; + use sn_evm::QuotePayment; + use sn_evm::{Amount, PaymentQuote}; + use sn_evm::{EvmAddress, QuoteHash, TxHash}; + use std::collections::{BTreeMap, HashMap}; + use wasm_bindgen::prelude::wasm_bindgen; + use wasm_bindgen::{JsError, JsValue}; + use xor_name::XorName; + + #[wasm_bindgen(js_class = Client)] + impl JsClient { + /// Get quotes for given data. + /// + /// # Example + /// + /// ```js + /// const [quotes, quotePayments, free_chunks] = await client.getQuotes(data); + /// `` + #[wasm_bindgen(js_name = getQuotes)] + pub async fn get_quotes_for_data(&self, data: Vec) -> Result { + let data = crate::Bytes::from(data); + let result = self.0.get_quotes_for_data(data).await?; + let js_value = serde_wasm_bindgen::to_value(&result)?; + Ok(js_value) + } + + /// Upload data with a proof of payment. + /// + /// # Example + /// + /// ```js + /// const proof = getPaymentProofFromQuotesAndPayments(quotes, payments); + /// const addr = await client.dataPutWithProof(data, proof); + /// ``` + #[wasm_bindgen(js_name = dataPutWithProof)] + pub async fn data_put_with_proof_of_payment( + &self, + data: Vec, + proof: JsValue, + ) -> Result { + let data = crate::Bytes::from(data); + let proof: HashMap = serde_wasm_bindgen::from_value(proof)?; + let xorname = self.0.data_put_with_proof_of_payment(data, proof).await?; + Ok(addr_to_str(xorname)) + } + } + + /// Get the calldata for paying for quotes. + /// + /// # Example + /// + /// ```js + /// const [quotes, quotePayments, free_chunks] = await client.getQuotes(data); + /// const callData = getPayForQuotesCalldata(evmNetwork, quotePayments); + /// ``` + #[wasm_bindgen(js_name = getPayForQuotesCalldata)] + pub fn get_pay_for_quotes_calldata( + network: JsValue, + payments: JsValue, + ) -> Result { + let network: EvmNetwork = serde_wasm_bindgen::from_value(network)?; + let payments: Vec = serde_wasm_bindgen::from_value(payments)?; + let calldata = pay_for_quotes_calldata(&network, payments.into_iter())?; + let js_value = serde_wasm_bindgen::to_value(&calldata)?; + Ok(js_value) + } + + /// Form approve to spend tokens calldata. + #[wasm_bindgen(js_name = getApproveToSpendTokensCalldata)] + pub fn get_approve_to_spend_tokens_calldata( + network: JsValue, + spender: JsValue, + amount: JsValue, + ) -> Result { + let network: EvmNetwork = serde_wasm_bindgen::from_value(network)?; + let spender: EvmAddress = serde_wasm_bindgen::from_value(spender)?; + let amount: Amount = serde_wasm_bindgen::from_value(amount)?; + let calldata = approve_to_spend_tokens_calldata(&network, spender, amount); + let js_value = serde_wasm_bindgen::to_value(&calldata)?; + Ok(js_value) + } + + /// Generate payment proof. 
+ #[wasm_bindgen(js_name = getPaymentProofFromQuotesAndPayments)] + pub fn get_payment_proof_from_quotes_and_payments( + quotes: JsValue, + payments: JsValue, + ) -> Result<JsValue, JsError> { + let quotes: HashMap<XorName, PaymentQuote> = serde_wasm_bindgen::from_value(quotes)?; + let payments: BTreeMap<QuoteHash, TxHash> = serde_wasm_bindgen::from_value(payments)?; + let proof = payment_proof_from_quotes_and_payments(&quotes, &payments); + let js_value = serde_wasm_bindgen::to_value(&proof)?; + Ok(js_value) + } +} + +#[wasm_bindgen(js_name = SecretKey)] +pub struct SecretKeyJs(bls::SecretKey); + +/// # Example +/// +/// ```js +/// const secretKey = genSecretKey(); +/// await client.putUserDataToVault(userData, wallet, secretKey); +/// const userDataFetched = await client.getUserDataFromVault(secretKey); +/// ``` +#[wasm_bindgen(js_name = genSecretKey)] +pub fn gen_secret_key() -> SecretKeyJs { + let secret_key = bls::SecretKey::random(); + SecretKeyJs(secret_key) +} + +/// Get the current `EvmNetwork` that was set via environment variables during the build process of this library. +#[wasm_bindgen(js_name = getEvmNetwork)] +pub fn evm_network() -> Result<JsValue, JsError> { + let evm_network = evmlib::utils::get_evm_network_from_env()?; + let js_value = serde_wasm_bindgen::to_value(&evm_network)?; + Ok(js_value) +} + +#[wasm_bindgen(js_name = Wallet)] +pub struct JsWallet(evmlib::wallet::Wallet); /// Get a funded wallet for testing. This either uses a default private key or the `EVM_PRIVATE_KEY` /// environment variable that was used during the build process of this library. #[wasm_bindgen(js_name = getFundedWallet)] -pub fn funded_wallet() -> Wallet { +pub fn funded_wallet() -> JsWallet { let network = evmlib::utils::get_evm_network_from_env() .expect("Failed to get EVM network from environment variables"); if matches!(network, evmlib::Network::ArbitrumOne) { @@ -95,13 +428,19 @@ let wallet = evmlib::wallet::Wallet::new_from_private_key(network, &private_key) .expect("Invalid private key"); - Wallet(wallet) + JsWallet(wallet) } /// Enable tracing logging in the console. /// /// A level could be passed like `trace` or `warn`. Or set for a specific module/crate /// with `sn_networking=trace,autonomi=info`. +/// +/// # Example +/// +/// ```js +/// logInit("sn_networking=warn,autonomi=trace"); +/// ``` #[wasm_bindgen(js_name = logInit)] pub fn log_init(directive: String) { use tracing_subscriber::prelude::*; diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index abfbd7563a..c73bef1378 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -35,11 +35,14 @@ extern crate tracing; pub mod client; #[cfg(feature = "data")] mod self_encryption; +mod utils; pub use sn_evm::get_evm_network_from_env; pub use sn_evm::EvmNetwork; pub use sn_evm::EvmWallet as Wallet; pub use sn_evm::RewardsAddress; +#[cfg(feature = "external-signer")] +pub use utils::payment_proof_from_quotes_and_payments; #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
pub use bytes::Bytes; diff --git a/autonomi/src/utils.rs b/autonomi/src/utils.rs new file mode 100644 index 0000000000..fc9ceb7718 --- /dev/null +++ b/autonomi/src/utils.rs @@ -0,0 +1,23 @@ +use sn_evm::{PaymentQuote, ProofOfPayment, QuoteHash, TxHash}; +use std::collections::{BTreeMap, HashMap}; +use xor_name::XorName; + +pub fn payment_proof_from_quotes_and_payments( + quotes: &HashMap<XorName, PaymentQuote>, + payments: &BTreeMap<QuoteHash, TxHash>, +) -> HashMap<XorName, ProofOfPayment> { + quotes + .iter() + .filter_map(|(xor_name, quote)| { + payments.get(&quote.hash()).map(|tx_hash| { + ( + *xor_name, + ProofOfPayment { + quote: quote.clone(), + tx_hash: *tx_hash, + }, + ) + }) + }) + .collect() +} diff --git a/autonomi/tests-js/.gitignore b/autonomi/tests-js/.gitignore new file mode 100644 index 0000000000..b512c09d47 --- /dev/null +++ b/autonomi/tests-js/.gitignore @@ -0,0 +1 @@ +node_modules \ No newline at end of file diff --git a/autonomi/tests-js/index.html b/autonomi/tests-js/index.html new file mode 100644 index 0000000000..a24d82ad63 --- /dev/null +++ b/autonomi/tests-js/index.html @@ -0,0 +1,36 @@ [36 lines of HTML markup lost in extraction; the file is a browser Mocha test harness page titled "Mocha Tests"]
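To recap utils.rs above: only quotes whose hash has a matching entry in the payments map yield a `ProofOfPayment`; unpaid quotes are silently dropped by the `filter_map`. A hedged sketch, with `xor_name`, `quote` and `tx_hash` as assumed pre-existing bindings:

```rust
use std::collections::{BTreeMap, HashMap};

// One stored chunk, one executed payment for its quote.
let mut quotes = HashMap::new();
quotes.insert(xor_name, quote.clone());

let mut payments = BTreeMap::new();
payments.insert(quote.hash(), tx_hash);

let proofs = payment_proof_from_quotes_and_payments(&quotes, &payments);
assert!(proofs.contains_key(&xor_name));
assert_eq!(proofs[&xor_name].tx_hash, tx_hash);
```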
diff --git a/autonomi/tests-js/index.js b/autonomi/tests-js/index.js new file mode 100644 index 0000000000..1dd1dffac0 --- /dev/null +++ b/autonomi/tests-js/index.js @@ -0,0 +1,73 @@ +import init, * as atnm from '../pkg/autonomi.js'; +import { assert } from './node_modules/chai/chai.js'; + +function randomData(len) { + const array = new Uint8Array(len); + window.crypto.getRandomValues(array); + return array; +} + +describe('autonomi', function () { + this.timeout(180 * 1000); + + let client; + let wallet; + before(async () => { + await init(); + atnm.logInit("sn_networking=warn,autonomi=trace"); + client = await atnm.Client.connect([window.peer_addr]); + wallet = atnm.getFundedWallet(); + }); + + it('calculates cost', async () => { + const data = randomData(32); + const cost = await client.dataCost(data); + + assert.typeOf(Number.parseFloat(cost.toString()), 'number'); + }); + + it('puts data (32 bytes)', async () => { + const data = randomData(32); + const addr = await client.dataPut(data, wallet); + + assert.typeOf(addr, 'string'); + }); + + it('puts data and gets it (32 bytes)', async () => { + const data = randomData(32); + const addr = await client.dataPut(data, wallet); + const fetchedData = await client.dataGet(addr); + + assert.deepEqual(Array.from(data), Array.from(fetchedData)); + }); + + it('puts data, creates archive and retrieves it', async () => { + const data = randomData(32); + const addr = await client.dataPut(data, wallet); + const archive = new atnm.Archive(); + archive.addNewFile("foo", addr); + const archiveAddr = await client.archivePut(archive, wallet); + + const archiveFetched = await client.archiveGet(archiveAddr); + + assert.deepEqual(archive, archiveFetched); + }); + + it('writes archive to vault and fetches it', async () => { + const addr = "0000000000000000000000000000000000000000000000000000000000000000"; // Dummy data address + const data = randomData(32); + const secretKey = atnm.genSecretKey(); + + const archive = new atnm.Archive(); + archive.addNewFile('foo', addr); + const archiveAddr = await client.archivePut(archive, wallet); + + const userData = new atnm.UserData(); + userData.addFileArchive(archiveAddr, 'foo'); + + await client.putUserDataToVault(userData, wallet, secretKey); + const userDataFetched = await client.getUserDataFromVault(secretKey); + + assert.deepEqual(userDataFetched.fileArchives(), userData.fileArchives()); + }); +}); diff --git a/autonomi/tests-js/package-lock.json b/autonomi/tests-js/package-lock.json new file mode 100644 index 0000000000..61daae0de2 --- /dev/null +++ b/autonomi/tests-js/package-lock.json @@ -0,0 +1,1481 @@ +{ + "name": "tests-js", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "devDependencies": { + "chai": "^5.1.1", + "http-server": "^14.1.1", + "mocha": "^10.7.3" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": {
"version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/basic-auth": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz", + "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "5.1.2" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/basic-auth/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + 
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true, + "license": "ISC" + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/chai": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.1.tgz", + "integrity": "sha512-pT1ZgP8rPNqUgieVaEY+ryQr6Q4HXNg8Ei9UnLUrjN4IA7dvQC5JB+/kxVcPNDHyBcc/26CXPkbNzq3qwrOEKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": 
"~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/corser": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/corser/-/corser-2.0.1.tgz", + "integrity": "sha512-utCYNzRSQIZNPIcGZdQc92UVJYAhtGAteCFg0yRaFm8f0P+CPtyGyHXJcGXnffjCybUCEx3FQ2G7U3/o9eIkVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "dev": true, + "license": 
"BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", + "dev": true, + "funding": [ + { + "type": 
"individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/glob": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", 
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", + "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-server": { + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/http-server/-/http-server-14.1.1.tgz", + "integrity": "sha512-+cbxadF40UXd9T01zUHgA+rlo2Bg1Srer4+B4NwIHdaGxAGGv59nYRnGGDJ9LBk7alpS0US+J+bLLdQOOkJq4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "basic-auth": "^2.0.1", + "chalk": "^4.1.2", + "corser": "^2.0.1", + "he": "^1.2.0", + "html-encoding-sniffer": "^3.0.0", + "http-proxy": "^1.18.1", + "mime": "^1.6.0", + "minimist": "^1.2.6", + "opener": "^1.5.1", + "portfinder": "^1.0.28", + 
"secure-compare": "3.0.1", + "union": "~0.5.0", + "url-join": "^4.0.1" + }, + "bin": { + "http-server": "bin/http-server" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + 
"resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/loupe": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.2.tgz", + "integrity": "sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg==", + "dev": true, + "license": "MIT" + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "dev": true, + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": 
"^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/mocha": { + "version": "10.7.3", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-10.7.3.tgz", + "integrity": "sha512-uQWxAu44wwiACGqjbPYmjo7Lg8sFrS3dQe7PP2FQI+woptP4vZXSMcfMyFL/e1yFEeEpV4RtyTpZROOKmxis+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.3", + "browser-stdout": "^1.3.1", + "chokidar": "^3.5.3", + "debug": "^4.3.5", + "diff": "^5.2.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^8.1.0", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "serialize-javascript": "^6.0.2", + "strip-json-comments": "^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^16.2.0", + "yargs-parser": "^20.2.9", + "yargs-unparser": "^2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "dev": true, + "license": "(WTFPL OR MIT)", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + 
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathval": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", + "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/portfinder": { + "version": "1.0.32", + "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.32.tgz", + "integrity": "sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "async": "^2.6.4", + "debug": "^3.2.7", + "mkdirp": "^0.5.6" + }, + "engines": { + "node": ">= 0.12.0" + } + }, + "node_modules/portfinder/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + 
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/secure-compare": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/secure-compare/-/secure-compare-3.0.1.tgz", + "integrity": "sha512-AckIIV90rPDcBcglUwXPF3kg0P0qmPsPXAj6BBEENQE1p5yA1xfmDJzfi1Tappj37Pv2mVbKpL3Z1T+Nn7k1Qw==", + "dev": true, + "license": "MIT" + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": 
"sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/union": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/union/-/union-0.5.0.tgz", + "integrity": "sha512-N6uOhuW6zO95P3Mel2I2zMsbsanvvtgn6jVqJv4vbVcz/JN0OkL9suomjQGmWtxJQXOCqUJvquc1sMeNz/IwlA==", + "dev": true, + "dependencies": { + "qs": "^6.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/url-join": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", + "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", + "dev": true, + "license": "MIT" + }, + "node_modules/whatwg-encoding": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", + "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/workerpool": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz", + "integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/autonomi/tests-js/package.json b/autonomi/tests-js/package.json new file mode 100644 index 0000000000..6da24b1037 --- /dev/null +++ b/autonomi/tests-js/package.json @@ -0,0 +1,11 @@ +{ + "type": "module", + "scripts": { + "serve": "http-server -c-1 -a 127.0.0.1 ../" + }, + "devDependencies": { + "chai": "^5.1.1", + "http-server": "^14.1.1", + "mocha": "^10.7.3" + } +} \ No newline at end of file diff --git a/autonomi/tests/external_signer.rs b/autonomi/tests/external_signer.rs new file mode 100644 index 0000000000..d97107cb39 --- /dev/null +++ b/autonomi/tests/external_signer.rs @@ -0,0 +1,91 @@ +#![cfg(feature = "external-signer")] + +use alloy::network::TransactionBuilder; +use alloy::providers::Provider; +use autonomi::Client; +use sn_evm::{QuoteHash, TxHash}; +use sn_logging::LogBuilder; +use std::collections::BTreeMap; +use std::time::Duration; +use test_utils::evm::get_funded_wallet; +use test_utils::{gen_random_data, peers_from_env}; +use tokio::time::sleep; + +// Example of how put would be done using external signers. +#[tokio::test] +async fn external_signer_put() -> eyre::Result<()> { + let _log_appender_guard = + LogBuilder::init_single_threaded_tokio_test("external_signer_put", false); + + let client = Client::connect(&peers_from_env()?).await?; + let wallet = get_funded_wallet(); + let data = gen_random_data(1024 * 1024 * 10); + + let (quotes, quote_payments, _free_chunks) = client.get_quotes_for_data(data.clone()).await?; + + // Form quotes payment transaction data + let pay_for_quotes_calldata = autonomi::client::external_signer::pay_for_quotes_calldata( + wallet.network(), + quote_payments.into_iter(), + )?; + + // Init an external wallet provider. 
In the webapp, this would be MetaMask for example + let provider = wallet.to_provider(); + + // Form approve to spend tokens transaction data + let approve_calldata = autonomi::client::external_signer::approve_to_spend_tokens_calldata( + wallet.network(), + pay_for_quotes_calldata.approve_spender, + pay_for_quotes_calldata.approve_amount, + ); + + // Prepare approve to spend tokens transaction + let transaction_request = provider + .transaction_request() + .with_to(approve_calldata.1) + .with_input(approve_calldata.0); + + // Send approve to spend tokens transaction + let _tx_hash = provider + .send_transaction(transaction_request) + .await? + .watch() + .await?; + + let mut payments: BTreeMap<QuoteHash, TxHash> = Default::default(); + + // Execute all quote payment transactions in batches + for (calldata, quote_hashes) in pay_for_quotes_calldata.batched_calldata_map { + // Prepare batched quote payments transaction + let transaction_request = provider + .transaction_request() + .with_to(pay_for_quotes_calldata.to) + .with_input(calldata); + + // Send batched quote payments transaction + let tx_hash = provider + .send_transaction(transaction_request) + .await? + .watch() + .await?; + + // Add to payments, to be used later to construct the proofs + for quote_hash in quote_hashes { + payments.insert(quote_hash, tx_hash); + } + } + + // Payment proofs + let proofs = autonomi::payment_proof_from_quotes_and_payments(&quotes, &payments); + + let addr = client + .data_put_with_proof_of_payment(data.clone(), proofs) + .await?; + + sleep(Duration::from_secs(10)).await; + + let data_fetched = client.data_get(addr).await?; + assert_eq!(data, data_fetched, "data fetched should match data put"); + + Ok(()) +} diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs index 5b1fce533b..b952852bc2 100644 --- a/autonomi/tests/fs.rs +++ b/autonomi/tests/fs.rs @@ -81,8 +81,8 @@ fn compute_dir_sha256(dir: &str) -> Result<String> { async fn file_into_vault() -> Result<()> { let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("file", false); - let mut client = Client::connect(&peers_from_env()?).await?; - let mut wallet = get_funded_wallet(); + let client = Client::connect(&peers_from_env()?).await?; + let wallet = get_funded_wallet(); let client_sk = bls::SecretKey::random(); let addr = client @@ -91,23 +91,22 @@ async fn file_into_vault() -> Result<()> { sleep(Duration::from_secs(2)).await; let archive = client.archive_get(addr).await?; + let set_version = 0; client - .write_bytes_to_vault(archive.into_bytes()?, &mut wallet, &client_sk) + .write_bytes_to_vault(archive.into_bytes()?, &wallet, &client_sk, set_version) .await?; // now assert over the stored account packet let new_client = Client::connect(&[]).await?; - if let Some(ap) = new_client.fetch_and_decrypt_vault(&client_sk).await? 
{ - let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?; + let (ap, got_version) = new_client.fetch_and_decrypt_vault(&client_sk).await?; + assert_eq!(set_version, got_version); + let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?; - assert_eq!( - archive.map, ap_archive_fetched.map, - "archive fetched should match archive put" - ); - } else { - eyre::bail!("No account packet found"); - } + assert_eq!( + archive, ap_archive_fetched, + "archive fetched should match archive put" + ); Ok(()) } diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs index 485193ea48..8f27576f06 100644 --- a/autonomi/tests/wasm.rs +++ b/autonomi/tests/wasm.rs @@ -21,15 +21,15 @@ wasm_bindgen_test_configure!(run_in_browser); async fn put() -> Result<(), Box<dyn std::error::Error>> { enable_logging_wasm("sn_networking,autonomi,wasm"); - let client = Client::connect(&peers_from_env()?).await.unwrap(); + let client = Client::connect(&peers_from_env()?).await?; let wallet = get_funded_wallet(); + let data = gen_random_data(1024 * 1024 * 10); - let data = gen_random_data(1024 * 1024 * 2); // 2MiB - let addr = client.put(data.clone(), &wallet).await.unwrap(); + let addr = client.data_put(data.clone(), &wallet).await?; - sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(10)).await; - let data_fetched = client.get(addr).await.unwrap(); + let data_fetched = client.data_get(addr).await?; assert_eq!(data, data_fetched, "data fetched should match data put"); Ok(()) diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index 6712604130..5182f2eca7 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm_testnet" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1" +version = "0.1.2" [dependencies] clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1" } +evmlib = { path = "../evmlib", version = "0.1.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 8aa1dd027f..23c6a35e45 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,14 +6,15 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1" +version = "0.1.2" [features] wasm-bindgen = ["alloy/wasm-bindgen"] local = [] +external-signer = [] [dependencies] -alloy = { version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } dirs-next = "~2.0.0" serde = "=1.0.210" serde_with = { version = "3.11.0", features = ["macros"] } diff --git a/evmlib/artifacts/AutonomiNetworkToken.json b/evmlib/artifacts/AutonomiNetworkToken.json index b075133e1c..841ed5d678 100644 --- a/evmlib/artifacts/AutonomiNetworkToken.json +++ b/evmlib/artifacts/AutonomiNetworkToken.json @@ -890,8 +890,8 @@ "type": "function" } ], - "bytecode": 
"0x6101606040523480156200001257600080fd5b506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e00000000000000000000000081525080604051806040016040528060018152602001603160f81b8152506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e0000000000000000000000008152506040518060400160405280600381526020016210539560ea1b8152508160039081620000c79190620009b5565b506004620000d68282620009b5565b50620000e891508390506005620001c0565b61012052620000f9816006620001c0565b61014052815160208084019190912060e052815190820120610100524660a0526200018760e05161010051604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201529081019290925260608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b60805250503060c05250620001ba33620001a46012600a62000b94565b620001b4906301312d0062000ba5565b620001f9565b62000cae565b6000602083511015620001e057620001d8836200023b565b9050620001f3565b81620001ed8482620009b5565b5060ff90505b92915050565b6001600160a01b038216620002295760405163ec442f0560e01b8152600060048201526024015b60405180910390fd5b62000237600083836200027e565b5050565b600080829050601f8151111562000269578260405163305a27a960e01b815260040162000220919062000bbf565b8051620002768262000c10565b179392505050565b6200028b83838362000290565b505050565b6200029d838383620002ff565b6001600160a01b038316620002f2576000620002b860025490565b90506001600160d01b0380821115620002ef57604051630e58ae9360e11b8152600481018390526024810182905260440162000220565b50505b6200028b83838362000432565b6001600160a01b0383166200032e57806002600082825462000322919062000c35565b90915550620003a29050565b6001600160a01b03831660009081526020819052604090205481811015620003835760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640162000220565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b038216620003c057600280548290039055620003df565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040516200042591815260200190565b60405180910390a3505050565b6001600160a01b038316620004675762000464600a62000953620004ca60201b176200045e84620004df565b62000519565b50505b6001600160a01b038216620004965762000493600a6200095f6200055660201b176200045e84620004df565b50505b6001600160a01b038381166000908152600860205260408082205485841683529120546200028b9291821691168362000564565b6000620004d8828462000c4b565b9392505050565b60006001600160d01b0382111562000515576040516306dfcc6560e41b815260d060048201526024810183905260440162000220565b5090565b600080620005496200052a620006cb565b620005406200053988620006dc565b868860201c565b8791906200072b565b915091505b935093915050565b6000620004d8828462000c75565b816001600160a01b0316836001600160a01b031614158015620005875750600081115b156200028b576001600160a01b038316156200062a576001600160a01b038316600090815260096020908152604082208291620005d5919062000556901b6200095f176200045e86620004df565b6001600160d01b031691506001600160d01b03169150846001600160a01b031660008051602062002bda83398151915283836040516200061f929190918252602082015260400190565b60405180910390a250505b6001600160a01b038216156200028b576001600160a01b038216600090815260096020908152604082208291620006729190620004ca901b62000953176200045e86620004df565b6001600160d01b031691506001600160d01b03169150836001600160a01b031660008051602062002bda8339815191528383604051620006bc929190918252602082015260400190565b60405180910390a25050505050565b6000620006d76200073b565b905090565b80546000908015620007225762000708836200
06fa60018462000c98565b600091825260209091200190565b54660100000000000090046001600160d01b0316620004d8565b60009392505050565b6000806200054985858562000748565b6000620006d743620008da565b8254600090819080156200087b5760006200076a87620006fa60018562000c98565b60408051808201909152905465ffffffffffff80821680845266010000000000009092046001600160d01b031660208401529192509087161015620007c257604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff808816911603620008165784620007e988620006fa60018662000c98565b80546001600160d01b039290921666010000000000000265ffffffffffff9092169190911790556200086a565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d815291909120945191519092166601000000000000029216919091179101555b6020015192508391506200054e9050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a81529182209551925190931666010000000000000291909316179201919091559050816200054e565b600065ffffffffffff82111562000515576040516306dfcc6560e41b8152603060048201526024810183905260440162000220565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200093a57607f821691505b6020821081036200095b57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028b576000816000526020600020601f850160051c810160208610156200098c5750805b601f850160051c820191505b81811015620009ad5782815560010162000998565b505050505050565b81516001600160401b03811115620009d157620009d16200090f565b620009e981620009e2845462000925565b8462000961565b602080601f83116001811462000a21576000841562000a085750858301515b600019600386901b1c1916600185901b178555620009ad565b600085815260208120601f198616915b8281101562000a525788860151825594840194600190910190840162000a31565b508582101562000a715787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b634e487b7160e01b600052601160045260246000fd5b600181815b8085111562000ad857816000190482111562000abc5762000abc62000a81565b8085161562000aca57918102915b93841c939080029062000a9c565b509250929050565b60008262000af157506001620001f3565b8162000b0057506000620001f3565b816001811462000b19576002811462000b245762000b44565b6001915050620001f3565b60ff84111562000b385762000b3862000a81565b50506001821b620001f3565b5060208310610133831016604e8410600b841016171562000b69575081810a620001f3565b62000b75838362000a97565b806000190482111562000b8c5762000b8c62000a81565b029392505050565b6000620004d860ff84168362000ae0565b8082028115828204841417620001f357620001f362000a81565b60006020808352835180602085015260005b8181101562000bef5785810183015185820160400152820162000bd1565b506000604082860101526040601f19601f8301168501019250505092915050565b805160208083015191908110156200095b5760001960209190910360031b1b16919050565b80820180821115620001f357620001f362000a81565b6001600160d01b0381811683821601908082111562000c6e5762000c6e62000a81565b5092915050565b6001600160d01b0382811682821603908082111562000c6e5762000c6e62000a81565b81810381811115620001f357620001f362000a81565b60805160a05160c05160e051610100516101205161014051611ed162000d096000396000610d9901526000610d6c01526000610b3401526000610b0c01526000610a6701526000610a9101526000610abb0152611ed16000f3fe608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032
157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a0016040516020818303038152906040528051906020012061
0df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526
001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b81526004810183
90526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b6
00080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033dec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724", - "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f16
8201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905
090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190
918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b0316602084015291925090871
6101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033", + "bytecode": 
"0x6101606040523480156200001257600080fd5b506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e00000000000000000000000081525080604051806040016040528060018152602001603160f81b8152506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e0000000000000000000000008152506040518060400160405280600381526020016210539560ea1b8152508160039081620000c79190620009b4565b506004620000d68282620009b4565b50620000e891508390506005620001bf565b61012052620000f9816006620001bf565b61014052815160208084019190912060e052815190820120610100524660a0526200018760e05161010051604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201529081019290925260608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b60805250503060c05250620001b933620001a46012600a62000b93565b620001b390622625a062000ba4565b620001f8565b62000cad565b6000602083511015620001df57620001d7836200023a565b9050620001f2565b81620001ec8482620009b4565b5060ff90505b92915050565b6001600160a01b038216620002285760405163ec442f0560e01b8152600060048201526024015b60405180910390fd5b62000236600083836200027d565b5050565b600080829050601f8151111562000268578260405163305a27a960e01b81526004016200021f919062000bbe565b8051620002758262000c0f565b179392505050565b6200028a8383836200028f565b505050565b6200029c838383620002fe565b6001600160a01b038316620002f1576000620002b760025490565b90506001600160d01b0380821115620002ee57604051630e58ae9360e11b815260048101839052602481018290526044016200021f565b50505b6200028a83838362000431565b6001600160a01b0383166200032d57806002600082825462000321919062000c34565b90915550620003a19050565b6001600160a01b03831660009081526020819052604090205481811015620003825760405163391434e360e21b81526001600160a01b038516600482015260248101829052604481018390526064016200021f565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b038216620003bf57600280548290039055620003de565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040516200042491815260200190565b60405180910390a3505050565b6001600160a01b038316620004665762000463600a62000953620004c960201b176200045d84620004de565b62000518565b50505b6001600160a01b038216620004955762000492600a6200095f6200055560201b176200045d84620004de565b50505b6001600160a01b038381166000908152600860205260408082205485841683529120546200028a9291821691168362000563565b6000620004d7828462000c4a565b9392505050565b60006001600160d01b0382111562000514576040516306dfcc6560e41b815260d06004820152602481018390526044016200021f565b5090565b6000806200054862000529620006ca565b6200053f6200053888620006db565b868860201c565b8791906200072a565b915091505b935093915050565b6000620004d7828462000c74565b816001600160a01b0316836001600160a01b031614158015620005865750600081115b156200028a576001600160a01b0383161562000629576001600160a01b038316600090815260096020908152604082208291620005d4919062000555901b6200095f176200045d86620004de565b6001600160d01b031691506001600160d01b03169150846001600160a01b031660008051602062002bd983398151915283836040516200061e929190918252602082015260400190565b60405180910390a250505b6001600160a01b038216156200028a576001600160a01b038216600090815260096020908152604082208291620006719190620004c9901b62000953176200045d86620004de565b6001600160d01b031691506001600160d01b03169150836001600160a01b031660008051602062002bd98339815191528383604051620006bb929190918252602082015260400190565b60405180910390a25050505050565b6000620006d66200073a565b905090565b8054600090801562000721576200070783620006
f960018462000c97565b600091825260209091200190565b54660100000000000090046001600160d01b0316620004d7565b60009392505050565b6000806200054885858562000747565b6000620006d643620008d9565b8254600090819080156200087a5760006200076987620006f960018562000c97565b60408051808201909152905465ffffffffffff80821680845266010000000000009092046001600160d01b031660208401529192509087161015620007c157604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff808816911603620008155784620007e888620006f960018662000c97565b80546001600160d01b039290921666010000000000000265ffffffffffff90921691909117905562000869565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d815291909120945191519092166601000000000000029216919091179101555b6020015192508391506200054d9050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a81529182209551925190931666010000000000000291909316179201919091559050816200054d565b600065ffffffffffff82111562000514576040516306dfcc6560e41b815260306004820152602481018390526044016200021f565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200093957607f821691505b6020821081036200095a57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028a576000816000526020600020601f850160051c810160208610156200098b5750805b601f850160051c820191505b81811015620009ac5782815560010162000997565b505050505050565b81516001600160401b03811115620009d057620009d06200090e565b620009e881620009e1845462000924565b8462000960565b602080601f83116001811462000a20576000841562000a075750858301515b600019600386901b1c1916600185901b178555620009ac565b600085815260208120601f198616915b8281101562000a515788860151825594840194600190910190840162000a30565b508582101562000a705787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b634e487b7160e01b600052601160045260246000fd5b600181815b8085111562000ad757816000190482111562000abb5762000abb62000a80565b8085161562000ac957918102915b93841c939080029062000a9b565b509250929050565b60008262000af057506001620001f2565b8162000aff57506000620001f2565b816001811462000b18576002811462000b235762000b43565b6001915050620001f2565b60ff84111562000b375762000b3762000a80565b50506001821b620001f2565b5060208310610133831016604e8410600b841016171562000b68575081810a620001f2565b62000b74838362000a96565b806000190482111562000b8b5762000b8b62000a80565b029392505050565b6000620004d760ff84168362000adf565b8082028115828204841417620001f257620001f262000a80565b60006020808352835180602085015260005b8181101562000bee5785810183015185820160400152820162000bd0565b506000604082860101526040601f19601f8301168501019250505092915050565b805160208083015191908110156200095a5760001960209190910360031b1b16919050565b80820180821115620001f257620001f262000a80565b6001600160d01b0381811683821601908082111562000c6d5762000c6d62000a80565b5092915050565b6001600160d01b0382811682821603908082111562000c6d5762000c6d62000a80565b81810381811115620001f257620001f262000a80565b60805160a05160c05160e051610100516101205161014051611ed162000d086000396000610d9901526000610d6c01526000610b3401526000610b0c01526000610a6701526000610a9101526000610abb0152611ed16000f3fe608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf4146103215
7600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610d
f8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b6040805180820190915260008082526020820152600
1600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390
526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600
080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220e6593b092e1a24b35f83124c9f1435eef683cc4ae5be2f7a133072dc046158f264736f6c63430008180033dec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724", + "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f1682
01915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c0016040516020818303038152906040528051906020012090509
0565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724838360405161128192919091
8252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b031660208401529192509087161
01561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220e6593b092e1a24b35f83124c9f1435eef683cc4ae5be2f7a133072dc046158f264736f6c63430008180033", "linkReferences": {}, "deployedLinkReferences": {} -} +} \ No newline at end of file diff --git a/evmlib/src/common.rs b/evmlib/src/common.rs index af210f9285..88d8855245 100644 --- a/evmlib/src/common.rs +++ b/evmlib/src/common.rs @@ -16,3 +16,4 @@ pub type QuoteHash = Hash; pub type Amount = U256; pub type QuotePayment = (QuoteHash, Address, Amount); pub type EthereumWallet = alloy::network::EthereumWallet; +pub type Calldata = alloy::primitives::Bytes; diff --git a/evmlib/src/contract/data_payments/mod.rs b/evmlib/src/contract/data_payments/mod.rs index 352f294581..79f90f9b04 100644 --- a/evmlib/src/contract/data_payments/mod.rs +++ b/evmlib/src/contract/data_payments/mod.rs @@ -9,9 +9,10 @@ pub mod error; use crate::common; -use crate::common::{Address, TxHash}; 
+use crate::common::{Address, Calldata, TxHash};
 use crate::contract::data_payments::error::Error;
 use crate::contract::data_payments::DataPaymentsContract::DataPaymentsContractInstance;
+use alloy::network::TransactionBuilder;
 use alloy::providers::{Network, Provider};
 use alloy::sol;
 use alloy::transports::Transport;
@@ -64,6 +65,33 @@ where
         &self,
         data_payments: I,
     ) -> Result<TxHash, Error> {
+        let (calldata, to) = self.pay_for_quotes_calldata(data_payments)?;
+
+        let transaction_request = self
+            .contract
+            .provider()
+            .transaction_request()
+            .with_to(to)
+            .with_input(calldata);
+
+        let tx_hash = self
+            .contract
+            .provider()
+            .send_transaction(transaction_request)
+            .await?
+            .watch()
+            .await?;
+
+        Ok(tx_hash)
+    }
+
+    /// Pay for quotes.
+    /// Input: (quote_id, reward_address, amount).
+    /// Returns the transaction calldata.
+    pub fn pay_for_quotes_calldata<I: IntoIterator<Item = common::QuotePayment>>(
+        &self,
+        data_payments: I,
+    ) -> Result<(Calldata, Address), Error> {
         let data_payments: Vec<DataPayments::DataPayment> = data_payments
             .into_iter()
             .map(|(hash, addr, amount)| DataPayments::DataPayment {
@@ -74,26 +102,15 @@ where
             .collect();

         if data_payments.len() > MAX_TRANSFERS_PER_TRANSACTION {
-            error!(
-                "Data payments limit exceeded: {} > {}",
-                data_payments.len(),
-                MAX_TRANSFERS_PER_TRANSACTION
-            );
             return Err(Error::TransferLimitExceeded);
         }

-        let tx_hash = self
+        let calldata = self
             .contract
             .submitDataPayments(data_payments)
-            .send()
-            .await
-            .inspect_err(|e| error!("Failed to submit data payments during pay_for_quotes: {e:?}"))?
-            .watch()
-            .await
-            .inspect_err(|e| {
-                error!("Failed to watch data payments during pay_for_quotes: {e:?}")
-            })?;
+            .calldata()
+            .to_owned();

-        Ok(tx_hash)
+        Ok((calldata, *self.contract.address()))
     }
 }
diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs
index ce582f2543..10903c9fd2 100644
--- a/evmlib/src/contract/network_token.rs
+++ b/evmlib/src/contract/network_token.rs
@@ -6,8 +6,9 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.

-use crate::common::{Address, TxHash, U256};
+use crate::common::{Address, Calldata, TxHash, U256};
 use crate::contract::network_token::NetworkTokenContract::NetworkTokenContractInstance;
+use alloy::network::TransactionBuilder;
 use alloy::providers::{Network, Provider};
 use alloy::sol;
 use alloy::transports::{RpcError, Transport, TransportErrorKind};
@@ -51,7 +52,7 @@ where
     pub async fn deploy(provider: P) -> Self {
         let contract = NetworkTokenContract::deploy(provider)
             .await
-            .expect("Could not deploy contract");
+            .expect("Could not deploy contract, update anvil by running `foundryup` and try again");

         NetworkToken { contract }
     }
@@ -91,15 +92,30 @@ where
     /// Approve spender to spend a raw amount of tokens.
pub async fn approve(&self, spender: Address, value: U256) -> Result { debug!("Approving spender to spend raw amt of tokens: {value}"); - let call = self.contract.approve(spender, value); - let pending_tx_builder = call.send().await.inspect_err(|err| { - error!( + let (calldata, to) = self.approve_calldata(spender, value); + + let transaction_request = self + .contract + .provider() + .transaction_request() + .with_to(to) + .with_input(calldata); + + let pending_tx_builder = self + .contract + .provider() + .send_transaction(transaction_request) + .await + .inspect_err(|err| { + error!( "Error approving spender {spender:?} to spend raw amt of tokens {value}: {err:?}" ) - })?; + })?; let pending_tx_hash = *pending_tx_builder.tx_hash(); + debug!("The approval from sender {spender:?} is pending with tx_hash: {pending_tx_hash:?}",); + let tx_hash = pending_tx_builder.watch().await.inspect_err(|err| { error!("Error watching approve tx with hash {pending_tx_hash:?}: {err:?}") })?; @@ -109,13 +125,33 @@ where Ok(tx_hash) } + /// Approve spender to spend a raw amount of tokens. + /// Returns the transaction calldata. + pub fn approve_calldata(&self, spender: Address, value: U256) -> (Calldata, Address) { + let calldata = self.contract.approve(spender, value).calldata().to_owned(); + (calldata, *self.contract.address()) + } + /// Transfer a raw amount of tokens. pub async fn transfer(&self, receiver: Address, amount: U256) -> Result { debug!("Transferring raw amt of tokens: {amount} to {receiver:?}"); - let call = self.contract.transfer(receiver, amount); - let pending_tx_builder = call.send().await.inspect_err(|err| { - error!("Error transferring raw amt of tokens to {receiver:?}: {err:?}") - })?; + let (calldata, to) = self.transfer_calldata(receiver, amount); + + let transaction_request = self + .contract + .provider() + .transaction_request() + .with_to(to) + .with_input(calldata); + + let pending_tx_builder = self + .contract + .provider() + .send_transaction(transaction_request) + .await + .inspect_err(|err| { + error!("Error transferring raw amt of tokens to {receiver:?}: {err:?}") + })?; let pending_tx_hash = *pending_tx_builder.tx_hash(); debug!( @@ -129,4 +165,15 @@ where Ok(tx_hash) } + + /// Transfer a raw amount of tokens. + /// Returns the transaction calldata. + pub fn transfer_calldata(&self, receiver: Address, amount: U256) -> (Calldata, Address) { + let calldata = self + .contract + .transfer(receiver, amount) + .calldata() + .to_owned(); + (calldata, *self.contract.address()) + } } diff --git a/evmlib/src/cryptography.rs b/evmlib/src/cryptography.rs index ddc0149b43..02870942d9 100644 --- a/evmlib/src/cryptography.rs +++ b/evmlib/src/cryptography.rs @@ -8,8 +8,56 @@ use crate::common::Hash; use alloy::primitives::keccak256; +use alloy::signers::k256::ecdsa::{signature, RecoveryId, Signature, SigningKey}; +use alloy::signers::local::PrivateKeySigner; /// Hash data using Keccak256. pub fn hash>(data: T) -> Hash { keccak256(data.as_ref()) } + +/// Sign error +#[derive(Debug, thiserror::Error)] +pub enum SignError { + #[error("Failed to parse EVM secret key: {0}")] + InvalidEvmSecretKey(String), + #[error("Failed to sign message: {0}")] + Signature(#[from] signature::Error), +} + +/// Sign a message with an EVM secret key. 
+pub fn sign_message(evm_secret_key_str: &str, message: &[u8]) -> Result<Vec<u8>, SignError> {
+    let signer: PrivateKeySigner =
+        evm_secret_key_str
+            .parse::<PrivateKeySigner>()
+            .map_err(|err| {
+                error!("Error parsing EVM secret key: {err}");
+                SignError::InvalidEvmSecretKey(err.to_string())
+            })?;
+
+    let message_hash = to_eth_signed_message_hash(message);
+    let (signature, _) = sign_message_recoverable(&signer.into_credential(), message_hash)?;
+    Ok(signature.to_vec())
+}
+
+/// Hash a message using Keccak256, then add the Ethereum prefix and hash it again.
+fn to_eth_signed_message_hash<T: AsRef<[u8]>>(message: T) -> [u8; 32] {
+    const PREFIX: &str = "\x19Ethereum Signed Message:\n32";
+
+    let hashed_message = hash(message);
+
+    let mut eth_message = Vec::with_capacity(PREFIX.len() + 32);
+    eth_message.extend_from_slice(PREFIX.as_bytes());
+    eth_message.extend_from_slice(hashed_message.as_slice());
+
+    hash(eth_message).into()
+}
+
+/// Sign a message with a recoverable public key.
+fn sign_message_recoverable<T: AsRef<[u8]>>(
+    secret_key: &SigningKey,
+    message: T,
+) -> Result<(Signature, RecoveryId), signature::Error> {
+    let hash = to_eth_signed_message_hash(message);
+    secret_key.sign_prehash_recoverable(&hash)
+}
diff --git a/evmlib/src/external_signer.rs b/evmlib/src/external_signer.rs
new file mode 100644
index 0000000000..20c3aa95df
--- /dev/null
+++ b/evmlib/src/external_signer.rs
@@ -0,0 +1,96 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::common::{Address, Amount, Calldata, QuoteHash, QuotePayment, U256};
+use crate::contract::data_payments::{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION};
+use crate::contract::network_token::NetworkToken;
+use crate::contract::{data_payments, network_token};
+use crate::utils::http_provider;
+use crate::Network;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Network token contract error: {0}")]
+    NetworkTokenContract(#[from] network_token::Error),
+    #[error("Data payments contract error: {0}")]
+    DataPaymentsContract(#[from] data_payments::error::Error),
+}
+
+/// Approve an address / smart contract to spend this wallet's payment tokens.
+///
+/// Returns the transaction calldata (input, to).
+pub fn approve_to_spend_tokens_calldata(
+    network: &Network,
+    spender: Address,
+    value: U256,
+) -> (Calldata, Address) {
+    let provider = http_provider(network.rpc_url().clone());
+    let network_token = NetworkToken::new(*network.payment_token_address(), provider);
+    network_token.approve_calldata(spender, value)
+}
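The `\x19Ethereum Signed Message:\n32` prefix used by `to_eth_signed_message_hash` above is the EIP-191 personal-message convention: the raw message is keccak-hashed first, then the prefix and that 32-byte digest are hashed again, so a signer cannot be tricked into signing bytes that double as a valid transaction. The following is a minimal standalone sketch of the same double hash, not the evmlib implementation; it assumes tiny-keccak = { version = "2", features = ["keccak"] } and hex = "0.4".

// A minimal sketch, not the evmlib code.
use tiny_keccak::{Hasher, Keccak};

fn keccak256(data: &[u8]) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    let mut output = [0u8; 32];
    hasher.update(data);
    hasher.finalize(&mut output);
    output
}

fn to_eth_signed_message_hash(message: &[u8]) -> [u8; 32] {
    // EIP-191 prefix for a 32-byte payload.
    const PREFIX: &str = "\x19Ethereum Signed Message:\n32";
    let hashed_message = keccak256(message); // first pass: hash the raw message
    let mut eth_message = Vec::with_capacity(PREFIX.len() + 32);
    eth_message.extend_from_slice(PREFIX.as_bytes());
    eth_message.extend_from_slice(&hashed_message);
    keccak256(&eth_message) // second pass: hash prefix || digest
}

fn main() {
    println!("0x{}", hex::encode(to_eth_signed_message_hash(b"hello")));
}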
+/// Transfer payment tokens from the supplied wallet to an address.
+///
+/// Returns the transaction calldata (input, to).
+pub fn transfer_tokens_calldata(
+    network: &Network,
+    receiver: Address,
+    amount: U256,
+) -> (Calldata, Address) {
+    let provider = http_provider(network.rpc_url().clone());
+    let network_token = NetworkToken::new(*network.payment_token_address(), provider);
+    network_token.transfer_calldata(receiver, amount)
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct PayForQuotesCalldataReturnType {
+    pub batched_calldata_map: HashMap<Calldata, Vec<QuoteHash>>,
+    pub to: Address,
+    pub approve_spender: Address,
+    pub approve_amount: Amount,
+}
+
+/// Use this wallet to pay for chunks in batched transfer transactions.
+/// If the amount of transfers is more than one transaction can contain, the transfers will be split up over multiple transactions.
+///
+/// Returns PayForQuotesCalldataReturnType, containing calldata of the transaction batches along with the approval details for the spender.
+pub fn pay_for_quotes_calldata<T: IntoIterator<Item = QuotePayment>>(
+    network: &Network,
+    payments: T,
+) -> Result<PayForQuotesCalldataReturnType, Error> {
+    let payments: Vec<_> = payments.into_iter().collect();
+
+    let total_amount = payments.iter().map(|(_, _, amount)| amount).sum();
+
+    let approve_spender = *network.data_payments_address();
+    let approve_amount = total_amount;
+
+    let provider = http_provider(network.rpc_url().clone());
+    let data_payments = DataPaymentsHandler::new(*network.data_payments_address(), provider);
+
+    // Divide transfers over multiple transactions if they exceed the max per transaction.
+    let chunks = payments.chunks(MAX_TRANSFERS_PER_TRANSACTION);
+
+    let mut calldata_map: HashMap<Calldata, Vec<QuoteHash>> = HashMap::new();
+
+    for batch in chunks {
+        let quote_payments = batch.to_vec();
+        let (calldata, _) = data_payments.pay_for_quotes_calldata(quote_payments.clone())?;
+        let quote_hashes = quote_payments.into_iter().map(|(qh, _, _)| qh).collect();
+        calldata_map.insert(calldata, quote_hashes);
+    }
+
+    Ok(PayForQuotesCalldataReturnType {
+        batched_calldata_map: calldata_map,
+        to: *data_payments.contract.address(),
+        approve_spender,
+        approve_amount,
+    })
+}
diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs
index fe712e1b27..e0df96d466 100644
--- a/evmlib/src/lib.rs
+++ b/evmlib/src/lib.rs
@@ -22,6 +22,8 @@ pub mod common;
 pub mod contract;
 pub mod cryptography;
 pub(crate) mod event;
+#[cfg(feature = "external-signer")]
+pub mod external_signer;
 pub mod testnet;
 pub mod transaction;
 pub mod utils;
@@ -43,14 +45,14 @@
 const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address =
     address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C");

 const ARBITRUM_SEPOLIA_PAYMENT_TOKEN_ADDRESS: Address =
-    address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C");
+    address!("BE1802c27C324a28aeBcd7eeC7D734246C807194");
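The batching rule in `pay_for_quotes_calldata` above is easy to verify in isolation: `chunks(MAX_TRANSFERS_PER_TRANSACTION)` can never yield a batch larger than the cap, and each batch becomes one entry in `batched_calldata_map`. Below is a hedged, std-only sketch of that splitting; the limit value 256 and the `u32` stand-ins are illustrative only, not the real evmlib constant or types.

// Sketch of the batch-splitting rule. u32s stand in for (QuoteHash, Address, Amount).
const MAX_TRANSFERS_PER_TRANSACTION: usize = 256; // illustrative value

fn main() {
    let payments: Vec<u32> = (0..600).collect();
    // chunks() yields ceil(600 / 256) = 3 batches: 256 + 256 + 88 transfers.
    let batches: Vec<&[u32]> = payments.chunks(MAX_TRANSFERS_PER_TRANSACTION).collect();
    assert_eq!(batches.len(), 3);
    for (i, batch) in batches.iter().enumerate() {
        println!("batch {i}: {} transfers -> one calldata blob", batch.len());
    }
}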
 // Should be updated when the smart contract changes!
 const ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS: Address =
     address!("887930F30EDEb1B255Cd2273C3F4400919df2EFe");

 const ARBITRUM_SEPOLIA_DATA_PAYMENTS_ADDRESS: Address =
-    address!("e6D6bB5Fa796baA8c1ADc439Ac0fd66fd2A1858b");
+    address!("Dd56b03Dae2Ab8594D80269EC4518D13F1A110BD");

 #[serde_as]
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs
index e6f657938b..fbd838843f 100644
--- a/evmlib/src/utils.rs
+++ b/evmlib/src/utils.rs
@@ -10,6 +10,12 @@
 use crate::common::{Address, Hash};
 use crate::{CustomNetwork, Network};
+use alloy::network::Ethereum;
+use alloy::providers::fillers::{
+    BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller,
+};
+use alloy::providers::{Identity, ProviderBuilder, ReqwestProvider};
+use alloy::transports::http::{reqwest, Client, Http};
 use dirs_next::data_dir;
 use rand::Rng;
 use std::env;
@@ -92,9 +98,7 @@ pub fn get_evm_network_from_env() -> Result<Network, Error> {
         .map(|v| v == "arbitrum-sepolia")
         .unwrap_or(false);

-    if use_local_evm {
-        local_evm_network_from_csv()
-    } else if use_arbitrum_one {
+    if use_arbitrum_one {
         info!("Using Arbitrum One EVM network as EVM_NETWORK is set to 'arbitrum-one'");
         Ok(Network::ArbitrumOne)
     } else if use_arbitrum_sepolia {
@@ -107,6 +111,8 @@ pub fn get_evm_network_from_env() -> Result<Network, Error> {
             &evm_vars[1],
             &evm_vars[2],
         )))
+    } else if use_local_evm {
+        local_evm_network_from_csv()
     } else {
         error!("Failed to obtain EVM Network through any means");
         Err(Error::FailedToGetEvmNetwork(
@@ -143,3 +149,20 @@ fn local_evm_network_from_csv() -> Result<Network, Error> {
         }
     }
 }
+
+#[allow(clippy::type_complexity)]
+pub(crate) fn http_provider(
+    rpc_url: reqwest::Url,
+) -> FillProvider<
+    JoinFill<
+        Identity,
+        JoinFill<GasFiller, JoinFill<BlobGasFiller, JoinFill<NonceFiller, ChainIdFiller>>>,
+    >,
+    ReqwestProvider,
+    Http<Client>,
+    Ethereum,
+> {
+    ProviderBuilder::new()
+        .with_recommended_fillers()
+        .on_http(rpc_url)
+}
diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs
index 9fa3c92ce1..22350b1ff4 100644
--- a/evmlib/src/wallet.rs
+++ b/evmlib/src/wallet.rs
@@ -10,7 +10,9 @@
 use crate::common::{Address, QuoteHash, QuotePayment, TxHash, U256};
 use crate::contract::data_payments::{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION};
 use crate::contract::network_token::NetworkToken;
 use crate::contract::{data_payments, network_token};
+use crate::utils::http_provider;
 use crate::Network;
+use alloy::hex::ToHexExt;
 use alloy::network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder};
 use alloy::providers::fillers::{
     BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller,
@@ -21,6 +23,7 @@
 use alloy::signers::local::{LocalSigner, PrivateKeySigner};
 use alloy::transports::http::{reqwest, Client, Http};
 use alloy::transports::{RpcError, TransportErrorKind};
 use std::collections::BTreeMap;
+use std::sync::Arc;

 #[derive(thiserror::Error, Debug)]
 pub enum Error {
@@ -38,12 +41,17 @@
 pub struct Wallet {
     wallet: EthereumWallet,
     network: Network,
+    lock: Arc<tokio::sync::Mutex<()>>,
 }

 impl Wallet {
     /// Creates a new Wallet object with the specific Network and EthereumWallet.
     pub fn new(network: Network, wallet: EthereumWallet) -> Self {
-        Self { wallet, network }
+        Self {
+            wallet,
+            network,
+            lock: Arc::new(tokio::sync::Mutex::new(())),
+        }
     }

     /// Convenience function that creates a new Wallet with a random EthereumWallet.
@@ -62,6 +70,11 @@ impl Wallet {
         wallet_address(&self.wallet)
     }
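The reordering in `get_evm_network_from_env` above changes precedence: an explicit EVM_NETWORK value now wins over a local testnet CSV, whereas previously the CSV was consulted first. Here is a simplified sketch of the new selection order; it is a toy model with string results, not the actual function, which returns the `Network` enum and also checks the custom-network env vars.

// Toy model of the new precedence order (illustrative only).
fn pick_network(evm_network: Option<&str>, local_csv_exists: bool) -> &'static str {
    match evm_network {
        Some("arbitrum-one") => "ArbitrumOne",
        Some("arbitrum-sepolia") => "ArbitrumSepolia",
        // The custom-network env vars would be checked here in the real code.
        _ if local_csv_exists => "LocalFromCsv",
        _ => "FailedToGetEvmNetwork",
    }
}

fn main() {
    // Before this change the local CSV won; now the explicit env var does.
    assert_eq!(pick_network(Some("arbitrum-sepolia"), true), "ArbitrumSepolia");
    assert_eq!(pick_network(None, true), "LocalFromCsv");
}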
+    /// Returns the `Network` of this wallet.
+    pub fn network(&self) -> &Network {
+        &self.network
+    }
+
     /// Returns the raw balance of payment tokens for this wallet.
     pub async fn balance_of_tokens(&self) -> Result<U256, network_token::Error> {
         balance_of_tokens(self.address(), &self.network).await
     }
@@ -125,6 +138,23 @@
     ) -> Result<BTreeMap<QuoteHash, TxHash>, PayForQuotesError> {
         pay_for_quotes(self.wallet.clone(), &self.network, data_payments).await
     }
+
+    /// Build a provider using this wallet.
+    pub fn to_provider(&self) -> ProviderWithWallet {
+        http_provider_with_wallet(self.network.rpc_url().clone(), self.wallet.clone())
+    }
+
+    /// Lock the wallet to prevent concurrent use.
+    /// Drop the guard to unlock the wallet.
+    pub async fn lock(&self) -> tokio::sync::MutexGuard<()> {
+        self.lock.lock().await
+    }
+
+    /// Returns a random private key string.
+    pub fn random_private_key() -> String {
+        let signer: PrivateKeySigner = LocalSigner::random();
+        signer.to_bytes().encode_hex_with_prefix()
+    }
 }

 /// Generate an EthereumWallet with a random private key.
@@ -144,28 +174,7 @@ fn from_private_key(private_key: &str) -> Result<EthereumWallet, Error> {

 // TODO(optimization): Find a way to reuse/persist contracts and/or a provider without the wallet nonce going out of sync

-#[allow(clippy::type_complexity)]
-fn http_provider(
-    rpc_url: reqwest::Url,
-) -> FillProvider<
-    JoinFill<
-        Identity,
-        JoinFill<GasFiller, JoinFill<BlobGasFiller, JoinFill<NonceFiller, ChainIdFiller>>>,
-    >,
-    ReqwestProvider,
-    Http<Client>,
-    Ethereum,
-> {
-    ProviderBuilder::new()
-        .with_recommended_fillers()
-        .on_http(rpc_url)
-}
-
-#[allow(clippy::type_complexity)]
-fn http_provider_with_wallet(
-    rpc_url: reqwest::Url,
-    wallet: EthereumWallet,
-) -> FillProvider<
+pub type ProviderWithWallet = FillProvider<
     JoinFill<
         JoinFill<
             Identity,
             JoinFill<GasFiller, JoinFill<BlobGasFiller, JoinFill<NonceFiller, ChainIdFiller>>>,
         >,
         WalletFiller<EthereumWallet>,
     >,
     ReqwestProvider,
     Http<Client>,
     Ethereum,
-> {
+>;
+
+fn http_provider_with_wallet(rpc_url: reqwest::Url, wallet: EthereumWallet) -> ProviderWithWallet {
     ProviderBuilder::new()
         .with_recommended_fillers()
         .wallet(wallet)
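The new `lock()` method pairs with the `Arc<tokio::sync::Mutex<()>>` field added above: callers hold the guard across build-sign-submit so concurrent tasks cannot interleave and push the wallet nonce out of sync, the very hazard the TODO above mentions. A minimal usage sketch follows; it assumes tokio with the "macros" and "rt-multi-thread" features and is an illustration, not the evmlib wallet.

use std::sync::Arc;

#[derive(Clone)]
struct Wallet {
    lock: Arc<tokio::sync::Mutex<()>>,
}

impl Wallet {
    // Mirrors Wallet::lock() above: the unit mutex carries no data, it only
    // serialises access so transactions are submitted one at a time.
    async fn lock(&self) -> tokio::sync::MutexGuard<'_, ()> {
        self.lock.lock().await
    }
}

#[tokio::main]
async fn main() {
    let wallet = Wallet {
        lock: Arc::new(tokio::sync::Mutex::new(())),
    };
    {
        let _guard = wallet.lock().await;
        // build, sign and submit a transaction while holding the guard...
    } // guard dropped here; the next task may use the wallet
}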
version = "0.1.1" } -sn-node-manager = { version = "0.11.0", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.4", path = "../sn_peers_acquisition" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } +sn-node-manager = { version = "0.11.1", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.5", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.4.0", path = "../sn_service_management" } +sn_service_management = { version = "0.4.1", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/node-launchpad/src/components/popup/rewards_address.rs b/node-launchpad/src/components/popup/rewards_address.rs index 6f8eda7db0..8ec3741034 100644 --- a/node-launchpad/src/components/popup/rewards_address.rs +++ b/node-launchpad/src/components/popup/rewards_address.rs @@ -329,7 +329,7 @@ impl Component for RewardsAddress { .split(layer_one[1]); let text = Paragraph::new(vec![ - Line::from(Span::styled("Add your wallet and you can earn a slice of millions of tokens created at the genesis of the Autonomi Network when through running nodes.",Style::default())), + Line::from(Span::styled("Add your wallet to store your node earnings, and we'll pay you rewards to the same wallet after the Network's Token Generation Event.",Style::default())), Line::from(Span::styled("\n\n",Style::default())), Line::from(Span::styled("By continuing you agree to the Terms and Conditions found here:",Style::default())), Line::from(Span::styled("\n\n",Style::default())), diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 3feb403485..3c82045f7b 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -46,6 +46,7 @@ use std::{ time::{Duration, Instant}, vec, }; +use strum::Display; use tokio::sync::mpsc::UnboundedSender; use super::super::node_mgmt::{maintain_n_running_nodes, reset_nodes, stop_nodes}; @@ -105,7 +106,7 @@ pub struct Status<'a> { error_popup: Option, } -#[derive(Clone)] +#[derive(Clone, Display, Debug)] pub enum LockRegistryState { StartingNodes, StoppingNodes, @@ -179,8 +180,7 @@ impl Status<'_> { // Update status based on current node status item.status = match node_item.status { ServiceStatus::Running => { - // Call calc_next on the spinner state - item.spinner_state.calc_next(); + NodeItem::update_spinner_state(&mut item.spinner_state); NodeStatus::Running } ServiceStatus::Stopped => NodeStatus::Stopped, @@ -190,7 +190,7 @@ impl Status<'_> { // Starting is not part of ServiceStatus so we do it manually if let Some(LockRegistryState::StartingNodes) = self.lock_registry { - item.spinner_state.calc_next(); + NodeItem::update_spinner_state(&mut item.spinner_state); item.status = NodeStatus::Starting; } @@ -207,7 +207,7 @@ impl Status<'_> { .iter() .find(|s| s.service_name == node_item.service_name) { - item.attos = stats.forwarded_rewards; + item.attos = stats.rewards_wallet_balance; item.memory = stats.memory_usage_mb; item.mbps = format!( "โ†“{:06.2} โ†‘{:06.2}", @@ -273,6 +273,14 @@ impl Status<'_> { Ok(()) } + fn clear_node_items(&mut self) { + debug!("Cleaning items on Status page"); + if let Some(items) = self.items.as_mut() { + 
items.items.clear(); + debug!("Cleared the items on status page"); + } + } + /// Tries to trigger the update of node stats if the last update was more than `NODE_STAT_UPDATE_INTERVAL` ago. /// The result is sent via the StatusActions::NodesStatsObtained action. fn try_update_node_stats(&mut self, force_update: bool) -> Result<()> { @@ -426,6 +434,7 @@ impl Component for Status<'_> { StatusActions::ResetNodesCompleted { trigger_start_node } => { self.lock_registry = None; self.load_node_registry_and_update_states()?; + self.clear_node_items(); if trigger_start_node { debug!("Reset nodes completed. Triggering start nodes."); @@ -507,6 +516,13 @@ impl Component for Status<'_> { StatusActions::StartNodes => { debug!("Got action to start nodes"); + if self.rewards_address.is_empty() { + info!("Rewards address is not set. Ask for input."); + return Ok(Some(Action::StatusActions( + StatusActions::TriggerRewardsAddress, + ))); + } + if self.nodes_to_start == 0 { info!("Nodes to start not set. Ask for input."); return Ok(Some(Action::StatusActions( @@ -515,7 +531,10 @@ impl Component for Status<'_> { } if self.lock_registry.is_some() { - error!("Registry is locked. Cannot start node now."); + error!( + "Registry is locked ({:?}) Cannot Start nodes now.", + self.lock_registry + ); return Ok(None); } @@ -549,7 +568,10 @@ impl Component for Status<'_> { StatusActions::StopNodes => { debug!("Got action to stop nodes"); if self.lock_registry.is_some() { - error!("Registry is locked. Cannot stop node now."); + error!( + "Registry is locked ({:?}) Cannot Stop nodes now.", + self.lock_registry + ); return Ok(None); } @@ -572,7 +594,10 @@ impl Component for Status<'_> { Action::OptionsActions(OptionsActions::ResetNodes) => { debug!("Got action to reset nodes"); if self.lock_registry.is_some() { - error!("Registry is locked. Cannot reset nodes now."); + error!( + "Registry is locked ({:?}) Cannot Reset nodes now.", + self.lock_registry + ); return Ok(None); } @@ -685,9 +710,12 @@ impl Component for Status<'_> { let total_attos_earned_and_wallet_row = Row::new(vec![ Cell::new("Attos Earned".to_string()).fg(VIVID_SKY_BLUE), - Cell::new(self.node_stats.total_forwarded_rewards.to_string()) - .fg(VIVID_SKY_BLUE) - .bold(), + Cell::new(format!( + "{:?}", + self.node_stats.total_rewards_wallet_balance + )) + .fg(VIVID_SKY_BLUE) + .bold(), Cell::new(Line::from(wallet_not_set).alignment(Alignment::Right)), ]); @@ -720,7 +748,7 @@ impl Component for Status<'_> { // No nodes. Empty Table. 
if let Some(ref items) = self.items { - if items.items.is_empty() { + if items.items.is_empty() || self.rewards_address.is_empty() { let line1 = Line::from(vec![ Span::styled("Press ", Style::default().fg(LIGHT_PERIWINKLE)), Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE).bold()), @@ -833,7 +861,7 @@ impl Component for Status<'_> { let footer = Footer::default(); let footer_state = if let Some(ref items) = self.items { - if !items.items.is_empty() { + if !items.items.is_empty() || self.rewards_address.is_empty() { if !self.get_running_nodes().is_empty() { &mut NodesToStart::Running } else { @@ -1017,7 +1045,7 @@ impl fmt::Display for NodeStatus { pub struct NodeItem<'a> { name: String, version: String, - attos: u64, + attos: usize, memory: usize, mbps: String, records: usize, @@ -1029,6 +1057,16 @@ pub struct NodeItem<'a> { } impl NodeItem<'_> { + fn update_spinner_state(state: &mut ThrobberState) { + // Call calc_next on the spinner state + // https://github.com/arkbig/throbber-widgets-tui/issues/19 + if state.index() == i8::MAX { + *state = ThrobberState::default(); + } else { + state.calc_next(); + } + } + fn render_as_row(&mut self, index: usize, area: Rect, f: &mut Frame<'_>) -> Row { let mut row_style = Style::default().fg(GHOST_WHITE); let mut spinner_state = self.spinner_state.clone(); @@ -1100,7 +1138,7 @@ impl NodeItem<'_> { ), self.status.to_string(), ]; - let throbber_area = Rect::new(area.width - 2, area.y + 2 + index as u16, 1, 1); + let throbber_area = Rect::new(area.width - 3, area.y + 2 + index as u16, 1, 1); f.render_stateful_widget(self.spinner.clone(), throbber_area, &mut spinner_state); diff --git a/node-launchpad/src/node_stats.rs b/node-launchpad/src/node_stats.rs index a68d0d1404..339ab24b36 100644 --- a/node-launchpad/src/node_stats.rs +++ b/node-launchpad/src/node_stats.rs @@ -20,7 +20,8 @@ use crate::action::{Action, StatusActions}; #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct IndividualNodeStats { pub service_name: String, - pub forwarded_rewards: u64, + pub forwarded_rewards: usize, + pub rewards_wallet_balance: usize, pub memory_usage_mb: usize, pub bandwidth_inbound: usize, pub bandwidth_outbound: usize, @@ -33,7 +34,8 @@ pub struct IndividualNodeStats { #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct NodeStats { - pub total_forwarded_rewards: u64, + pub total_forwarded_rewards: usize, + pub total_rewards_wallet_balance: usize, pub total_memory_usage_mb: usize, pub individual_stats: Vec, } @@ -41,6 +43,7 @@ pub struct NodeStats { impl NodeStats { fn merge(&mut self, other: &IndividualNodeStats) { self.total_forwarded_rewards += other.forwarded_rewards; + self.total_rewards_wallet_balance += other.rewards_wallet_balance; self.total_memory_usage_mb += other.memory_usage_mb; self.individual_stats.push(other.clone()); // Store individual stats } @@ -135,6 +138,7 @@ impl NodeStats { let individual_stats = IndividualNodeStats { service_name: service_name.clone(), forwarded_rewards: stats.forwarded_rewards, + rewards_wallet_balance: stats.rewards_wallet_balance, memory_usage_mb: stats.memory_usage_mb, bandwidth_inbound: stats.bandwidth_inbound, bandwidth_outbound: stats.bandwidth_outbound, @@ -181,7 +185,17 @@ impl NodeStats { prometheus_parse::Value::Counter(val) | prometheus_parse::Value::Gauge(val) | prometheus_parse::Value::Untyped(val) => { - stats.forwarded_rewards = val as u64; + stats.forwarded_rewards = val as usize; + } + _ => {} + } + } else if sample.metric == 
"sn_node_current_reward_wallet_balance" { + // Attos + match sample.value { + prometheus_parse::Value::Counter(val) + | prometheus_parse::Value::Gauge(val) + | prometheus_parse::Value::Untyped(val) => { + stats.rewards_wallet_balance = val as usize; } _ => {} } diff --git a/release-cycle-info b/release-cycle-info index bdcd486143..9b8978040f 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -14,5 +14,5 @@ # number for all the released binaries. release-year: 2024 release-month: 10 -release-cycle: 3 -release-cycle-counter: 3 +release-cycle: 4 +release-cycle-counter: 2 diff --git a/resources/scripts/release-candidate-description.py b/resources/scripts/release-candidate-description.py index bc9aa8547d..c288bc13fb 100755 --- a/resources/scripts/release-candidate-description.py +++ b/resources/scripts/release-candidate-description.py @@ -70,13 +70,11 @@ def get_pr_list(pr_numbers): def main(pr_numbers): crate_binary_map = { - "sn_faucet": "faucet", "nat-detection": "nat-detection", "node-launchpad": "node-launchpad", - "sn_cli": "safe", + "autonomi-cli": "autonomi", "sn_node": "safenode", "sn_node_manager": "safenode-manager", - "sn_auditor": "sn_auditor", } markdown_doc = [] @@ -103,4 +101,4 @@ def main(pr_numbers): sys.exit(1) file_path = sys.argv[1] - main(read_pr_numbers(file_path)) \ No newline at end of file + main(read_pr_numbers(file_path)) diff --git a/resources/scripts/remove-s3-binary-archives.sh b/resources/scripts/remove-s3-binary-archives.sh index 71cdd0159b..14aa794a9b 100755 --- a/resources/scripts/remove-s3-binary-archives.sh +++ b/resources/scripts/remove-s3-binary-archives.sh @@ -15,26 +15,22 @@ architectures=( "x86_64-unknown-linux-musl" ) declare -A binary_crate_dir_mappings=( - ["faucet"]="sn_faucet" ["nat-detection"]="nat-detection" ["node-launchpad"]="node-launchpad" - ["safe"]="sn_cli" + ["autonomi"]="autonomi-cli" ["safenode"]="sn_node" ["safenode-manager"]="sn_node_manager" ["safenode_rpc_client"]="sn_node_rpc_client" ["safenodemand"]="sn_node_manager" - ["sn_auditor"]="sn_auditor" ) declare -A binary_s3_bucket_mappings=( - ["faucet"]="sn-faucet" ["nat-detection"]="nat-detection" ["node-launchpad"]="node-launchpad" - ["safe"]="sn-cli" + ["autonomi"]="autonomi-cli" ["safenode"]="sn-node" ["safenode-manager"]="sn-node-manager" ["safenode_rpc_client"]="sn-node-rpc-client" ["safenodemand"]="sn-node-manager" - ["sn_auditor"]="sn-auditor" ) for arch in "${architectures[@]}"; do diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 8c5c4b7dfa..d20a5f947b 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.16" +version = "0.1.17" build = "build.rs" include = ["Cargo.toml", "src/**/*", "build.rs"] diff --git a/sn_build_info/src/release_info.rs b/sn_build_info/src/release_info.rs index 3506c9cc96..e9c752684e 100644 --- a/sn_build_info/src/release_info.rs +++ b/sn_build_info/src/release_info.rs @@ -1,5 +1,4 @@ pub const RELEASE_YEAR: &str = "2024"; pub const RELEASE_MONTH: &str = "10"; -pub const RELEASE_CYCLE: &str = "3"; -pub const RELEASE_CYCLE_COUNTER: &str = "3"; - +pub const RELEASE_CYCLE: &str = "4"; +pub const RELEASE_CYCLE_COUNTER: &str = "2"; diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index 73326d9f36..37c9d84cb8 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -8,15 +8,16 @@ license = "GPL-3.0" name = "sn_evm" readme = "README.md" repository = 
"https://github.com/maidsafe/safe_network" -version = "0.1.1" +version = "0.1.2" [features] test-utils = [] local = ["evmlib/local"] +external-signer = ["evmlib/external-signer"] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.1" } +evmlib = { path = "../evmlib", version = "0.1.2" } hex = "~0.4.3" lazy_static = "~1.4.0" libp2p = { version = "0.53", features = ["identify", "kad"] } diff --git a/sn_evm/src/error.rs b/sn_evm/src/error.rs index 386683b5aa..afbd02a004 100644 --- a/sn_evm/src/error.rs +++ b/sn_evm/src/error.rs @@ -27,7 +27,6 @@ pub enum EvmError { NumericOverflow, #[error("Not enough balance, {0} available, {1} required")] NotEnoughBalance(AttoTokens, AttoTokens), - #[error("Invalid quote public key")] InvalidQuotePublicKey, } diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs index a62fa5c0fd..49956db39e 100644 --- a/sn_evm/src/lib.rs +++ b/sn_evm/src/lib.rs @@ -10,8 +10,12 @@ extern crate tracing; pub use evmlib::common::Address as RewardsAddress; +pub use evmlib::common::Address as EvmAddress; pub use evmlib::common::QuotePayment; pub use evmlib::common::{QuoteHash, TxHash}; +pub use evmlib::cryptography; +#[cfg(feature = "external-signer")] +pub use evmlib::external_signer; pub use evmlib::utils; pub use evmlib::utils::get_evm_network_from_env; pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL}; diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 090e3f8a12..8b6d7d8802 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.37" +version = "0.2.38" [dependencies] chrono = "~0.4.19" diff --git a/sn_logging/src/layers.rs b/sn_logging/src/layers.rs index 91f771e6b9..4bc1f46996 100644 --- a/sn_logging/src/layers.rs +++ b/sn_logging/src/layers.rs @@ -266,12 +266,10 @@ fn get_logging_targets(logging_env_value: &str) -> Result> if contains_keyword_all_sn_logs || contains_keyword_verbose_sn_logs { let mut t = BTreeMap::from_iter(vec![ // bins - ("autonomi_cli".to_string(), Level::TRACE), + ("autonomi-cli".to_string(), Level::TRACE), ("evm_testnet".to_string(), Level::TRACE), - ("faucet".to_string(), Level::TRACE), ("safenode".to_string(), Level::TRACE), ("safenode_rpc_client".to_string(), Level::TRACE), - ("safe".to_string(), Level::TRACE), ("safenode_manager".to_string(), Level::TRACE), ("safenodemand".to_string(), Level::TRACE), // libs @@ -279,8 +277,6 @@ fn get_logging_targets(logging_env_value: &str) -> Result> ("evmlib".to_string(), Level::TRACE), ("sn_evm".to_string(), Level::TRACE), ("sn_build_info".to_string(), Level::TRACE), - ("sn_client".to_string(), Level::TRACE), - ("sn_faucet".to_string(), Level::TRACE), ("sn_logging".to_string(), Level::TRACE), ("sn_node_manager".to_string(), Level::TRACE), ("sn_node_rpc_client".to_string(), Level::TRACE), diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index 4a550a58a8..103d1d628e 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.17" +version = "0.1.18" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 4f2270ff37..1a6bdc5b67 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = 
"https://github.com/maidsafe/safe_network" -version = "0.19.0" +version = "0.19.1" [features] default = [] @@ -54,11 +54,11 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0" } -sn_registers = { path = "../sn_registers", version = "0.4.0" } -sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } +sn_registers = { path = "../sn_registers", version = "0.4.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index f8b7cf1e59..ec6c019a88 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -7,45 +7,19 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{driver::PendingGetClosestType, SwarmDriver}; -use rand::{rngs::OsRng, Rng}; use tokio::time::Duration; -use crate::target_arch::{interval, Instant, Interval}; +use crate::target_arch::Instant; /// The default interval at which NetworkDiscovery is triggered. The interval is increased as more peers are added to the /// routing table. -pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(10); - -/// Every BOOTSTRAP_CONNECTED_PEERS_STEP connected peer, we step up the BOOTSTRAP_INTERVAL to slow down bootstrapping -/// process -const BOOTSTRAP_CONNECTED_PEERS_STEP: u32 = 5; - -/// If the previously added peer has been before LAST_PEER_ADDED_TIME_LIMIT, then we should slowdown the bootstrapping -/// process. This is to make sure we don't flood the network with `FindNode` msgs. -const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); - -/// A minimum interval to prevent bootstrap got triggered too often -const LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(30); - -/// The bootstrap interval to use if we haven't added any new peers in a while. -const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; +pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(15); impl SwarmDriver { /// This functions triggers network discovery based on when the last peer was added to the RT and the number of - /// peers in RT. The function also returns a new bootstrap interval that is proportional to the number of - /// peers in RT, so more peers in RT, the longer the interval. - pub(crate) async fn run_bootstrap_continuously( - &mut self, - current_bootstrap_interval: Duration, - ) -> Option { - let (should_bootstrap, new_interval) = self - .bootstrap - .should_we_bootstrap(self.peers_in_rt as u32, current_bootstrap_interval) - .await; - if should_bootstrap { - self.trigger_network_discovery(); - } - new_interval + /// peers in RT. 
+ pub(crate) fn run_bootstrap_continuously(&mut self) { + self.trigger_network_discovery(); } pub(crate) fn trigger_network_discovery(&mut self) { @@ -61,27 +35,27 @@ impl SwarmDriver { .get_closest_peers(addr.as_bytes()); let _ = self.pending_get_closest_peers.insert( query_id, - (PendingGetClosestType::NetworkDiscovery, Default::default()), + ( + addr, + PendingGetClosestType::NetworkDiscovery, + Default::default(), + ), ); } self.bootstrap.initiated(); - debug!("Trigger network discovery took {:?}", now.elapsed()); + info!("Trigger network discovery took {:?}", now.elapsed()); } } /// Tracks and helps with the continuous kad::bootstrapping process pub(crate) struct ContinuousBootstrap { - initial_bootstrap_done: bool, - last_peer_added_instant: Instant, last_bootstrap_triggered: Option, } impl ContinuousBootstrap { pub(crate) fn new() -> Self { Self { - initial_bootstrap_done: false, - last_peer_added_instant: Instant::now(), last_bootstrap_triggered: None, } } @@ -90,76 +64,4 @@ impl ContinuousBootstrap { pub(crate) fn initiated(&mut self) { self.last_bootstrap_triggered = Some(Instant::now()); } - - /// Notify about a newly added peer to the RT. This will help with slowing down the bootstrap process. - /// Returns `true` if we have to perform the initial bootstrapping. - pub(crate) fn notify_new_peer(&mut self) -> bool { - self.last_peer_added_instant = Instant::now(); - // true to kick off the initial bootstrapping. `run_bootstrap_continuously` might kick of so soon that we might - // not have a single peer in the RT and we'd not perform any bootstrapping for a while. - if !self.initial_bootstrap_done { - self.initial_bootstrap_done = true; - true - } else { - false - } - } - - /// Returns `true` if we should carry out the Kademlia Bootstrap process immediately. - /// Also optionally returns the new interval to re-bootstrap. - pub(crate) async fn should_we_bootstrap( - &self, - peers_in_rt: u32, - current_interval: Duration, - ) -> (bool, Option) { - let is_ongoing = if let Some(last_bootstrap_triggered) = self.last_bootstrap_triggered { - last_bootstrap_triggered.elapsed() < LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT - } else { - false - }; - let should_bootstrap = !is_ongoing && peers_in_rt >= 1; - - // if it has been a while (LAST_PEER_ADDED_TIME_LIMIT) since we have added a new peer to our RT, then, slowdown - // the bootstrapping process. - // Don't slow down if we haven't even added one peer to our RT. - if self.last_peer_added_instant.elapsed() > LAST_PEER_ADDED_TIME_LIMIT && peers_in_rt != 0 { - // To avoid a heart beat like cpu usage due to the 1K candidates generation, - // randomize the interval within certain range - let no_peer_added_slowdown_interval: u64 = OsRng.gen_range( - NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S / 2..NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S, - ); - let no_peer_added_slowdown_interval_duration = - Duration::from_secs(no_peer_added_slowdown_interval); - info!( - "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" - ); - - // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. 
-            #[cfg_attr(target_arch = "wasm32", allow(unused_mut))]
-            let mut new_interval = interval(no_peer_added_slowdown_interval_duration);
-            #[cfg(not(target_arch = "wasm32"))]
-            new_interval.tick().await;
-
-            return (should_bootstrap, Some(new_interval));
-        }
-
-        // increment bootstrap_interval in steps of BOOTSTRAP_INTERVAL every BOOTSTRAP_CONNECTED_PEERS_STEP
-        let step = peers_in_rt / BOOTSTRAP_CONNECTED_PEERS_STEP;
-        let step = std::cmp::max(1, step);
-        let new_interval = BOOTSTRAP_INTERVAL * step;
-        let new_interval = if new_interval > current_interval {
-            info!("More peers have been added to our RT!. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {new_interval:?}");
-
-            // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32.
-            #[cfg_attr(target_arch = "wasm32", allow(unused_mut))]
-            let mut interval = interval(new_interval);
-            #[cfg(not(target_arch = "wasm32"))]
-            interval.tick().await;
-
-            Some(interval)
-        } else {
-            None
-        };
-        (should_bootstrap, new_interval)
-    }
 }
diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index b0eda19190..48cb8f1307 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -7,33 +7,34 @@
 // permissions and limitations relating to use of the SAFE Network Software.

 use crate::{
+    close_group_majority,
     driver::{PendingGetClosestType, SwarmDriver},
     error::{NetworkError, Result},
     event::TerminateNodeReason,
     log_markers::Marker,
-    multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE,
-    REPLICATION_PEERS_COUNT,
+    multiaddr_pop_p2p, sort_peers_by_address_and_limit, GetRecordCfg, GetRecordError, MsgResponder,
+    NetworkEvent, CLOSE_GROUP_SIZE,
 };
 use libp2p::{
     kad::{
         store::{Error as StoreError, RecordStore},
-        Quorum, Record, RecordKey,
+        KBucketDistance, Quorum, Record, RecordKey,
     },
     Multiaddr, PeerId,
 };
 use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics};
 use sn_protocol::{
     messages::{Cmd, Request, Response},
-    storage::{RecordHeader, RecordKind, RecordType},
+    storage::{get_type_from_record, RecordType},
     NetworkAddress, PrettyPrintRecordKey,
 };
 use std::{
+    cmp::Ordering,
     collections::{BTreeMap, HashMap},
     fmt::Debug,
     time::Duration,
 };
 use tokio::sync::oneshot;
-use xor_name::XorName;

 use crate::target_arch::Instant;

@@ -42,6 +43,9 @@
 const MAX_CONTINUOUS_HDD_WRITE_ERROR: usize = 5;

 // Shall be synced with `sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S`
 const REPLICATION_TIMEOUT: Duration = Duration::from_secs(45);

+// Throttles replication to at most once every 30 seconds
+const MIN_REPLICATION_INTERVAL_S: Duration = Duration::from_secs(30);
+
 #[derive(Debug, Eq, PartialEq)]
 pub enum NodeIssue {
     /// Data Replication failed
@@ -56,6 +60,15 @@
 /// Commands to send to the Swarm
 pub enum LocalSwarmCmd {
+    // Returns all the peers from all the k-buckets from the local Routing Table.
+    // This excludes our own PeerId.
+    GetAllLocalPeersExcludingSelf {
+        sender: oneshot::Sender<Vec<PeerId>>,
+    },
+    /// Return the current GetRange as determined by the SwarmDriver
+    GetCurrentRequestRange {
+        sender: oneshot::Sender<KBucketDistance>,
+    },
     /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that
     /// bucket.
     GetKBuckets {
@@ -67,8 +80,8 @@ pub enum LocalSwarmCmd {
         sender: oneshot::Sender<BTreeMap<u32, Vec<PeerId>>>,
     },
     // Get closest peers from the local RoutingTable
-    GetCloseGroupLocalPeers {
-        key: NetworkAddress,
+    GetCloseRangeLocalPeers {
+        address: NetworkAddress,
         sender: oneshot::Sender<Vec<PeerId>>,
     },
     GetSwarmLocalState(oneshot::Sender<SwarmLocalState>),
@@ -132,7 +145,7 @@ pub enum LocalSwarmCmd {
     /// NOTE: This does result in outgoing messages, but is produced locally
     TriggerIntervalReplication,
     /// Triggers irrelevant record cleanup
-    TriggerUnrelevantRecordCleanup,
+    TriggerIrrelevantRecordCleanup,
 }

 /// Commands to send to the Swarm
@@ -213,15 +226,11 @@ impl Debug for LocalSwarmCmd {
                     PrettyPrintRecordKey::from(key)
                 )
             }
-
             LocalSwarmCmd::GetClosestKLocalPeers { .. } => {
                 write!(f, "LocalSwarmCmd::GetClosestKLocalPeers")
             }
-            LocalSwarmCmd::GetCloseGroupLocalPeers { key, .. } => {
-                write!(
-                    f,
-                    "LocalSwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}"
-                )
+            LocalSwarmCmd::GetCloseRangeLocalPeers { address: key, .. } => {
+                write!(f, "LocalSwarmCmd::GetCloseRangeLocalPeers {{ address: {key:?} }}")
             }
             LocalSwarmCmd::GetLocalStoreCost { .. } => {
                 write!(f, "LocalSwarmCmd::GetLocalStoreCost")
@@ -242,6 +251,12 @@ impl Debug for LocalSwarmCmd {
             LocalSwarmCmd::GetKBuckets { .. } => {
                 write!(f, "LocalSwarmCmd::GetKBuckets")
             }
+            LocalSwarmCmd::GetCurrentRequestRange { .. } => {
+                write!(f, "LocalSwarmCmd::GetCurrentRequestRange")
+            }
+            LocalSwarmCmd::GetAllLocalPeersExcludingSelf { .. } => {
+                write!(f, "LocalSwarmCmd::GetAllLocalPeersExcludingSelf")
+            }
             LocalSwarmCmd::GetSwarmLocalState { .. } => {
                 write!(f, "LocalSwarmCmd::GetSwarmLocalState")
             }
@@ -281,7 +296,7 @@ impl Debug for LocalSwarmCmd {
             LocalSwarmCmd::TriggerIntervalReplication => {
                 write!(f, "LocalSwarmCmd::TriggerIntervalReplication")
             }
-            LocalSwarmCmd::TriggerUnrelevantRecordCleanup => {
-                write!(f, "LocalSwarmCmd::TriggerUnrelevantRecordCleanup")
+            LocalSwarmCmd::TriggerIrrelevantRecordCleanup => {
+                write!(f, "LocalSwarmCmd::TriggerIrrelevantRecordCleanup")
             }
         }
@@ -472,6 +487,7 @@ impl SwarmDriver {
         let _ = self.pending_get_closest_peers.insert(
             query_id,
             (
+                key,
                 PendingGetClosestType::FunctionCall(sender),
                 Default::default(),
             ),
         );
@@ -541,6 +557,7 @@
         Ok(())
     }
+
     pub(crate) fn handle_local_cmd(&mut self, cmd: LocalSwarmCmd) -> Result<(), NetworkError> {
         let start = Instant::now();
         let mut cmd_string;
@@ -624,28 +641,7 @@
                 let key = record.key.clone();
                 let record_key = PrettyPrintRecordKey::from(&key);

-                let record_type = match RecordHeader::from_record(&record) {
-                    Ok(record_header) => {
-                        match record_header.kind {
-                            RecordKind::Chunk => RecordType::Chunk,
-                            RecordKind::Scratchpad => RecordType::Scratchpad,
-                            RecordKind::Spend | RecordKind::Register => {
-                                let content_hash = XorName::from_content(&record.value);
-                                RecordType::NonChunk(content_hash)
-                            }
-                            RecordKind::ChunkWithPayment
-                            | RecordKind::RegisterWithPayment
-                            | RecordKind::ScratchpadWithPayment => {
-                                error!("Record {record_key:?} with payment shall not be stored locally.");
-                                return Err(NetworkError::InCorrectRecordHeader);
-                            }
-                        }
-                    }
-                    Err(err) => {
-                        error!("For record {record_key:?}, failed to parse record_header {err:?}");
-                        return Err(NetworkError::InCorrectRecordHeader);
-                    }
-                };
+                let record_type = get_type_from_record(&record)?;

                 let result = self
                     .swarm
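The inline `match` removed above is centralised behind `get_type_from_record` in sn_protocol, whose body is not shown in this diff. The sketch below reconstructs the same decision table from the removed code, with `RecordKind`/`RecordType` stubbed locally and `xor_name = "5"` assumed, so treat it as an approximation rather than the sn_protocol source: plain kinds map straight through, spends/registers are keyed by a hash of their content, and payment-carrying kinds must never be stored locally.

use xor_name::XorName; // xor_name = "5", already a workspace dependency

#[allow(dead_code)]
enum RecordKind {
    Chunk,
    Scratchpad,
    Spend,
    Register,
    ChunkWithPayment,
    RegisterWithPayment,
    ScratchpadWithPayment,
}

#[derive(Debug, PartialEq)]
enum RecordType {
    Chunk,
    Scratchpad,
    NonChunk(XorName),
}

// Mirrors the match deleted in the hunk above.
fn type_from_kind(kind: RecordKind, value: &[u8]) -> Result<RecordType, String> {
    match kind {
        RecordKind::Chunk => Ok(RecordType::Chunk),
        RecordKind::Scratchpad => Ok(RecordType::Scratchpad),
        RecordKind::Spend | RecordKind::Register => {
            Ok(RecordType::NonChunk(XorName::from_content(value)))
        }
        RecordKind::ChunkWithPayment
        | RecordKind::RegisterWithPayment
        | RecordKind::ScratchpadWithPayment => {
            Err("record with payment shall not be stored locally".into())
        }
    }
}

fn main() {
    assert_eq!(type_from_kind(RecordKind::Chunk, b"data"), Ok(RecordType::Chunk));
    assert!(type_from_kind(RecordKind::ChunkWithPayment, b"data").is_err());
}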
@@ -694,16 +690,8 @@

                 // The record_store will prune far records and setup a `distance range`,
                 // once reached the `max_records` cap.
-                if let Some(distance) = self
-                    .swarm
-                    .behaviour_mut()
-                    .kademlia
-                    .store_mut()
-                    .get_farthest_replication_distance_bucket()
-                {
-                    self.replication_fetcher
-                        .set_replication_distance_range(distance);
-                }
+                self.replication_fetcher
+                    .set_replication_distance_range(self.get_request_range());

                 if let Err(err) = result {
                     error!("Can't store verified record {record_key:?} locally: {err:?}");
@@ -760,6 +748,10 @@
                     .record_addresses();
                 let _ = sender.send(addresses);
             }
+            LocalSwarmCmd::GetCurrentRequestRange { sender } => {
+                cmd_string = "GetCurrentRequestRange";
+                let _ = sender.send(self.get_request_range());
+            }
             LocalSwarmCmd::GetKBuckets { sender } => {
                 cmd_string = "GetKBuckets";
                 let mut ilog2_kbuckets = BTreeMap::new();
@@ -778,9 +770,13 @@
                 }
                 let _ = sender.send(ilog2_kbuckets);
             }
-            LocalSwarmCmd::GetCloseGroupLocalPeers { key, sender } => {
-                cmd_string = "GetCloseGroupLocalPeers";
-                let key = key.as_kbucket_key();
+            LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender } => {
+                cmd_string = "GetAllLocalPeersExcludingSelf";
+                let _ = sender.send(self.get_all_local_peers_excluding_self());
+            }
+            LocalSwarmCmd::GetCloseRangeLocalPeers { address, sender } => {
+                cmd_string = "GetCloseRangeLocalPeers";
+                let key = address.as_kbucket_key();
                 // calls `kbuckets.closest_keys(key)` internally, which orders the peers by
                 // increasing distance
                 // Note it will return all peers, hence a chop down is required.
                 let closest_peers = self
                     .swarm
                     .behaviour_mut()
                     .kademlia
                     .get_closest_local_peers(&key)
                     .map(|peer| peer.into_preimage())
-                    .take(CLOSE_GROUP_SIZE)
                     .collect();

                 let _ = sender.send(closest_peers);
@@ -857,13 +852,13 @@
                     self.send_event(NetworkEvent::KeysToFetchForReplication(new_keys_to_fetch));
                 }
             }
-            LocalSwarmCmd::TriggerUnrelevantRecordCleanup => {
-                cmd_string = "TriggerUnrelevantRecordCleanup";
+            LocalSwarmCmd::TriggerIrrelevantRecordCleanup => {
+                cmd_string = "TriggerIrrelevantRecordCleanup";
                 self.swarm
                     .behaviour_mut()
                     .kademlia
                     .store_mut()
-                    .cleanup_unrelevant_records();
+                    .cleanup_irrelevant_records();
             }
         }
@@ -981,23 +976,86 @@
         let _ = self.quotes_history.insert(peer_id, quote);
     }

-    fn try_interval_replication(&mut self) -> Result<()> {
-        // get closest peers from buckets, sorted by increasing distance to us
-        let our_peer_id = self.self_peer_id.into();
-        let closest_k_peers = self
+    /// From all local peers, returns any within (and just exceeding) current get_range for a given key
+    pub(crate) fn get_filtered_peers_exceeding_range(
+        &mut self,
+        target_address: &NetworkAddress,
+    ) -> Vec<PeerId> {
+        let acceptable_distance_range = self.get_request_range();
+        let target_key = target_address.as_kbucket_key();
+
+        let sorted_peers: Vec<_> = self
             .swarm
             .behaviour_mut()
             .kademlia
-            .get_closest_local_peers(&our_peer_id)
-            // Map KBucketKey to PeerId.
-            .map(|key| key.into_preimage());
-
-        // Only grab the closest nodes within the REPLICATE_RANGE
-        let mut replicate_targets = closest_k_peers
-            .into_iter()
-            // add some leeway to allow for divergent knowledge
-            .take(REPLICATION_PEERS_COUNT)
-            .collect::<Vec<_>>();
+            .get_closest_local_peers(&target_key)
+            .collect();
+
+        // Binary search to find the index where we exceed the acceptable range
+        let split_index = sorted_peers
+            .binary_search_by(|key| {
+                let distance = target_key.distance(key);
+                if distance >= acceptable_distance_range {
+                    Ordering::Greater
+                } else {
+                    Ordering::Less
+                }
+            })
+            .unwrap_or_else(|x| x);
+
+        // Convert KBucketKey to PeerId for all peers within range
+        sorted_peers[..split_index]
+            .iter()
+            .map(|key| key.into_preimage())
+            .collect()
+    }
+
+    /// From all local peers, returns any within current get_range for a given key
+    /// Excludes self
+    pub(crate) fn get_filtered_peers_exceeding_range_or_closest_nodes(
+        &mut self,
+        target_address: &NetworkAddress,
+    ) -> Vec<PeerId> {
+        let filtered_peers = self.get_filtered_peers_exceeding_range(target_address);
+        let closest_node_buffer_zone = CLOSE_GROUP_SIZE + close_group_majority();
+        if filtered_peers.len() >= closest_node_buffer_zone {
+            filtered_peers
+        } else {
+            warn!("Insufficient peers within replication range of {target_address:?}. Falling back to use {closest_node_buffer_zone:?} closest nodes");
+            let all_peers = self.get_all_local_peers_excluding_self();
+            match sort_peers_by_address_and_limit(
+                &all_peers,
+                target_address,
+                closest_node_buffer_zone,
+            ) {
+                Ok(peers) => peers.iter().map(|p| **p).collect(),
+                Err(err) => {
+                    error!("sorting peers close to {target_address:?} failed, sort error: {err:?}");
+                    warn!(
+                        "Using all peers within range even though it's less than CLOSE_GROUP_SIZE."
diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index ec716cb4df..e70cc6c68d 100644
--- a/sn_networking/src/driver.rs
+++ b/sn_networking/src/driver.rs
@@ -20,6 +20,7 @@ use crate::{
record_store_api::UnifiedRecordStore,
relay_manager::RelayManager,
replication_fetcher::ReplicationFetcher,
+ sort_peers_by_distance_to,
target_arch::{interval, spawn, Instant},
GetRecordError, Network, CLOSE_GROUP_SIZE,
};
@@ -32,7 +33,6 @@ use futures::future::Either;
use futures::StreamExt;
#[cfg(feature = "local")]
use libp2p::mdns;
-use libp2p::Transport as _;
use libp2p::{core::muxing::StreamMuxerBox, relay};
use libp2p::{
identity::Keypair,
@@ -45,6 +45,7 @@ use libp2p::{
},
Multiaddr, PeerId,
};
+use libp2p::{kad::KBucketDistance, Transport as _};
#[cfg(feature = "open-metrics")]
use prometheus_client::metrics::info::Info;
use sn_evm::PaymentQuote;
@@ -52,15 +53,17 @@ use sn_protocol::{
messages::{ChunkProof, Nonce, Request, Response},
storage::{try_deserialize_record, RetryStrategy},
version::{
- IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR,
- REQ_RESPONSE_VERSION_STR,
+ get_key_version_str, IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR,
+ IDENTIFY_PROTOCOL_STR, REQ_RESPONSE_VERSION_STR,
},
NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey,
};
use sn_registers::SignedRegister;
use std::{
- collections::{btree_map::Entry, BTreeMap, HashMap, HashSet},
+ collections::{btree_map::Entry, BTreeMap, HashMap, HashSet, VecDeque},
fmt::Debug,
+ fs,
+ io::{Read, Write},
net::SocketAddr,
num::NonZeroUsize,
path::PathBuf,
@@ -77,6 +80,9 @@ pub(crate) const CLOSET_RECORD_CHECK_INTERVAL: Duration = Duration::from_secs(15);
/// Interval over which we query relay manager to check if we can make any more reservations.
pub(crate) const RELAY_MANAGER_RESERVATION_INTERVAL: Duration = Duration::from_secs(30);

+// Number of range distances to keep in the circular buffer
+pub const GET_RANGE_STORAGE_LIMIT: usize = 100;
+
const KAD_STREAM_PROTOCOL_ID: StreamProtocol = StreamProtocol::new("/autonomi/kad/1.0.0");

/// The ways in which the Get Closest queries are used.
@@ -87,7 +93,9 @@ pub(crate) enum PendingGetClosestType {
/// These are queries made by a function at the upper layers and contains a channel to send the result back.
FunctionCall(oneshot::Sender<Vec<PeerId>>),
}
-type PendingGetClosest = HashMap<QueryId, (PendingGetClosestType, Vec<PeerId>)>;
+
+/// Maps a query to the address, the type of query and the peers that are being queried.
+type PendingGetClosest = HashMap<QueryId, (NetworkAddress, PendingGetClosestType, Vec<PeerId>)>;

/// Using XorName to differentiate different record content under the same key.
type GetRecordResultMap = HashMap<XorName, (Record, HashSet<PeerId>)>;

pub const MAX_PACKET_SIZE: usize = 1024 * 1024 * 5; // the chunk size is 1mb, so should be higher than that to prevent failures
// Timeout for requests sent/received through the request_response behaviour.
const REQUEST_TIMEOUT_DEFAULT_S: Duration = Duration::from_secs(30);
// Sets the keep-alive timeout of idle connections.
-const CONNECTION_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(30);
+const CONNECTION_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(10);
// Interval of resending identify to connected peers.
const RESEND_IDENTIFY_INVERVAL: Duration = Duration::from_secs(3600);
@@ -123,6 +131,9 @@ const NETWORKING_CHANNEL_SIZE: usize = 10_000;
/// Time before a Kad query times out if no response is received
const KAD_QUERY_TIMEOUT_S: Duration = Duration::from_secs(10);

+/// Periodic bootstrap interval
+const KAD_PERIODIC_BOOTSTRAP_INTERVAL_S: Duration = Duration::from_secs(180 * 60);
+
// Init during compilation, instead of runtime error that should never happen
// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441)
const REPLICATION_FACTOR: NonZeroUsize = match NonZeroUsize::new(CLOSE_GROUP_SIZE) {
@@ -349,13 +360,13 @@ impl NetworkBuilder {
.set_publication_interval(None)
// 1mb packet size
.set_max_packet_size(MAX_PACKET_SIZE)
- // How many nodes _should_ store data.
- .set_replication_factor(REPLICATION_FACTOR)
.set_query_timeout(KAD_QUERY_TIMEOUT_S)
// Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes.
.disjoint_query_paths(true)
// Records never expire
.set_record_ttl(None)
+ .set_replication_factor(REPLICATION_FACTOR)
+ .set_periodic_bootstrap_interval(Some(KAD_PERIODIC_BOOTSTRAP_INTERVAL_S))
// Emit PUT events for validation prior to insertion into the RecordStore.
// This is no longer needed as the record_storage::put now can carry out validation.
// .set_record_filtering(KademliaStoreInserts::FilterBoth)
@@ -363,8 +374,19 @@ impl NetworkBuilder {
.set_provider_publication_interval(None);

let store_cfg = {
- // Configures the disk_store to store records under the provided path and increase the max record size
let storage_dir_path = root_dir.join("record_store");
+ // In case the node instance is restarted for a different version of the network,
+ // the previous storage folder shall be wiped out,
+ // to avoid bringing old data into the new network.
+ check_and_wipe_storage_dir_if_necessary(
+ root_dir.clone(),
+ storage_dir_path.clone(),
+ get_key_version_str(),
+ )?;
+
+ // Configures the disk_store to store records under the provided path and increase the max record size
+ // The storage dir is suffixed with the key_version string to avoid bringing records from an old network into a new one
+
if let Err(error) = std::fs::create_dir_all(&storage_dir_path) {
return Err(NetworkError::FailedToCreateRecordStoreDir {
path: storage_dir_path,
@@ -428,10 +450,9 @@ impl NetworkBuilder {
let _ = kad_cfg
.set_kbucket_inserts(libp2p::kad::BucketInserts::Manual)
.set_max_packet_size(MAX_PACKET_SIZE)
+ .set_replication_factor(REPLICATION_FACTOR)
// Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes.
- .disjoint_query_paths(true)
- // How many nodes _should_ store data.
- .set_replication_factor(REPLICATION_FACTOR);
+ .disjoint_query_paths(true);

let (network, net_event_recv, driver) = self.build(
kad_cfg,
@@ -697,6 +718,10 @@ impl NetworkBuilder {
bad_nodes: Default::default(),
quotes_history: Default::default(),
replication_targets: Default::default(),
+ range_distances: VecDeque::with_capacity(GET_RANGE_STORAGE_LIMIT),
+ first_contact_made: false,
+ last_replication: None,
+ last_connection_pruning_time: Instant::now(),
};

let network = Network::new(
@@ -710,6 +735,45 @@ impl NetworkBuilder {
}
}

+fn check_and_wipe_storage_dir_if_necessary(
+ root_dir: PathBuf,
+ storage_dir_path: PathBuf,
+ cur_version_str: String,
+) -> Result<()> {
+ let mut prev_version_str = String::new();
+ let version_file = root_dir.join("network_key_version");
+ {
+ match fs::File::open(version_file.clone()) {
+ Ok(mut file) => {
+ file.read_to_string(&mut prev_version_str)?;
+ }
+ Err(err) => {
+ warn!("Failed in accessing version file {version_file:?}: {err:?}");
+ // Assuming file was not created yet
+ info!("Creating a new version file at {version_file:?}");
+ fs::File::create(version_file.clone())?;
+ }
+ }
+ }
+
+ // In case of version mismatch:
+ // * the storage_dir shall be wiped out
+ // * the version file shall be updated
+ if cur_version_str != prev_version_str {
+ warn!("Trying to wipe out storage dir {storage_dir_path:?}, as cur_version {cur_version_str:?} doesn't match prev_version {prev_version_str:?}");
+ let _ = fs::remove_dir_all(storage_dir_path);
+
+ let mut file = fs::OpenOptions::new()
+ .write(true)
+ .truncate(true)
+ .open(version_file.clone())?;
+ info!("Writing cur_version {cur_version_str:?} into version file at {version_file:?}");
+ file.write_all(cur_version_str.as_bytes())?;
+ }
+
+ Ok(())
+}
+
pub struct SwarmDriver {
pub(crate) swarm: Swarm<NodeBehaviour>,
pub(crate) self_peer_id: PeerId,
@@ -732,7 +796,7 @@ pub struct SwarmDriver {
pub(crate) local_cmd_sender: mpsc::Sender<LocalSwarmCmd>,
local_cmd_receiver: mpsc::Receiver<LocalSwarmCmd>,
network_cmd_receiver: mpsc::Receiver<NetworkSwarmCmd>,
- event_sender: mpsc::Sender<NetworkEvent>, // Use `self.send_event()` to send a NetworkEvent.
+ pub(crate) event_sender: mpsc::Sender<NetworkEvent>, // Use `self.send_event()` to send a NetworkEvent.

/// Trackers for underlying behaviour related events
pub(crate) pending_get_closest_peers: PendingGetClosest,
@@ -755,6 +819,18 @@ pub struct SwarmDriver {
pub(crate) bad_nodes: BadNodes,
pub(crate) quotes_history: BTreeMap<PeerId, PaymentQuote>,
pub(crate) replication_targets: BTreeMap<PeerId, Instant>,
+
+ /// when was the last replication event
+ /// This allows us to throttle replication no matter how it is triggered
+ pub(crate) last_replication: Option<Instant>,
+ // The recent range_distances calculated by the node
+ // Each update is generated when there is a routing table change
+ // We keep the most recent GET_RANGE_STORAGE_LIMIT of these and derive our X distance from them.
+ pub(crate) range_distances: VecDeque<KBucketDistance>,
+ // have we found our initial peer
+ pub(crate) first_contact_made: bool,
+ /// when was the last outdated connection pruning undertaken.
+ pub(crate) last_connection_pruning_time: Instant,
}

impl SwarmDriver {
@@ -805,28 +881,24 @@ impl SwarmDriver {
// logging for handling events happens inside handle_swarm_events
// otherwise we're rewriting match statements etc around this anyway
if let Err(err) = self.handle_swarm_events(swarm_event) {
- warn!("Error while handling swarm event: {err}");
+ warn!("Issue while handling swarm event: {err}");
}
},
// thereafter we can check our intervals
// runs every bootstrap_interval time
_ = bootstrap_interval.tick() => {
- if let Some(new_interval) = self.run_bootstrap_continuously(bootstrap_interval.period()).await {
- bootstrap_interval = new_interval;
- }
+ self.run_bootstrap_continuously();
}
_ = set_farthest_record_interval.tick() => {
if !self.is_client {
- let closest_k_peers = self.get_closest_k_value_local_peers();
-
- if let Some(distance) = self.get_responsbile_range_estimate(&closest_k_peers) {
- info!("Set responsible range to {distance}");
- // set any new distance to farthest record in the store
- self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(distance);
- // the distance range within the replication_fetcher shall be in sync as well
- self.replication_fetcher.set_replication_distance_range(distance);
- }
+ let get_range = self.get_request_range();
+ self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(get_range);
+
+ // the distance range within the replication_fetcher shall be in sync as well
+ self.replication_fetcher.set_replication_distance_range(get_range);
+
+
}
}
_ = relay_manager_reservation_interval.tick() => self.relay_manager.try_connecting_to_relay(&mut self.swarm, &self.bad_nodes),
@@ -838,32 +910,90 @@ impl SwarmDriver {
// ---------- Crate helpers -------------------
// --------------------------------------------

- /// Uses the closest k peers to estimate the farthest address as
- /// `K_VALUE / 2`th peer's bucket.
- fn get_responsbile_range_estimate(
+ /// Defines a new X distance range to be used for GETs and data replication
+ ///
+ /// Derived from the distances between a queried address and the peers
+ /// returned for it by network discovery; a new sample is stashed on each
+ /// routing table change.
+ ///
+ pub(crate) fn set_request_range(
&mut self,
- // Sorted list of closest k peers to our peer id.
- closest_k_peers: &[PeerId],
- ) -> Option<u32> {
- // if we don't have enough peers we don't set the distance range yet.
- let mut farthest_distance = None;
-
- if closest_k_peers.is_empty() {
- return farthest_distance;
+ queried_address: NetworkAddress,
+ network_discovery_peers: &[PeerId],
+ ) {
+ info!(
+ "Adding a GetRange to our stash deriving from {:?} peers",
+ network_discovery_peers.len()
+ );
+
+ let sorted_distances = sort_peers_by_distance_to(network_discovery_peers, queried_address);
+
+ let mapped: Vec<_> = sorted_distances.iter().map(|d| d.ilog2()).collect();
+ info!("Sorted distances: {:?}", mapped);
+
+ let farthest_peer_to_check = self
+ .get_all_local_peers_excluding_self()
+ .len()
+ .checked_div(5 * CLOSE_GROUP_SIZE)
+ .unwrap_or(1);
+
+ info!("Farthest peer we'll check: {:?}", farthest_peer_to_check);
+
+ let yardstick = if sorted_distances.len() >= farthest_peer_to_check {
+ sorted_distances.get(farthest_peer_to_check.saturating_sub(1))
+ } else {
+ sorted_distances.last()
+ };
+ if let Some(distance) = yardstick {
+ if self.range_distances.len() >= GET_RANGE_STORAGE_LIMIT {
+ if let Some(distance) = self.range_distances.pop_front() {
+ trace!("Removed distance range: {:?}", distance.ilog2());
+ }
+ }
+
+ info!("Adding new distance range: {:?}", distance.ilog2());
+
+ self.range_distances.push_back(*distance);
}

- let our_address = NetworkAddress::from_peer(self.self_peer_id);
+ info!(
+ "Distance between peers in set_request_range call: {:?}",
+ yardstick
+ );
+ }
+
+ /// Returns the KBucketDistance we are currently using as our X value
+ /// for range based search.
+ pub(crate) fn get_request_range(&self) -> KBucketDistance {
+ let mut sorted_distances = self.range_distances.iter().collect::<Vec<_>>();
+
+ sorted_distances.sort_unstable();

- // get `K_VALUE / 2`th peer's address distance
- // This is a rough estimate of the farthest address we might be responsible for.
- // We want this to be higher than actually necessary, so we retain more data
- // and can be sure to pass bad node checks
- let target_index = std::cmp::min(K_VALUE.get() / 2, closest_k_peers.len()) - 1;
+ let median_index = sorted_distances.len() / 8;

- let address = NetworkAddress::from_peer(closest_k_peers[target_index]);
- farthest_distance = our_address.distance(&address).ilog2();
+ let default = KBucketDistance::default();
+ let median = sorted_distances.get(median_index).cloned();

- farthest_distance
+ if let Some(dist) = median {
+ *dist
+ } else {
+ default
+ }
+ }
+
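// For illustration, the sampling scheme above condensed to plain `u32` distances:
// `push_sample` mirrors the bounded stash kept in `range_distances`, while
// `current_range` mirrors `get_request_range`, which picks the entry one eighth of
// the way into the sorted stash (despite the `median_index` name), biasing the
// chosen range towards the smaller recorded distances.
use std::collections::VecDeque;

const STORAGE_LIMIT: usize = 100; // mirrors GET_RANGE_STORAGE_LIMIT

fn push_sample(stash: &mut VecDeque<u32>, distance: u32) {
    if stash.len() >= STORAGE_LIMIT {
        let _ = stash.pop_front(); // drop the oldest sample first
    }
    stash.push_back(distance);
}

fn current_range(stash: &VecDeque<u32>) -> u32 {
    let mut sorted: Vec<u32> = stash.iter().copied().collect();
    sorted.sort_unstable();
    sorted.get(sorted.len() / 8).copied().unwrap_or_default()
}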
+ /// Get all the peers from our local RoutingTable. Excluding self.
+ pub(crate) fn get_all_local_peers_excluding_self(&mut self) -> Vec<PeerId> {
+ let our_peer_id = self.self_peer_id;
+ let mut all_peers: Vec<PeerId> = vec![];
+ for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() {
+ for entry in kbucket.iter() {
+ let id = entry.node.key.into_preimage();
+
+ if id != our_peer_id {
+ all_peers.push(id);
+ }
+ }
+ }
+ all_peers
}

/// Pushes NetworkSwarmCmd off thread so as to be non-blocking
@@ -1010,3 +1140,66 @@ impl SwarmDriver {
Ok(())
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::check_and_wipe_storage_dir_if_necessary;
+
+ use std::{fs, io::Read};
+
+ #[tokio::test]
+ async fn version_file_update() {
+ let temp_dir = std::env::temp_dir();
+ let unique_dir_name = uuid::Uuid::new_v4().to_string();
+ let root_dir = temp_dir.join(unique_dir_name);
+ fs::create_dir_all(&root_dir).expect("Failed to create root directory");
+
+ let version_file = root_dir.join("network_key_version");
+ let storage_dir = root_dir.join("record_store");
+
+ let cur_version = uuid::Uuid::new_v4().to_string();
+ assert!(check_and_wipe_storage_dir_if_necessary(
+ root_dir.clone(),
+ storage_dir.clone(),
+ cur_version.clone()
+ )
+ .is_ok());
+ {
+ let mut content_str = String::new();
+ let mut file = fs::OpenOptions::new()
+ .read(true)
+ .open(version_file.clone())
+ .expect("Failed to open version file");
+ file.read_to_string(&mut content_str)
+ .expect("Failed to read from version file");
+ assert_eq!(content_str, cur_version);
+
+ drop(file);
+ }
+
+ fs::create_dir_all(&storage_dir).expect("Failed to create storage directory");
+ assert!(fs::metadata(storage_dir.clone()).is_ok());
+
+ let cur_version = uuid::Uuid::new_v4().to_string();
+ assert!(check_and_wipe_storage_dir_if_necessary(
+ root_dir.clone(),
+ storage_dir.clone(),
+ cur_version.clone()
+ )
+ .is_ok());
+ {
+ let mut content_str = String::new();
+ let mut file = fs::OpenOptions::new()
+ .read(true)
+ .open(version_file.clone())
+ .expect("Failed to open version file");
+ file.read_to_string(&mut content_str)
+ .expect("Failed to read from version file");
+ assert_eq!(content_str, cur_version);
+
+ drop(file);
+ }
+ // The storage_dir shall be removed as version_key changed
+ assert!(fs::metadata(storage_dir.clone()).is_err());
+ }
+}
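// For illustration, how the wipe check above might slot into node startup (a
// hedged sketch, not the crate's actual wiring: `prepare_record_store` is a
// hypothetical wrapper, `Result` is the crate alias used in driver.rs, and in
// this PR the version string comes from `get_key_version_str()`):
fn prepare_record_store(root_dir: std::path::PathBuf, network_key_version: String) -> Result<std::path::PathBuf> {
    let storage_dir = root_dir.join("record_store");
    // Wipes `record_store` and rewrites the `network_key_version` file on a mismatch.
    check_and_wipe_storage_dir_if_necessary(root_dir, storage_dir.clone(), network_key_version)?;
    std::fs::create_dir_all(&storage_dir)?; // recreate after a potential wipe
    Ok(storage_dir)
}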
#[error("Split Record has {} different copies", result_map.len())] SplitRecord { result_map: HashMap)>, }, - - #[error("Network query timed out")] - QueryTimeout, - - #[error("Record retrieved from the network does not match the provided target record.")] - RecordDoesNotMatch(Record), } impl Debug for GetRecordError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::NotEnoughCopies { + Self::NotEnoughCopiesInRange { record, expected, got, + range, } => { let pretty_key = PrettyPrintRecordKey::from(&record.key); - f.debug_struct("NotEnoughCopies") + f.debug_struct("NotEnoughCopiesInRange") .field("record_key", &pretty_key) .field("expected", &expected) .field("got", &got) + .field("range", &range) .finish() } - Self::RecordNotFound => write!(f, "RecordNotFound"), - Self::SplitRecord { result_map } => f - .debug_struct("SplitRecord") - .field("result_map_count", &result_map.len()) - .finish(), Self::QueryTimeout => write!(f, "QueryTimeout"), Self::RecordDoesNotMatch(record) => { let pretty_key = PrettyPrintRecordKey::from(&record.key); @@ -79,6 +78,12 @@ impl Debug for GetRecordError { .field(&pretty_key) .finish() } + Self::RecordKindMismatch => write!(f, "RecordKindMismatch"), + Self::RecordNotFound => write!(f, "RecordNotFound"), + Self::SplitRecord { result_map } => f + .debug_struct("SplitRecord") + .field("result_map_count", &result_map.len()) + .finish(), } } } @@ -122,9 +127,6 @@ pub enum NetworkError { #[error("The RecordKind obtained from the Record did not match with the expected kind: {0}")] RecordKindMismatch(RecordKind), - #[error("Record header is incorrect")] - InCorrectRecordHeader, - // ---------- Transfer Errors #[error("Failed to get spend: {0}")] FailedToGetSpend(String), @@ -138,7 +140,7 @@ pub enum NetworkError { // ---------- Spend Errors #[error("Spend not found: {0:?}")] NoSpendFoundInsideRecord(SpendAddress), - #[error("Double spend(s) attempt was detected. The signed spends are: {0:?}")] + #[error("Double SpendAttempt was detected. The signed spends are: {0:?}")] DoubleSpendAttempt(Vec), // ---------- Store Error diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 6551f6e5f0..de90a187d6 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -7,21 +7,23 @@ // permissions and limitations relating to use of the SAFE Network Software. 
diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs
index 6551f6e5f0..de90a187d6 100644
--- a/sn_networking/src/event/kad.rs
+++ b/sn_networking/src/event/kad.rs
@@ -7,21 +7,23 @@
// permissions and limitations relating to use of the SAFE Network Software.

use crate::{
- driver::PendingGetClosestType, get_quorum_value, get_raw_signed_spends_from_record,
- target_arch::Instant, GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver,
- CLOSE_GROUP_SIZE,
+ cmd::NetworkSwarmCmd, driver::PendingGetClosestType, get_quorum_value, target_arch::Instant,
+ GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE,
};
use itertools::Itertools;
-use libp2p::kad::{
- self, GetClosestPeersError, InboundRequest, PeerRecord, ProgressStep, QueryId, QueryResult,
- QueryStats, Record, K_VALUE,
+use libp2p::{
+ kad::{
+ self, GetClosestPeersError, InboundRequest, KBucketDistance, PeerRecord, ProgressStep,
+ QueryId, QueryResult, QueryStats, Quorum, Record, K_VALUE,
+ },
+ PeerId,
};
use sn_protocol::{
- storage::{try_serialize_record, RecordKind},
- PrettyPrintRecordKey,
+ messages::{Cmd, Request},
+ storage::get_type_from_record,
+ NetworkAddress, PrettyPrintRecordKey,
};
-use sn_transfers::SignedSpend;
-use std::collections::{hash_map::Entry, BTreeSet, HashSet};
+use std::collections::{hash_map::Entry, HashSet};
use tokio::sync::oneshot;
use xor_name::XorName;
@@ -31,6 +33,9 @@ impl SwarmDriver {
let event_string;

match kad_event {
+ // We use this query both to bootstrap and populate our routing table,
+ // and also to define our GetRange, derived from the distances between
+ // queried addresses and the peers returned in recent GetClosest calls.
kad::Event::OutboundQueryProgressed {
id,
result: QueryResult::GetClosestPeers(Ok(ref closest_peers)),
@@ -45,7 +50,7 @@
);

if let Entry::Occupied(mut entry) = self.pending_get_closest_peers.entry(id) {
- let (_, current_closest) = entry.get_mut();
+ let (_, _, current_closest) = entry.get_mut();

// TODO: consider order the result and terminate when reach any of the
// following criteria:
// 1, `stats.num_pending()` is 0
// 2, `stats.duration()` is longer than a defined period
current_closest.extend(closest_peers.peers.iter().map(|i| i.peer_id));
if current_closest.len() >= usize::from(K_VALUE) || step.last {
- let (get_closest_type, current_closest) = entry.remove();
- match get_closest_type {
- PendingGetClosestType::NetworkDiscovery => self
- .network_discovery
- .handle_get_closest_query(current_closest),
- PendingGetClosestType::FunctionCall(sender) => {
- sender
- .send(current_closest)
- .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
- }
+ let (address, get_closest_type, current_closest) = entry.remove();
+ self.network_discovery
+ .handle_get_closest_query(&current_closest);
+
+ if let PendingGetClosestType::FunctionCall(sender) = get_closest_type {
+ sender
+ .send(current_closest)
+ .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
+ } else {
+ // do not set this via function calls, as that could potentially
+ // skew the results in favour of heavily queried (and manipulated)
+ // areas of the network
+ self.set_request_range(address, &current_closest);
+ }
}
} else {
@@ -81,9 +89,8 @@
ref step,
} => {
event_string = "kad_event::get_closest_peers_err";
- error!("GetClosest Query task {id:?} errored with {err:?}, {stats:?} - {step:?}");

- let (get_closest_type, mut current_closest) =
+ let (address, get_closest_type, mut current_closest) =
self.pending_get_closest_peers.remove(&id).ok_or_else(|| {
debug!(
"Can't locate query task {id:?}, it has likely been completed already."
@@ -100,13 +107,23 @@
match err {
GetClosestPeersError::Timeout { ref peers, ..
} => {
current_closest.extend(peers.iter().map(|i| i.peer_id));
+ if current_closest.len() < CLOSE_GROUP_SIZE {
+ error!(
+ "GetClosest Query task {id:?} errored, not enough found. {err:?}, {stats:?} - {step:?}"
+ );
+ }
}
}

match get_closest_type {
- PendingGetClosestType::NetworkDiscovery => self
- .network_discovery
- .handle_get_closest_query(current_closest),
+ PendingGetClosestType::NetworkDiscovery => {
+ // do not set this via function calls, as that could potentially
+ // skew the results in favour of heavily queried (and manipulated)
+ // areas of the network
+ self.set_request_range(address, &current_closest);
+ self.network_discovery
+ .handle_get_closest_query(&current_closest);
+ }
PendingGetClosestType::FunctionCall(sender) => {
sender
.send(current_closest)
.map_err(|_| NetworkError::InternalMsgChannelDropped)?;
}
@@ -127,7 +144,7 @@ impl SwarmDriver {
PrettyPrintRecordKey::from(&peer_record.record.key),
peer_record.peer
);
- self.accumulate_get_record_found(id, peer_record, stats, step)?;
+ self.accumulate_get_record_found(id, peer_record)?;
}
kad::Event::OutboundQueryProgressed {
id,
@@ -248,12 +265,13 @@ impl SwarmDriver {
event_string = "kad_event::RoutingUpdated";
if is_new_peer {
self.update_on_peer_addition(peer);
+ }

+ if !self.first_contact_made {
// This should only happen once
- if self.bootstrap.notify_new_peer() {
- info!("Performing the first bootstrap");
- self.trigger_network_discovery();
- }
+ self.first_contact_made = true;
+ info!("Performing the first bootstrap");
+ self.trigger_network_discovery();
}

info!("kad_event::RoutingUpdated {:?}: {peer:?}, is_new_peer: {is_new_peer:?} old_peer: {old_peer:?}", self.peers_in_rt);
@@ -320,6 +338,7 @@ impl SwarmDriver {
// `QueryStats::requests` to be 20 (K-Value)
// `QueryStats::success` to be over majority of the requests
// `err::NotFound::closest_peers` contains a list of CLOSE_GROUP_SIZE peers
+ //
// 2, targeting an existing entry
// there will a sequence of (at least CLOSE_GROUP_SIZE) events of
// `kad::Event::OutboundQueryProgressed` to be received
@@ -333,26 +352,30 @@
// where: `cache_candidates`: being the peers supposed to hold the record but not
// `ProgressStep::count`: to be `number of received copies plus one`
// `ProgressStep::last` to be `true`
+ //
+ //
/// Accumulates the GetRecord query results
- /// If we get enough responses (quorum) for a record with the same content hash:
+ /// If we get enough responses (i.e. exceed GetRange) for a record with the same content hash:
/// - we return the Record after comparing with the target record. This might return RecordDoesNotMatch if the
/// check fails.
/// - if multiple content hashes are found, we return a SplitRecord Error
/// And then we stop the kad query as we are done here.
+ /// We do not need to wait for GetRange to be exceeded here and should return early.
fn accumulate_get_record_found(
&mut self,
query_id: QueryId,
peer_record: PeerRecord,
- _stats: QueryStats,
- step: ProgressStep,
) -> Result<()> {
+ let expected_get_range = self.get_request_range();
+ let key = peer_record.record.key.clone();
+
let peer_id = if let Some(peer_id) = peer_record.peer {
peer_id
} else {
self.self_peer_id
};

- let pretty_key = PrettyPrintRecordKey::from(&peer_record.record.key).into_owned();
+ let pretty_key = PrettyPrintRecordKey::from(&key).into_owned();

if let Entry::Occupied(mut entry) = self.pending_get_record.entry(query_id) {
let (_key, _senders, result_map, cfg) = entry.get_mut();
@@ -367,84 +390,29 @@
// Insert the record and the peer into the result_map.
let record_content_hash = XorName::from_content(&peer_record.record.value);
- let responded_peers =
+ debug!("For record {pretty_key:?} task {query_id:?}, received a copy {peer_id:?} with content hash {record_content_hash:?}");
+
+ let peer_list =
if let Entry::Occupied(mut entry) = result_map.entry(record_content_hash) {
let (_, peer_list) = entry.get_mut();
+
let _ = peer_list.insert(peer_id);
- peer_list.len()
+ peer_list.clone()
} else {
let mut peer_list = HashSet::new();
let _ = peer_list.insert(peer_id);
- result_map.insert(record_content_hash, (peer_record.record.clone(), peer_list));
- 1
- };
-
- let expected_answers = get_quorum_value(&cfg.get_quorum);
-
- debug!("Expecting {expected_answers:?} answers for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far");
-
- if responded_peers >= expected_answers {
- if !cfg.expected_holders.is_empty() {
- debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with non-responded expected holders {:?}", cfg.expected_holders);
- }
- let cfg = cfg.clone();
+ result_map.insert(
+ record_content_hash,
+ (peer_record.record.clone(), peer_list.clone()),
+ );

- // Remove the query task and consume the variables.
- let (_key, senders, result_map, _) = entry.remove();
+ peer_list
+ };

- if result_map.len() == 1 {
- Self::send_record_after_checking_target(senders, peer_record.record, &cfg)?;
- } else {
- debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with split record");
- let mut accumulated_spends = BTreeSet::new();
- for (record, _) in result_map.values() {
- match get_raw_signed_spends_from_record(record) {
- Ok(spends) => {
- accumulated_spends.extend(spends);
- }
- Err(_) => {
- continue;
- }
- }
- }
- if !accumulated_spends.is_empty() {
- info!("For record {pretty_key:?} task {query_id:?}, found split record for a spend, accumulated and sending them as a single record");
- let accumulated_spends =
- accumulated_spends.into_iter().collect::<Vec<_>>();
-
- let bytes = try_serialize_record(&accumulated_spends, RecordKind::Spend)?;
-
- let new_accumulated_record = Record {
- key: peer_record.record.key,
- value: bytes.to_vec(),
- publisher: None,
- expires: None,
- };
- for sender in senders {
- let new_accumulated_record = new_accumulated_record.clone();

- sender
- .send(Ok(new_accumulated_record))
- .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
- }
- } else {
- for sender in senders {
- let result_map = result_map.clone();
- sender
- .send(Err(GetRecordError::SplitRecord { result_map }))
- .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
- }
- }
- }
-
- // Stop the query; possibly stops more nodes from being queried.
- if let Some(mut query) = self.swarm.behaviour_mut().kademlia.query_mut(&query_id) {
- query.finish();
- }
- } else if usize::from(step.count) >= CLOSE_GROUP_SIZE {
- debug!("For record {pretty_key:?} task {query_id:?}, got {:?} with {} versions so far.",
- step.count, result_map.len());
- }
+ let responded_peers = peer_list.len();

+ let expected_answers = cfg.get_quorum;
+ trace!("Expecting {expected_answers:?} answers to exceed {expected_get_range:?} for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far");
} else {
// return error if the entry cannot be found
return Err(NetworkError::ReceivedKademliaEventDropped {
@@ -455,6 +423,68 @@ impl SwarmDriver {
Ok(())
}

+ /// Checks that the peers returned for a request are sufficiently spaced to
+ /// ensure we have searched enough of the network range as determined by our `get_range`
+ ///
+ /// We expect any conflicting records to have been reported prior to this check,
+ /// so we assume we're returning unique records only.
+ fn have_we_have_searched_thoroughly_for_quorum(
+ expected_get_range: KBucketDistance,
+ searched_peers_list: &HashSet<PeerId>,
+ data_key_address: &NetworkAddress,
+ quorum: &Quorum,
+ ) -> bool {
+ info!("Assessing search: range: {:?}, address: {data_key_address:?}, quorum required: {quorum:?}, peers_returned_count: {:?}", expected_get_range.ilog2(), searched_peers_list.len());
+ let is_sensitive_data = matches!(quorum, Quorum::All);
+
+ let required_quorum = get_quorum_value(quorum);
+
+ let met_quorum = searched_peers_list.len() >= required_quorum;
+
+ // we only enforce the range check for sensitive data, i.e. spends fetched with Quorum::All
+ if met_quorum && !is_sensitive_data {
+ return true;
+ }
+
+ // find the farthest distance between a responding peer and the data
+ let mut max_distance_to_data_from_responded_nodes = KBucketDistance::default();
+
+ // track the maximum distance from any responder to the data
+ for peer_id in searched_peers_list.iter() {
+ let peer_address = NetworkAddress::from_peer(*peer_id);
+ let distance_to_data = peer_address.distance(data_key_address);
+ if max_distance_to_data_from_responded_nodes < distance_to_data {
+ max_distance_to_data_from_responded_nodes = distance_to_data;
+ }
+ }
+
+ // use ilog2 as simplified distance check
+ // It allows us to say "we've searched up to and including this bucket"
+ // as opposed to the concrete distance itself (which statistically seems like we can fall outwith a range
+ // quite easily with a small number of peers)
+ let exceeded_request_range = if max_distance_to_data_from_responded_nodes.ilog2()
+ < expected_get_range.ilog2()
+ {
+ let dist = max_distance_to_data_from_responded_nodes.ilog2();
+ let expected_dist = expected_get_range.ilog2();
+
+ warn!("RANGE: {data_key_address:?} Insufficient GetRange searched. {dist:?} {expected_dist:?} {max_distance_to_data_from_responded_nodes:?} is less than expected GetRange of {expected_get_range:?}");
+
+ false
+ } else {
+ true
+ };
+
+ // We assume a finalised query has searched as far as it can in libp2p
+
+ if exceeded_request_range && met_quorum {
+ warn!("RANGE: {data_key_address:?} Request satisfied as exceeded request range : {exceeded_request_range:?} and Quorum satisfied with {:?} peers exceeding quorum {required_quorum:?}", searched_peers_list.len());
+ return true;
+ }
+
+ false
+ }
+
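// For illustration, the decision above boiled down to scalars: `ilog2` bucket
// indices stand in for `KBucketDistance` values, and `enforce_range` is true only
// for `Quorum::All` data such as spends.
fn searched_thoroughly(
    farthest_responder_bucket: u32, // ilog2 of the farthest responder's distance to the data
    expected_range_bucket: u32,     // ilog2 of the current GetRange
    responders: usize,
    required_quorum: usize,
    enforce_range: bool,
) -> bool {
    let met_quorum = responders >= required_quorum;
    if met_quorum && !enforce_range {
        return true; // quorum alone satisfies non-sensitive data
    }
    // the search only counts as thorough if responders reach out to the GetRange bucket
    met_quorum && farthest_responder_bucket >= expected_range_bucket
}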
/// Handles the possible cases when a GetRecord Query completes.
/// The accumulate_get_record_found returns the record if the quorum is satisfied, but, if we have reached this point
/// then we did not get enough records or we got split records (which prevented the quorum to pass).
fn handle_get_record_finished(&mut self, query_id: QueryId, step: ProgressStep) -> Result<()> {
// return error if the entry cannot be found
- if let Some((_key, senders, result_map, cfg)) = self.pending_get_record.remove(&query_id) {
+ if let Some((r_key, senders, result_map, cfg)) = self.pending_get_record.remove(&query_id) {
let num_of_versions = result_map.len();
- let (result, log_string) = if let Some((record, from_peers)) =
- result_map.values().next()
- {
- let result = if num_of_versions == 1 {
- Err(GetRecordError::NotEnoughCopies {
- record: record.clone(),
- expected: get_quorum_value(&cfg.get_quorum),
- got: from_peers.len(),
- })
- } else {
- Err(GetRecordError::SplitRecord {
- result_map: result_map.clone(),
- })
- };
+ let data_key_address = NetworkAddress::from_record_key(&r_key);
+ let expected_get_range = self.get_request_range();
+ let all_seen_peers: HashSet<_> = result_map
+ .values()
+ .flat_map(|(_, peers)| peers)
+ .cloned()
+ .collect();
+ let we_have_searched_thoroughly = Self::have_we_have_searched_thoroughly_for_quorum(
+ expected_get_range,
+ &all_seen_peers,
+ &data_key_address,
+ &cfg.get_quorum,
+ );
+
+ // we have a split record, return it
+ if num_of_versions > 1 {
+ warn!("RANGE: Multiple versions ({num_of_versions}) found over range");
+ for sender in senders {
+ sender
+ .send(Err(GetRecordError::SplitRecord {
+ result_map: result_map.clone(),
+ }))
+ .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
+ }

- (
- result,
- format!("Getting record {:?} completed with only {:?} copies received, and {num_of_versions} versions.",
- PrettyPrintRecordKey::from(&record.key), usize::from(step.count) - 1)
- )
- } else {
- (
- Err(GetRecordError::RecordNotFound),
- format!("Getting record task {query_id:?} completed with step count {:?}, but no copy found.", step.count),
- )
- };
-
- if cfg.expected_holders.is_empty() {
- debug!("{log_string}");
- } else {
- debug!(
- "{log_string}, and {:?} expected holders not responded",
- cfg.expected_holders
- );
+ for (record, _peers) in result_map.values() {
+ self.reput_data_to_range(record, &data_key_address, &all_seen_peers)?;
+ }
+
+ return Ok(());
}

- for sender in senders {
- sender
- .send(result.clone())
- .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
+ // we have no results, bail
+ if num_of_versions == 0 {
+ warn!("RANGE: No versions found!");
+ for sender in senders {
+ sender
+ .send(Err(GetRecordError::RecordNotFound))
+ .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
+ }
+ return Ok(());
+ }
+
+ // one version found: return it if we searched thoroughly, otherwise reput it and report NotEnoughCopiesInRange
+ if num_of_versions == 1 {
+ let result = if let Some((record, peers)) = result_map.values().next() {
+ warn!("RANGE: one version found!");
+
+ if we_have_searched_thoroughly {
+ Ok(record.clone())
+ } else {
+ self.reput_data_to_range(record, &data_key_address, &all_seen_peers)?;
+ Err(GetRecordError::NotEnoughCopiesInRange {
+ record: record.clone(),
+ expected: get_quorum_value(&cfg.get_quorum),
+ got: peers.len(),
+ range: expected_get_range.ilog2().unwrap_or(0),
+ })
+ }
+ } else {
+ debug!("Getting record task {query_id:?} completed with step count {:?}, but no copy found.", step.count);
+ Err(GetRecordError::RecordNotFound)
+ };
+ for sender in senders {
+ sender
+ .send(result.clone())
+ .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
+ }
+
+ #[cfg(feature = "open-metrics")]
+ if self.metrics_recorder.is_some() {
+ self.check_for_change_in_our_close_group();
+ }
+ }
} else {
- // We manually perform `query.finish()` if we return early from accumulate fn.
- // Thus we will still get FinishedWithNoAdditionalRecord.
debug!("Can't locate query task {query_id:?} during GetRecord finished. We might have already returned the result to the sender.");
}
Ok(())
}

+ /// Repost data to the network if we didn't get enough responses.
+ fn reput_data_to_range(
+ &mut self,
+ record: &Record,
+ data_key_address: &NetworkAddress,
+ // all peers who responded with any version of the record
+ from_peers: &HashSet<PeerId>,
+ ) -> Result<()> {
+ let pretty_key = PrettyPrintRecordKey::from(&record.key);
+ // This should be a backstop... Quorum::All is the only one that enforces
+ // a full search of the network range.
+ info!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has the record, or same state, we need to extend the range and PUT the data.");
+
+ info!("Reputting data to network {pretty_key:?}...");
+
+ warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need to PUT the data back into nodes in that range.");
+
+ let record_type = get_type_from_record(record)?;
+
+ let replicate_targets: HashSet<_> = self
+ .get_filtered_peers_exceeding_range_or_closest_nodes(data_key_address)
+ .iter()
+ .cloned()
+ .collect();
+
+ if from_peers == &replicate_targets {
+ warn!("RANGE: {pretty_key:?} We asked everyone we know of in that range already!");
+ }
+
+ // set holder to someone that has the data
+ let holder = NetworkAddress::from_peer(
+ from_peers
+ .iter()
+ .next()
+ .cloned()
+ .unwrap_or(self.self_peer_id),
+ );
+
+ for peer in replicate_targets {
+ warn!("Reputting data to {peer:?} for {pretty_key:?} if needed...");
+ // Do not send to any peer that has already informed us
+ if from_peers.contains(&peer) {
+ continue;
+ }
+
+ debug!("RANGE: (insufficient, so) Sending data to unresponded peer: {peer:?} for {pretty_key:?}");
+
+ // nodes will try/fail to replicate it from us, but grab it from the network thereafter
+ self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest {
+ req: Request::Cmd(Cmd::Replicate {
+ holder: holder.clone(),
+ keys: vec![(data_key_address.clone(), record_type.clone())],
+ }),
+ peer,
+ sender: None,
+ });
+ }
+
+ Ok(())
+ }
+
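// For illustration, the fan-out rule inside `reput_data_to_range` isolated:
// replicate only to in-range peers that have not already answered (`u64` ids
// stand in for `PeerId`):
use std::collections::HashSet;

fn peers_needing_the_record(
    replicate_targets: &HashSet<u64>,
    already_responded: &HashSet<u64>,
) -> Vec<u64> {
    replicate_targets
        .difference(already_responded)
        .copied()
        .collect()
}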
/// Handles the possible cases when a kad GetRecord returns an error.
/// If we get NotFound/QuorumFailed, we return a RecordNotFound error. Kad currently does not enforce any quorum.
/// If we get a Timeout:
diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs
index 7af3b268c5..e1d8074d29 100644
--- a/sn_networking/src/event/mod.rs
+++ b/sn_networking/src/event/mod.rs
@@ -146,7 +146,7 @@ pub enum NetworkEvent {
/// Carry out chunk proof check against the specified record and peer
ChunkProofVerification {
peer_id: PeerId,
- keys_to_verify: Vec<NetworkAddress>,
+ key_to_verify: NetworkAddress,
},
}
@@ -208,7 +208,7 @@ impl Debug for NetworkEvent {
}
NetworkEvent::ChunkProofVerification {
peer_id,
- keys_to_verify,
+ key_to_verify: keys_to_verify,
} => {
write!(
f,
diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs
index 4550772bf4..c46caa756e 100644
--- a/sn_networking/src/event/request_response.rs
+++ b/sn_networking/src/event/request_response.rs
@@ -7,17 +7,21 @@
// permissions and limitations relating to use of the SAFE Network Software.

use crate::{
- cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address, MsgResponder, NetworkError,
- NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE,
+ cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address_and_limit, MsgResponder,
+ NetworkError, NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE,
};
-use itertools::Itertools;
-use libp2p::request_response::{self, Message};
-use rand::{rngs::OsRng, thread_rng, Rng};
+use libp2p::{
+ kad::RecordKey,
+ request_response::{self, Message},
+ PeerId,
+};
+use rand::{rngs::OsRng, Rng};
use sn_protocol::{
messages::{CmdResponse, Request, Response},
storage::RecordType,
NetworkAddress,
};
+use std::collections::HashMap;

impl SwarmDriver {
/// Forwards `Request` to the upper layers using `Sender`. Sends `Response` to the peers
@@ -190,6 +194,10 @@ impl SwarmDriver {
sender: NetworkAddress,
incoming_keys: Vec<(NetworkAddress, RecordType)>,
) {
+ let peers = self.get_all_local_peers_excluding_self();
+ let our_peer_id = self.self_peer_id;
+ let more_than_one_key = incoming_keys.len() > 1;
+
let holder = if let Some(peer_id) = sender.as_peer_id() {
peer_id
} else {
@@ -202,16 +210,12 @@ impl SwarmDriver {
incoming_keys.len()
);

- // accept replication requests from the K_VALUE peers away,
- // giving us some margin for replication
- let closest_k_peers = self.get_closest_k_value_local_peers();
- if !closest_k_peers.contains(&holder) || holder == self.self_peer_id {
- debug!("Holder {holder:?} is self or not in replication range.");
+ // accept replication requests from all known peers within our GetRange
+ if !peers.contains(&holder) || holder == our_peer_id {
+ trace!("Holder {holder:?} is self or not in replication range.");
return;
}

- let more_than_one_key = incoming_keys.len() > 1;
-
// On receive a replication_list from a close_group peer, we undertake two tasks:
// 1, For those keys that we don't have:
// fetch them if close enough to us
@@ -224,81 +228,109 @@ impl SwarmDriver {
.behaviour_mut()
.kademlia
.store_mut()
- .record_addresses_ref();
- let keys_to_fetch = self
- .replication_fetcher
- .add_keys(holder, incoming_keys, all_keys);
+ .record_addresses_ref()
+ .clone();
+
+ let keys_to_fetch =
+ self.replication_fetcher
+ .add_keys(holder, incoming_keys, &all_keys, &peers);
+
if keys_to_fetch.is_empty() {
debug!("no waiting keys to fetch from the network");
} else {
self.send_event(NetworkEvent::KeysToFetchForReplication(keys_to_fetch));
}

- // Only trigger chunk_proof check based every X% of the time
- let mut rng = thread_rng();
- // 5% probability
- if more_than_one_key && rng.gen_bool(0.05) {
- self.verify_peer_storage(sender.clone());
+ let event_sender = self.event_sender.clone();
+ if more_than_one_key && OsRng.gen_bool(0.1) {
+ let _handle = tokio::spawn(async move {
+ // Only run the check for ~10% of replication lists
+ let keys_to_verify =
+ Self::select_verification_data_candidates(&peers, &all_keys, &sender);

- // In additon to verify the sender, we also verify a random close node.
- // This is to avoid malicious node escaping the check by never send a replication_list.
- // With further reduced probability of 1% (5% * 20%)
- if rng.gen_bool(0.2) {
- let close_group_peers = self
- .swarm
- .behaviour_mut()
- .kademlia
- .get_closest_local_peers(&self.self_peer_id.into())
- .map(|peer| peer.into_preimage())
- .take(CLOSE_GROUP_SIZE)
- .collect_vec();
- if close_group_peers.len() == CLOSE_GROUP_SIZE {
- loop {
- let index: usize = OsRng.gen_range(0..close_group_peers.len());
- let candidate = NetworkAddress::from_peer(close_group_peers[index]);
- if sender != candidate {
- self.verify_peer_storage(candidate);
- break;
+ if keys_to_verify.is_empty() {
+ debug!("No valid candidate to be checked against peer {holder:?}");
+ } else {
+ // choose one random key to verify
+ let key_to_verify =
+ keys_to_verify[OsRng.gen_range(0..keys_to_verify.len())].clone();
+ if let Err(error) = event_sender
+ .send(NetworkEvent::ChunkProofVerification {
+ peer_id: holder,
+ key_to_verify,
+ })
+ .await
+ {
+ error!("SwarmDriver failed to send event: {}", error);
+ }
+ }
+
+ // In addition to verifying the sender, we also verify a random close node.
+ // This is to avoid a malicious node escaping the check by never sending a replication_list.
+ // (this runs within the same 10% gate as the sender check above)
+ let close_group_peers = sort_peers_by_address_and_limit(
+ &peers,
+ &NetworkAddress::from_peer(our_peer_id),
+ CLOSE_GROUP_SIZE,
+ )
+ .unwrap_or_default();
+
+ loop {
+ let index: usize = OsRng.gen_range(0..close_group_peers.len());
+ let candidate_peer_id = *close_group_peers[index];
+ let candidate = NetworkAddress::from_peer(*close_group_peers[index]);
+ if sender != candidate {
+ let keys_to_verify = Self::select_verification_data_candidates(
+ &peers, &all_keys, &candidate,
+ );
+
+ if keys_to_verify.is_empty() {
+ debug!("No valid candidate to be checked against peer {candidate:?}");
+ } else {
+ // choose one random key to verify
+ let key_to_verify =
+ keys_to_verify[OsRng.gen_range(0..keys_to_verify.len())].clone();
+
+ if let Err(error) = event_sender
+ .send(NetworkEvent::ChunkProofVerification {
+ peer_id: candidate_peer_id,
+ key_to_verify,
+ })
+ .await
+ {
+ error!("SwarmDriver failed to send event: {}", error);
+ }
}
+
+ break;
}
}
- }
+ });
}
}

/// Check among all chunk type records that we have, select those close to the peer,
/// and randomly pick one as the verification candidate.
- fn verify_peer_storage(&mut self, peer: NetworkAddress) {
- let mut closest_peers = self
- .swarm
- .behaviour_mut()
- .kademlia
- .get_closest_local_peers(&self.self_peer_id.into())
- .map(|peer| peer.into_preimage())
- .take(20)
- .collect_vec();
- closest_peers.push(self.self_peer_id);
-
+ fn select_verification_data_candidates(
+ all_peers: &Vec<PeerId>,
+ all_keys: &HashMap<RecordKey, (NetworkAddress, RecordType)>,
+ peer: &NetworkAddress,
+ ) -> Vec<NetworkAddress> {
let target_peer = if let Some(peer_id) = peer.as_peer_id() {
peer_id
} else {
error!("Target {peer:?} is not a valid PeerId");
- return;
+ return vec![];
};

- let all_keys = self
- .swarm
- .behaviour_mut()
- .kademlia
- .store_mut()
- .record_addresses_ref();
-
// Targeted chunk type record shall be expected within the close range from our perspective.
let mut verify_candidates: Vec<NetworkAddress> = all_keys
.values()
.filter_map(|(addr, record_type)| {
if RecordType::Chunk == *record_type {
- match sort_peers_by_address(&closest_peers, addr, CLOSE_GROUP_SIZE) {
+ // Here we take the actual closest, as this is where we want to be
+ // strict about who does have the data...
+ match sort_peers_by_address_and_limit(all_peers, addr, CLOSE_GROUP_SIZE) {
Ok(close_group) => {
if close_group.contains(&&target_peer) {
Some(addr.clone())
@@ -319,17 +351,6 @@ impl SwarmDriver {

verify_candidates.sort_by_key(|a| peer.distance(a));

- // To ensure the candidate mush have to be held by the peer,
- // we only carry out check when there are already certain amount of chunks uploaded
- // AND choose candidate from certain reduced range.
- if verify_candidates.len() > 50 {
- let index: usize = OsRng.gen_range(0..(verify_candidates.len() / 2));
- self.send_event(NetworkEvent::ChunkProofVerification {
- peer_id: target_peer,
- keys_to_verify: vec![verify_candidates[index].clone()],
- });
- } else {
- debug!("No valid candidate to be checked against peer {peer:?}");
- }
+ verify_candidates
}
}
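// For illustration, the filter in `select_verification_data_candidates` above in
// miniature: a record qualifies when it is a chunk and the checked peer sits in
// that record's close group (`u64` ids stand in for peers, `u32` keys for
// addresses; `close_group_of` is a hypothetical closeness helper):
fn verification_candidates(
    records: &[(u32, bool)], // (address, is_chunk)
    target_peer: u64,
    close_group_of: impl Fn(u32) -> Vec<u64>,
) -> Vec<u32> {
    records
        .iter()
        .filter(|(_, is_chunk)| *is_chunk)
        .filter(|(addr, _)| close_group_of(*addr).contains(&target_peer))
        .map(|(addr, _)| *addr)
        .collect()
}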
diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs
index 982088f102..c4de69665d 100644
--- a/sn_networking/src/event/swarm.rs
+++ b/sn_networking/src/event/swarm.rs
@@ -244,7 +244,7 @@ impl SwarmDriver {
}

// If we are not local, we care only for peers that we dialed and thus are reachable.
- if self.local || has_dialed {
+ if !self.local && has_dialed {
// A bad node cannot establish a connection with us. So we can add it to the RT directly.
self.remove_bootstrap_from_full(peer_id);
@@ -254,7 +254,10 @@
multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit))
});
}
+ }

+ if self.local || has_dialed {
+ // If we are not local, we care only for peers that we dialed and thus are reachable.
debug!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table");

// Attempt to add the addresses to the routing table.
@@ -392,6 +395,7 @@ impl SwarmDriver {
let _ = self.live_connected_peers.remove(&connection_id);
self.record_connection_metrics();

+ let mut failed_peer_addresses = vec![];
// we need to decide if this was a critical error and the peer should be removed from the routing table
let should_clean_peer = match error {
DialError::Transport(errors) => {
@@ -401,10 +405,14 @@
// so we default to it not being a real issue
// unless there are _specific_ errors (connection refused eg)
error!("Dial errors len : {:?}", errors.len());
- let mut there_is_a_serious_issue = false;
- for (_addr, err) in errors {
+ let mut remove_peer_track_peer_issue = false;
+ for (addr, err) in errors {
error!("OutgoingTransport error : {err:?}");

+ if !failed_peer_addresses.contains(&addr) {
+ failed_peer_addresses.push(addr)
+ }
+
match err {
TransportError::MultiaddrNotSupported(addr) => {
warn!("Multiaddr not supported : {addr:?}");
@@ -414,14 +422,13 @@
println!("If this was your bootstrap peer, restart your node with a supported multiaddr");
}
// if we can't dial a peer on a given address, we should remove it from the routing table
- there_is_a_serious_issue = true
+ remove_peer_track_peer_issue = false
}
TransportError::Other(err) => {
- let problematic_errors = [
- "ConnectionRefused",
- "HostUnreachable",
- "HandshakeTimedOut",
- ];
+ let problematic_errors =
+ ["ConnectionRefused", "HostUnreachable"];
+
+ let intermittent_errors = ["HandshakeTimedOut"];

let is_bootstrap_peer = self
.bootstrap_peers
@@ -432,7 +439,7 @@
&& self.peers_in_rt < self.bootstrap_peers.len()
{
warn!("OutgoingConnectionError: On bootstrap peer {failed_peer_id:?}, while still in bootstrap mode, ignoring");
- there_is_a_serious_issue = false;
+ remove_peer_track_peer_issue = false;
} else {
// It is really difficult to match this error, due to being eg:
// Custom { kind: Other, error: Left(Left(Os { code: 61, kind: ConnectionRefused, message: "Connection refused" })) }
@@ -443,13 +450,19 @@
.any(|err| error_msg.contains(err))
{
warn!("Problematic error encountered: {error_msg}");
- there_is_a_serious_issue = true;
+ remove_peer_track_peer_issue = true;
+ } else if intermittent_errors
+ .iter()
+ .any(|err| error_msg.contains(err))
+ {
+ warn!("Intermittent error encountered: {error_msg}");
+ remove_peer_track_peer_issue = false;
+ }
}
}
}
}
- there_is_a_serious_issue
+ remove_peer_track_peer_issue
}
DialError::NoAddresses => {
// We provided no address, and while we can't really blame the peer
@@ -490,7 +503,7 @@
};

if should_clean_peer {
- warn!("Tracking issue of {failed_peer_id:?}. Clearing it out for now");
+ warn!("Serious issue with {failed_peer_id:?}. Clearing it out for now");

if let Some(dead_peer) = self
.swarm
@@ -592,6 +605,12 @@ impl SwarmDriver {
// Remove outdated connection to a peer if it is not in the RT.
// Optionally force remove all the connections for a provided peer.
fn remove_outdated_connections(&mut self) {
+ // To avoid this being called too frequently, only carry out pruning at intervals.
+ if Instant::now() < self.last_connection_pruning_time + Duration::from_secs(30) {
+ return;
+ }
+ self.last_connection_pruning_time = Instant::now();
+
let mut removed_conns = 0;
self.live_connected_peers.retain(|connection_id, (peer_id, timeout_time)| {
diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs
index 27f07bdb3e..01e5d6c9f6 100644
--- a/sn_networking/src/lib.rs
+++ b/sn_networking/src/lib.rs
@@ -30,6 +30,7 @@ mod transfers;
mod transport;

use cmd::LocalSwarmCmd;
+use sn_registers::SignedRegister;
use xor_name::XorName;

// re-export arch dependent deps for use in the crate, or above
@@ -61,11 +62,15 @@ use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress};
use sn_protocol::{
error::Error as ProtocolError,
messages::{ChunkProof, Cmd, Nonce, Query, QueryResponse, Request, Response},
- storage::{RecordType, RetryStrategy},
+ storage::{
+ try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType,
+ RetryStrategy,
+ },
NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, CLOSE_GROUP_SIZE,
};
+use sn_transfers::SignedSpend;
use std::{
- collections::{BTreeMap, HashMap},
+ collections::{BTreeMap, HashMap, HashSet},
net::IpAddr,
sync::Arc,
};
@@ -78,10 +83,6 @@ use tokio::time::Duration;
/// The type of quote for a selected payee.
pub type PayeeQuote = (PeerId, RewardsAddress, PaymentQuote);

-/// The count of peers that will be considered as close to a record target,
-/// that a replication of the record shall be sent/accepted to/by the peer.
-pub const REPLICATION_PEERS_COUNT: usize = CLOSE_GROUP_SIZE + 2;
-
/// Majority of a given group (i.e. > 1/2).
#[inline]
pub const fn close_group_majority() -> usize {
@@ -97,17 +98,47 @@ const MIN_WAIT_BEFORE_READING_A_PUT: Duration = Duration::from_millis(300);

/// Sort the provided peers by their distance to the given `NetworkAddress`.
/// Return with the closest expected number of entries, if available.
-pub fn sort_peers_by_address<'a>(
+pub fn sort_peers_by_address_and_limit<'a>(
peers: &'a Vec<PeerId>,
address: &NetworkAddress,
expected_entries: usize,
) -> Result<Vec<&'a PeerId>> {
- sort_peers_by_key(peers, &address.as_kbucket_key(), expected_entries)
+ sort_peers_by_key_and_limit(peers, &address.as_kbucket_key(), expected_entries)
+}
+
+/// Returns the distances from the given `NetworkAddress` to the provided peers, sorted ascending.
+pub fn sort_peers_by_distance_to(
+ peers: &[PeerId],
+ queried_address: NetworkAddress,
+) -> Vec<KBucketDistance> {
+ let mut sorted_distances: Vec<_> = peers
+ .iter()
+ .map(|peer| {
+ let addr = NetworkAddress::from_peer(*peer);
+ queried_address.distance(&addr)
+ })
+ .collect();
+
+ sorted_distances.sort();
+
+ sorted_distances
+}
+
+/// Sort the provided peers by their distance to the given `NetworkAddress`,
+/// keeping only those closer than the provided distance.
+#[allow(clippy::result_large_err)]
+pub fn sort_peers_by_address_and_limit_by_distance<'a>(
+ peers: &'a Vec<PeerId>,
+ address: &NetworkAddress,
+ distance: KBucketDistance,
+) -> Result<Vec<&'a PeerId>> {
+ limit_peers_by_distance(peers, &address.as_kbucket_key(), distance)
+}
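// For illustration, the distance-limiting idea behind `limit_peers_by_distance`
// (defined below) with scalar distances: `u64` ids stand in for peers and
// `distance_to_key` is a hypothetical metric:
fn peers_closer_than(peers: &[u64], limit: u32, distance_to_key: impl Fn(u64) -> u32) -> Vec<u64> {
    peers
        .iter()
        .copied()
        .filter(|peer| distance_to_key(*peer) < limit)
        .collect()
}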
/// Sort the provided peers by their distance to the given `KBucketKey`.
/// Return with the closest expected number of entries, if available.
-pub fn sort_peers_by_key<'a, T>(
+pub fn sort_peers_by_key_and_limit<'a, T>(
peers: &'a Vec<PeerId>,
key: &KBucketKey<T>,
expected_entries: usize,
@@ -144,6 +175,40 @@
Ok(sorted_peers)
}

+/// Only return peers closer to the key than the provided distance.
+/// Their distance is measured by closeness to the given `KBucketKey`.
+#[allow(clippy::result_large_err)]
+pub fn limit_peers_by_distance<'a, T>(
+ peers: &'a Vec<PeerId>,
+ key: &KBucketKey<T>,
+ distance: KBucketDistance,
+) -> Result<Vec<&'a PeerId>> {
+ // Check if there are enough peers to satisfy the request.
+ // bail early if that's not the case
+ if CLOSE_GROUP_SIZE > peers.len() {
+ warn!("Not enough peers in the k-bucket to satisfy the request");
+ return Err(NetworkError::NotEnoughPeers {
+ found: peers.len(),
+ required: CLOSE_GROUP_SIZE,
+ });
+ }
+
+ // Collect references to the peers whose distance to the key falls within the provided distance.
+ let mut peers_within_distance: Vec<&PeerId> = Vec::with_capacity(peers.len());
+
+ for peer_id in peers {
+ let addr = NetworkAddress::from_peer(*peer_id);
+ let peer_distance = key.distance(&addr.as_kbucket_key());
+
+ if peer_distance < distance {
+ peers_within_distance.push(peer_id);
+ }
+ }
+
+ Ok(peers_within_distance)
+}

#[derive(Clone, Debug)]
/// API to interact with the underlying Swarm
@@ -197,6 +262,13 @@ impl Network {
&self.inner.local_swarm_cmd_sender
}

+ /// Return the GetRange as determined by the internal SwarmDriver
+ pub async fn get_range(&self) -> Result<KBucketDistance> {
+ let (sender, receiver) = oneshot::channel();
+ self.send_local_swarm_cmd(LocalSwarmCmd::GetCurrentRequestRange { sender });
+ receiver.await.map_err(NetworkError::from)
+ }
+
/// Signs the given data with the node's keypair.
pub fn sign(&self, msg: &[u8]) -> Result<Vec<u8>> {
self.keypair().sign(msg).map_err(NetworkError::from)
@@ -220,17 +292,123 @@ impl Network {
receiver.await?
}

- /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
- /// Excludes the client's `PeerId` while calculating the closest peers.
- pub async fn client_get_closest_peers(&self, key: &NetworkAddress) -> Result<Vec<PeerId>> {
- self.get_closest_peers(key, true).await
+ /// Replicate a fresh record to its close group peers.
-    /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
-    /// Excludes the client's `PeerId` while calculating the closest peers.
-    pub async fn client_get_closest_peers(&self, key: &NetworkAddress) -> Result<Vec<PeerId>> {
-        self.get_closest_peers(key, true).await
+    /// Replicate a fresh record to its close group peers.
+    /// This should not be triggered by a record we receive via replication fetch
+    pub async fn replicate_valid_fresh_record(&self, paid_key: RecordKey, record_type: RecordType) {
+        let network = self;
+
+        let start = std::time::Instant::now();
+        let pretty_key = PrettyPrintRecordKey::from(&paid_key);
+
+        // first we wait until our own network store can return the record
+        // otherwise it may not be fully written yet
+        let mut retry_count = 0;
+        trace!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating");
+        loop {
+            let record = match network.get_local_record(&paid_key).await {
+                Ok(record) => record,
+                Err(err) => {
+                    error!(
+                        "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}"
+                    );
+                    None
+                }
+            };
+
+            if record.is_some() {
+                break;
+            }
+
+            if retry_count > 10 {
+                error!(
+                    "Could not get record from store for replication: {pretty_key:?} after 10 retries"
+                );
+                return;
+            }
+
+            retry_count += 1;
+            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+        }
+
+        trace!("Start replication of fresh record {pretty_key:?} from store");
+
+        let all_peers = match network.get_all_local_peers_excluding_self().await {
+            Ok(peers) => peers,
+            Err(err) => {
+                error!(
+                    "Replicating fresh record {pretty_key:?} get_all_local_peers errored: {err:?}"
+                );
+                return;
+            }
+        };
+
+        let data_addr = NetworkAddress::from_record_key(&paid_key);
+        let mut peers_to_replicate_to = match network.get_range().await {
+            Err(error) => {
+                error!("Replicating fresh record {pretty_key:?} get_range errored: {error:?}");
+
+                return;
+            }
+
+            Ok(our_get_range) => {
+                match sort_peers_by_address_and_limit_by_distance(
+                    &all_peers,
+                    &data_addr,
+                    our_get_range,
+                ) {
+                    Ok(result) => result,
+                    Err(err) => {
+                        error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}");
+                        return;
+                    }
+                }
+            }
+        };
+
+        if peers_to_replicate_to.len() < CLOSE_GROUP_SIZE {
+            warn!(
+                "Replicating fresh record {pretty_key:?}: current GetRange is insufficient for secure replication. Falling back to CLOSE_GROUP_SIZE"
+            );
+
+            peers_to_replicate_to =
+                match sort_peers_by_address_and_limit(&all_peers, &data_addr, CLOSE_GROUP_SIZE) {
+                    Ok(result) => result,
+                    Err(err) => {
+                        error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}");
+                        return;
+                    }
+                };
+        }
+
+        let our_peer_id = network.peer_id();
+        let our_address = NetworkAddress::from_peer(our_peer_id);
+        #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress
+        let keys = vec![(data_addr.clone(), record_type.clone())];
+
+        for peer_id in &peers_to_replicate_to {
+            trace!("Replicating fresh record {pretty_key:?} to {peer_id:?}");
+            let request = Request::Cmd(Cmd::Replicate {
+                holder: our_address.clone(),
+                keys: keys.clone(),
+            });
+
+            network.send_req_ignore_reply(request, **peer_id);
+        }
+        trace!(
+            "Completed replicating fresh record {pretty_key:?} to {:?} peers, in {:?}",
+            peers_to_replicate_to.len(),
+            start.elapsed()
+        );
     }
 
-    /// Returns the closest peers to the given `NetworkAddress`, sorted by their distance to the key.
-    ///
-    /// Includes our node's `PeerId` while calculating the closest peers.
-    pub async fn node_get_closest_peers(&self, key: &NetworkAddress) -> Result<Vec<PeerId>> {
-        self.get_closest_peers(key, false).await
+    /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
+    /// Excludes the client's `PeerId` while calculating the closest peers.
+ pub async fn client_get_all_close_peers_in_range_or_close_group( + &self, + key: &NetworkAddress, + ) -> Result> { + self.get_all_close_peers_in_range_or_close_group(key, true) + .await } /// Returns a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that @@ -245,10 +423,10 @@ impl Network { } /// Returns all the PeerId from all the KBuckets from our local Routing Table - /// Also contains our own PeerId. - pub async fn get_closest_k_value_local_peers(&self) -> Result> { + /// Excludes our own PeerId. + pub async fn get_all_local_peers_excluding_self(&self) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_local_swarm_cmd(LocalSwarmCmd::GetClosestKLocalPeers { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender }); receiver .await @@ -280,7 +458,9 @@ impl Network { // Do not query the closest_peers during every re-try attempt. // The close_nodes don't change often and the previous set of close_nodes might be taking a while to write // the Chunk, so query them again incase of a failure. - close_nodes = self.get_closest_peers(&chunk_address, true).await?; + close_nodes = self + .client_get_all_close_peers_in_range_or_close_group(&chunk_address) + .await?; } retry_attempts += 1; info!( @@ -345,7 +525,9 @@ impl Network { ) -> Result { // The requirement of having at least CLOSE_GROUP_SIZE // close nodes will be checked internally automatically. - let mut close_nodes = self.get_closest_peers(&record_address, true).await?; + let mut close_nodes = self + .client_get_all_close_peers_in_range_or_close_group(&record_address) + .await?; // Filter out results from the ignored peers. close_nodes.retain(|peer_id| !ignore_peers.contains(peer_id)); @@ -428,7 +610,9 @@ impl Network { let record_address = NetworkAddress::from_record_key(&key); // The requirement of having at least CLOSE_GROUP_SIZE // close nodes will be checked internally automatically. - let close_nodes = self.get_closest_peers(&record_address, true).await?; + let close_nodes = self + .client_get_all_close_peers_in_range_or_close_group(&record_address) + .await?; let self_address = NetworkAddress::from_peer(self.peer_id()); let request = Request::Query(Query::GetRegisterRecord { @@ -492,6 +676,9 @@ impl Network { /// In case a target_record is provided, only return when fetched target. /// Otherwise count it as a failure when all attempts completed. /// + /// It also handles the split record error for spends and registers. + /// For spends, it accumulates the spends and returns an error if more than one. + /// For registers, it merges the registers and returns the merged record. #[cfg(not(target_arch = "wasm32"))] pub async fn get_record_from_network( &self, @@ -528,7 +715,7 @@ impl Network { Err(GetRecordError::RecordDoesNotMatch(_)) => { warn!("The returned record does not match target {pretty_key:?}."); } - Err(GetRecordError::NotEnoughCopies { expected, got, .. }) => { + Err(GetRecordError::NotEnoughCopiesInRange { expected, got, .. }) => { warn!("Not enough copies ({got}/{expected}) found yet for {pretty_key:?}."); } // libp2p RecordNotFound does mean no holders answered. @@ -537,8 +724,16 @@ impl Network { Err(GetRecordError::RecordNotFound) => { warn!("No holder of record '{pretty_key:?}' found."); } - Err(GetRecordError::SplitRecord { .. }) => { + // This is returned during SplitRecordError, we should not get this error here. + Err(GetRecordError::RecordKindMismatch) => { + error!("Record kind mismatch for {pretty_key:?}. 
This error should not happen here."); + } + Err(GetRecordError::SplitRecord { result_map }) => { error!("Encountered a split record for {pretty_key:?}."); + if let Some(record) = Self::handle_split_record_error(result_map, &key)? { + info!("Merged the split record (register) for {pretty_key:?}, into a single record"); + return Ok(record); + } } Err(GetRecordError::QueryTimeout) => { error!("Encountered query timeout for {pretty_key:?}."); @@ -563,6 +758,112 @@ impl Network { .await } + /// Handle the split record error. + /// Spend: Accumulate spends and return error if more than one. + /// Register: Merge registers and return the merged record. + #[cfg(not(target_arch = "wasm32"))] + fn handle_split_record_error( + result_map: &HashMap)>, + key: &RecordKey, + ) -> std::result::Result, backoff::Error> { + let pretty_key = PrettyPrintRecordKey::from(key); + + // attempt to deserialise and accumulate any spends or registers + let results_count = result_map.len(); + let mut accumulated_spends = HashSet::new(); + let mut collected_registers = Vec::new(); + + if results_count > 1 { + let mut record_kind = None; + info!("For record {pretty_key:?}, we have more than one result returned."); + for (record, _) in result_map.values() { + let Ok(header) = RecordHeader::from_record(record) else { + continue; + }; + let kind = record_kind.get_or_insert(header.kind); + if *kind != header.kind { + error!("Encountered a split record for {pretty_key:?} with different RecordHeaders. Expected {kind:?} but got {:?}",header.kind); + return Err(backoff::Error::Permanent(NetworkError::GetRecordError( + GetRecordError::RecordKindMismatch, + ))); + } + + // Accumulate the spends + if kind == &RecordKind::Spend { + info!("For record {pretty_key:?}, we have a split record for a spend attempt. Accumulating spends"); + + match get_raw_signed_spends_from_record(record) { + Ok(spends) => { + accumulated_spends.extend(spends); + } + Err(_) => { + continue; + } + } + } + // Accumulate the registers + else if kind == &RecordKind::Register { + info!("For record {pretty_key:?}, we have a split record for a register. Accumulating registers"); + let Ok(register) = try_deserialize_record::(record) else { + error!( + "Failed to deserialize register {pretty_key}. Skipping accumulation" + ); + continue; + }; + + match register.verify() { + Ok(_) => { + collected_registers.push(register); + } + Err(_) => { + error!( + "Failed to verify register for {pretty_key} at address: {}. 
Skipping accumulation", + register.address() + ); + continue; + } + } + } + } + } + + // Allow for early bail if we've already seen a split SpendAttempt + if accumulated_spends.len() > 1 { + info!("For record {pretty_key:?} task found split record for a spend, accumulated and sending them as a single record"); + let accumulated_spends = accumulated_spends.into_iter().collect::>(); + + return Err(backoff::Error::Permanent(NetworkError::DoubleSpendAttempt( + accumulated_spends, + ))); + } else if !collected_registers.is_empty() { + info!("For record {pretty_key:?} task found multiple registers, merging them."); + let signed_register = collected_registers.iter().fold(collected_registers[0].clone(), |mut acc, x| { + if let Err(e) = acc.merge(x) { + warn!("Ignoring forked register as we failed to merge conflicting registers at {}: {e}", x.address()); + } + acc + }); + + let record_value = try_serialize_record(&signed_register, RecordKind::Register) + .map_err(|err| { + error!( + "Error while serializing the merged register for {pretty_key:?}: {err:?}" + ); + backoff::Error::Permanent(NetworkError::from(err)) + })? + .to_vec(); + + let record = Record { + key: key.clone(), + value: record_value, + publisher: None, + expires: None, + }; + return Ok(Some(record)); + } + Ok(None) + } + /// Get the cost of storing the next record from the network pub async fn get_local_storecost( &self, @@ -638,6 +939,7 @@ impl Network { ); self.put_record_once(record.clone(), cfg).await.map_err(|err| { + // FIXME: Skip if we get a permanent error during verification, e.g., DoubleSpendAttempt warn!("Failed to PUT record with key: {pretty_key:?} to network (retry via backoff) with error: {err:?}"); if cfg.retry_strategy.is_some() { @@ -850,8 +1152,8 @@ impl Network { self.send_local_swarm_cmd(LocalSwarmCmd::QuoteVerification { quotes }); } - pub fn trigger_unrelevant_record_cleanup(&self) { - self.send_local_swarm_cmd(LocalSwarmCmd::TriggerUnrelevantRecordCleanup) + pub fn trigger_irrelevant_record_cleanup(&self) { + self.send_local_swarm_cmd(LocalSwarmCmd::TriggerIrrelevantRecordCleanup) } /// Helper to send NetworkSwarmCmd @@ -865,7 +1167,7 @@ impl Network { /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name. /// If `client` is false, then include `self` among the `closest_peers` - pub async fn get_closest_peers( + pub async fn get_close_group_closest_peers( &self, key: &NetworkAddress, client: bool, @@ -903,10 +1205,79 @@ impl Network { debug!("Network knowledge of close peers to {key:?} are: {close_peers_pretty_print:?}"); } - let closest_peers = sort_peers_by_address(&closest_peers, key, CLOSE_GROUP_SIZE)?; + let closest_peers = sort_peers_by_address_and_limit(&closest_peers, key, CLOSE_GROUP_SIZE)?; Ok(closest_peers.into_iter().cloned().collect()) } + /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name. 
+    /// If `client` is false, then include `self` among the `closest_peers`
+    /// Returns all peers found inside the current GetRange.
+    ///
+    /// If fewer than CLOSE_GROUP_SIZE peers are found in range, it falls back to the
+    /// closest CLOSE_GROUP_SIZE peers found.
+    pub async fn get_all_close_peers_in_range_or_close_group(
+        &self,
+        key: &NetworkAddress,
+        client: bool,
+    ) -> Result<Vec<PeerId>> {
+        let pretty_key = PrettyPrintKBucketKey(key.as_kbucket_key());
+        debug!("Getting all the closest peers in range of {pretty_key:?}");
+        let (sender, receiver) = oneshot::channel();
+        self.send_network_swarm_cmd(NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork {
+            key: key.clone(),
+            sender,
+        });
+
+        let found_peers = receiver.await?;
+
+        // Note how many peers the network query returned before any filtering
+        let result_len = found_peers.len();
+        let mut closest_peers = found_peers;
+
+        let expected_range = self.get_range().await?;
+
+        // ensure we're not including self here
+        if client {
+            // remove our peer id from the calculations here:
+            closest_peers.retain(|&x| x != self.peer_id());
+            if result_len != closest_peers.len() {
+                info!("Removed self client from the closest_peers");
+            }
+        }
+
+        if tracing::level_enabled!(tracing::Level::DEBUG) {
+            let close_peers_pretty_print: Vec<_> = closest_peers
+                .iter()
+                .map(|peer_id| {
+                    format!(
+                        "{peer_id:?}({:?})",
+                        PrettyPrintKBucketKey(NetworkAddress::from_peer(*peer_id).as_kbucket_key())
+                    )
+                })
+                .collect();
+
+            debug!(
+                "Network knowledge of close peers to {pretty_key:?} are: {close_peers_pretty_print:?}"
+            );
+        }
+
+        let mut restricted_closest_peers =
+            sort_peers_by_address_and_limit_by_distance(&closest_peers, key, expected_range)?;
+
+        if restricted_closest_peers.len() < CLOSE_GROUP_SIZE {
+            warn!(
+                "Getting close peers to {pretty_key:?}: current GetRange of {:?} is too strict and gives insufficient peers. Falling back to the closest CLOSE_GROUP_SIZE peers found",
+                expected_range.ilog2()
+            );
+
+            restricted_closest_peers =
+                sort_peers_by_address_and_limit(&closest_peers, key, CLOSE_GROUP_SIZE)?;
+        }
+
+        debug!(
+            "Network knowledge of closest peers within range {:?} of target {pretty_key:?}: {:?} peers",
+            expected_range.ilog2(),
+            restricted_closest_peers.len()
+        );
+        Ok(restricted_closest_peers.into_iter().cloned().collect())
+    }
+
     /// Send a `Request` to the provided set of peers and wait for their responses concurrently.
     /// If `get_all_responses` is true, we wait for the responses from all the peers.
     /// NB TODO: Will return an error if the request timeouts.
diff --git a/sn_networking/src/network_discovery.rs b/sn_networking/src/network_discovery.rs
index f3f4986134..3d82c944fb 100644
--- a/sn_networking/src/network_discovery.rs
+++ b/sn_networking/src/network_discovery.rs
@@ -8,7 +8,6 @@
 use crate::target_arch::Instant;
 use libp2p::{kad::KBucketKey, PeerId};
-use rand::{thread_rng, Rng};
 use rayon::iter::{IntoParallelIterator, ParallelIterator};
 use sn_protocol::NetworkAddress;
 use std::collections::{btree_map::Entry, BTreeMap};
@@ -52,13 +51,13 @@ impl NetworkDiscovery {
     }
 
     /// The result from the kad::GetClosestPeers are again used to update our kbucket.
- pub(crate) fn handle_get_closest_query(&mut self, closest_peers: Vec) { + pub(crate) fn handle_get_closest_query(&mut self, closest_peers: &[PeerId]) { let now = Instant::now(); let candidates_map: BTreeMap> = closest_peers - .into_iter() + .iter() .filter_map(|peer| { - let peer = NetworkAddress::from_peer(peer); + let peer = NetworkAddress::from_peer(*peer); let peer_key = peer.as_kbucket_key(); peer_key .distance(&self.self_key) @@ -83,18 +82,28 @@ impl NetworkDiscovery { /// Returns one random candidate per bucket. Also tries to refresh the candidate list. /// Todo: Limit the candidates to return. Favor the closest buckets. - pub(crate) fn candidates(&mut self) -> Vec<&NetworkAddress> { - self.try_refresh_candidates(); - - let mut rng = thread_rng(); + pub(crate) fn candidates(&mut self) -> Vec { let mut op = Vec::with_capacity(self.candidates.len()); - let candidates = self.candidates.values().filter_map(|candidates| { - // get a random index each time - let random_index = rng.gen::() % candidates.len(); - candidates.get(random_index) - }); - op.extend(candidates); + let mut generate_fresh_candidates = false; + for addresses in self.candidates.values_mut() { + // get a random candidate from each bucket each time + if addresses.is_empty() { + generate_fresh_candidates = true; + continue; + } + + // remove the first each time + let address = addresses.remove(0); + op.push(address); + } + + if generate_fresh_candidates { + // we only refresh when we are running low on candidates + self.try_refresh_candidates(); + } + + debug!("Candidates returned: {}", op.len()); op } diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index ee4e413c5e..254ec6380a 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -22,7 +22,7 @@ use libp2p::{ identity::PeerId, kad::{ store::{Error, RecordStore, Result}, - KBucketDistance as Distance, KBucketKey, ProviderRecord, Record, RecordKey as Key, + KBucketDistance as Distance, ProviderRecord, Record, RecordKey as Key, }, }; #[cfg(feature = "open-metrics")] @@ -70,14 +70,14 @@ const MIN_STORE_COST: u64 = 1; /// A `RecordStore` that stores records on disk. pub struct NodeRecordStore { - /// The identity of the peer owning the store. - local_key: KBucketKey, /// The address of the peer owning the store local_address: NetworkAddress, /// The configuration of the store. config: NodeRecordStoreConfig, - /// A set of keys, each corresponding to a data `Record` stored on disk. + /// Main records store remains unchanged for compatibility records: HashMap, + /// Additional index organizing records by distance bucket + records_by_bucket: HashMap>, /// FIFO simple cache of records to reduce read times records_cache: VecDeque, /// A map from record keys to their indices in the cache @@ -90,7 +90,7 @@ pub struct NodeRecordStore { /// ilog2 distance range of responsible records /// AKA: how many buckets of data do we consider "close" /// None means accept all records. - responsible_distance_range: Option, + responsible_distance_range: Option, #[cfg(feature = "open-metrics")] /// Used to report the number of records held by the store to the metrics server. 
    record_count_metric: Option<Gauge>,
@@ -284,10 +284,10 @@
         let cache_size = config.records_cache_size;
         let mut record_store = NodeRecordStore {
-            local_key: KBucketKey::from(local_id),
             local_address: NetworkAddress::from_peer(local_id),
             config,
             records,
+            records_by_bucket: HashMap::new(),
             records_cache: VecDeque::with_capacity(cache_size),
             records_cache_map: HashMap::with_capacity(cache_size),
             network_event_sender,
@@ -315,11 +315,6 @@
         self
     }
 
-    /// Returns the current distance ilog2 (aka bucket) range of CLOSE_GROUP nodes.
-    pub fn get_responsible_distance_range(&self) -> Option<u32> {
-        self.responsible_distance_range
-    }
-
     // Converts a Key into a Hex string.
     fn generate_filename(key: &Key) -> String {
         hex::encode(key.as_ref())
@@ -459,7 +454,7 @@
     // those `out of range` records shall be cleaned up.
     // This is to avoid `over-quoting` during restart, when RT is not fully populated,
     // result in mis-calculation of relevant records.
-    pub fn cleanup_unrelevant_records(&mut self) {
+    pub fn cleanup_irrelevant_records(&mut self) {
         let accumulated_records = self.records.len();
         if accumulated_records < MAX_RECORDS_COUNT * 6 / 10 {
             return;
@@ -471,30 +466,25 @@
             return;
         };
 
-        let mut removed_keys = Vec::new();
-        self.records.retain(|key, _val| {
-            let kbucket_key = KBucketKey::new(key.to_vec());
-            let is_in_range =
-                responsible_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0);
-            if !is_in_range {
-                removed_keys.push(key.clone());
-            }
-            is_in_range
-        });
+        let max_bucket = responsible_range.ilog2().unwrap_or_default();
 
-        // Each `remove` function call will try to re-calculate furthest
-        // when the key to be removed is the current furthest.
-        // To avoid duplicated calculation, hence reset `furthest` first here.
-        self.farthest_record = self.calculate_farthest();
+        // Collect keys to remove from buckets beyond our range
+        let keys_to_remove: Vec<Key> = self
+            .records_by_bucket
+            .iter()
+            .filter(|(&bucket, _)| bucket > max_bucket)
+            .flat_map(|(_, keys)| keys.iter().cloned())
+            .collect();
+
+        let keys_to_remove_len = keys_to_remove.len();
 
-        for key in removed_keys.iter() {
-            // Deletion from disk will be undertaken as a spawned task,
-            // hence safe to call this function repeatedly here.
-            self.remove(key);
+        // Remove collected keys
+        for key in keys_to_remove {
+            self.remove(&key);
         }
 
-        info!("Cleaned up {} unrelevant records, among the original {accumulated_records} accumulated_records",
-            removed_keys.len());
+        info!("Cleaned up {} irrelevant records, out of the original {accumulated_records} accumulated records",
+            keys_to_remove_len);
     }
 }
 
@@ -523,17 +513,26 @@
     /// to return the record as stored.
pub(crate) fn mark_as_stored(&mut self, key: Key, record_type: RecordType) { let addr = NetworkAddress::from_record_key(&key); - let _ = self - .records + let distance = self.local_address.distance(&addr); + let bucket = distance.ilog2().unwrap_or_default(); + + // Update main records store + self.records .insert(key.clone(), (addr.clone(), record_type)); - let key_distance = self.local_address.distance(&addr); + // Update bucket index + self.records_by_bucket + .entry(bucket) + .or_default() + .insert(key.clone()); + + // Update farthest record if needed (unchanged) if let Some((_farthest_record, farthest_record_distance)) = self.farthest_record.clone() { - if key_distance > farthest_record_distance { - self.farthest_record = Some((key, key_distance)); + if distance > farthest_record_distance { + self.farthest_record = Some((key, distance)); } } else { - self.farthest_record = Some((key, key_distance)); + self.farthest_record = Some((key, distance)); } } @@ -698,29 +697,26 @@ impl NodeRecordStore { /// Calculate how many records are stored within a distance range pub fn get_records_within_distance_range( &self, - records: HashSet<&Key>, - distance_range: u32, + _records: HashSet<&Key>, + max_distance: Distance, ) -> usize { - debug!( - "Total record count is {:?}. Distance is: {distance_range:?}", - self.records.len() - ); + let max_bucket = max_distance.ilog2().unwrap_or_default(); - let relevant_records_len = records + let within_range = self + .records_by_bucket .iter() - .filter(|key| { - let kbucket_key = KBucketKey::new(key.to_vec()); - distance_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0) - }) - .count(); + .filter(|(&bucket, _)| bucket <= max_bucket) + .map(|(_, keys)| keys.len()) + .sum(); + + Marker::CloseRecordsLen(within_range).log(); - Marker::CloseRecordsLen(relevant_records_len).log(); - relevant_records_len + within_range } /// Setup the distance range. 
- pub(crate) fn set_responsible_distance_range(&mut self, farthest_responsible_bucket: u32) { - self.responsible_distance_range = Some(farthest_responsible_bucket); + pub(crate) fn set_responsible_distance_range(&mut self, farthest_distance: Distance) { + self.responsible_distance_range = Some(farthest_distance); } } @@ -813,7 +809,23 @@ impl RecordStore for NodeRecordStore { } fn remove(&mut self, k: &Key) { - let _ = self.records.remove(k); + // Remove from main store + if let Some((addr, _)) = self.records.remove(k) { + // Remove from bucket index + let bucket = self + .local_address + .distance(&addr) + .ilog2() + .unwrap_or_default(); + if let Some(bucket_keys) = self.records_by_bucket.get_mut(&bucket) { + bucket_keys.remove(k); + // Clean up empty buckets + if bucket_keys.is_empty() { + self.records_by_bucket.remove(&bucket); + } + } + } + self.records_cache.retain(|r| r.key != *k); #[cfg(feature = "open-metrics")] @@ -1244,7 +1256,7 @@ mod tests { let owner_sk = SecretKey::random(); let owner_pk = owner_sk.public_key(); - let mut scratchpad = Scratchpad::new(owner_pk); + let mut scratchpad = Scratchpad::new(owner_pk, 0); let _next_version = scratchpad.update_and_sign(unencrypted_scratchpad_data.clone(), &owner_sk); @@ -1289,8 +1301,7 @@ mod tests { let decrypted_data = scratchpad.decrypt_data(&owner_sk)?; assert_eq!( - decrypted_data, - Some(unencrypted_scratchpad_data), + decrypted_data, unencrypted_scratchpad_data, "Stored scratchpad data should match original" ); } @@ -1500,10 +1511,7 @@ mod tests { .wrap_err("Could not parse record store key")?, ); // get the distance to this record from our local key - let distance = self_address - .distance(&halfway_record_address) - .ilog2() - .unwrap_or(0); + let distance = self_address.distance(&halfway_record_address); // must be plus one bucket from the halfway record store.set_responsible_distance_range(distance); diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index 8e3bc67364..53cea6701e 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -10,7 +10,7 @@ use crate::record_store::{ClientRecordStore, NodeRecordStore}; use libp2p::kad::{ store::{RecordStore, Result}, - ProviderRecord, Record, RecordKey, + KBucketDistance, ProviderRecord, Record, RecordKey, }; use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{storage::RecordType, NetworkAddress}; @@ -130,17 +130,7 @@ impl UnifiedRecordStore { } } - pub(crate) fn get_farthest_replication_distance_bucket(&self) -> Option { - match self { - Self::Client(_store) => { - warn!("Calling get_distance_range at Client. This should not happen"); - None - } - Self::Node(store) => store.get_responsible_distance_range(), - } - } - - pub(crate) fn set_distance_range(&mut self, distance: u32) { + pub(crate) fn set_distance_range(&mut self, distance: KBucketDistance) { match self { Self::Client(_store) => { warn!("Calling set_distance_range at Client. This should not happen"); @@ -169,12 +159,12 @@ impl UnifiedRecordStore { }; } - pub(crate) fn cleanup_unrelevant_records(&mut self) { + pub(crate) fn cleanup_irrelevant_records(&mut self) { match self { Self::Client(_store) => { - warn!("Calling cleanup_unrelevant_records at Client. This should not happen"); + warn!("Calling cleanup_irrelevant_records at Client. 
This should not happen"); } - Self::Node(store) => store.cleanup_unrelevant_records(), + Self::Node(store) => store.cleanup_irrelevant_records(), } } } diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 1b90ac9a53..1858d65350 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -8,7 +8,9 @@ #![allow(clippy::mutable_key_type)] use crate::target_arch::spawn; +use crate::CLOSE_GROUP_SIZE; use crate::{event::NetworkEvent, target_arch::Instant}; +use itertools::Itertools; use libp2p::{ kad::{KBucketDistance as Distance, RecordKey, K_VALUE}, PeerId, @@ -41,8 +43,8 @@ pub(crate) struct ReplicationFetcher { // Avoid fetching same chunk from different nodes AND carry out too many parallel tasks. on_going_fetches: HashMap<(RecordKey, RecordType), (PeerId, ReplicationTimeout)>, event_sender: mpsc::Sender, - /// ilog2 bucket distance range that the incoming key shall be fetched - distance_range: Option, + /// KBucketDistance range that the incoming key shall be fetched + distance_range: Option, /// Restrict fetch range to closer than this value /// used when the node is full, but we still have "close" data coming in /// that is _not_ closer than our farthest max record @@ -63,7 +65,7 @@ impl ReplicationFetcher { } /// Set the distance range. - pub(crate) fn set_replication_distance_range(&mut self, distance_range: u32) { + pub(crate) fn set_replication_distance_range(&mut self, distance_range: Distance) { self.distance_range = Some(distance_range); } @@ -76,44 +78,45 @@ impl ReplicationFetcher { holder: PeerId, incoming_keys: Vec<(NetworkAddress, RecordType)>, locally_stored_keys: &HashMap, + all_local_peers: &[PeerId], ) -> Vec<(PeerId, RecordKey)> { - // remove locally stored from incoming_keys - let mut new_incoming_keys: Vec<_> = incoming_keys - .iter() - .filter(|(addr, record_type)| { - let key = &addr.to_record_key(); - !locally_stored_keys.contains_key(key) - && !self - .to_be_fetched - .contains_key(&(key.clone(), record_type.clone(), holder)) - }) - .cloned() - .collect(); - - self.remove_stored_keys(locally_stored_keys); + // Pre-calculate self_address since it's used multiple times let self_address = NetworkAddress::from_peer(self.self_peer_id); - let total_incoming_keys = new_incoming_keys.len(); + let total_incoming_keys = incoming_keys.len(); - // In case of node full, restrict fetch range - if let Some(farthest_distance) = self.farthest_acceptable_distance { - let mut out_of_range_keys = vec![]; - new_incoming_keys.retain(|(addr, _)| { - let is_in_range = self_address.distance(addr) <= farthest_distance; - if !is_in_range { - out_of_range_keys.push(addr.clone()); - } - is_in_range - }); + // Avoid multiple allocations by using with_capacity + let mut new_incoming_keys = Vec::with_capacity(incoming_keys.len()); + let mut keys_to_fetch = Vec::new(); + let mut out_of_range_keys = Vec::new(); + + // Single pass filtering instead of multiple retain() calls + for (addr, record_type) in incoming_keys { + let key = addr.to_record_key(); - info!("Node is full, among {total_incoming_keys} incoming replications from {holder:?}, found {} beyond current farthest", out_of_range_keys.len()); - for addr in out_of_range_keys.iter() { - debug!("Node is full, the incoming record_key {addr:?} is beyond current farthest record"); + // Skip if locally stored or already pending fetch + if locally_stored_keys.contains_key(&key) + || self + .to_be_fetched + .contains_key(&(key.clone(), record_type.clone(), 
holder)) + { + continue; } + + // Check distance constraints + if let Some(farthest_distance) = self.farthest_acceptable_distance { + if self_address.distance(&addr) > farthest_distance { + out_of_range_keys.push(addr); + continue; + } + } + + new_incoming_keys.push((addr, record_type)); } - let mut keys_to_fetch = vec![]; - // For new data, it will be replicated out in a special replication_list of length 1. - // And we shall `fetch` that copy immediately (if in range), if it's not being fetched. + // Remove any outdated entries in `to_be_fetched` + self.remove_stored_keys(locally_stored_keys); + + // Special case for single new key if new_incoming_keys.len() == 1 { let (record_address, record_type) = new_incoming_keys[0].clone(); @@ -132,13 +135,29 @@ impl ReplicationFetcher { self.to_be_fetched .retain(|_, time_out| *time_out > Instant::now()); - let mut out_of_range_keys = vec![]; // Filter out those out_of_range ones among the incoming_keys. if let Some(ref distance_range) = self.distance_range { new_incoming_keys.retain(|(addr, _record_type)| { - let is_in_range = - self_address.distance(addr).ilog2().unwrap_or(0) <= *distance_range; + // find all closer peers to the data + let closer_peers_len = all_local_peers + .iter() + .filter(|peer_id| { + let peer_address = NetworkAddress::from_peer(**peer_id); + addr.distance(&peer_address) <= *distance_range + }) + .collect_vec() + .len(); + + // we consider ourselves in range if + // A) We don't know enough closer peers than ourselves + // or B) The distance to the data is within our GetRange + let is_in_range = closer_peers_len <= CLOSE_GROUP_SIZE + || self_address.distance(addr).ilog2() <= distance_range.ilog2(); if !is_in_range { + warn!( + "Rejecting incoming key: {addr:?} as out of range. {:?} is larger than {:?} ", + self_address.distance(addr).ilog2(), + distance_range.ilog2()); out_of_range_keys.push(addr.clone()); } is_in_range @@ -147,10 +166,6 @@ impl ReplicationFetcher { if !out_of_range_keys.is_empty() { info!("Among {total_incoming_keys} incoming replications from {holder:?}, found {} out of range", out_of_range_keys.len()); - for addr in out_of_range_keys.iter() { - let ilog2_distance = self_address.distance(addr).ilog2(); - debug!("The incoming record_key {addr:?} is out of range with ilog2_distance being {ilog2_distance:?}, do not fetch it from {holder:?}"); - } } // add in-range AND non existing keys to the fetcher @@ -258,14 +273,20 @@ impl ReplicationFetcher { return vec![]; } - if !self.to_be_fetched.is_empty() { - debug!( - "Number of records still to be retrieved: {:?}", - self.to_be_fetched.len() - ); + // early return if nothing there + if self.to_be_fetched.is_empty() { + return vec![]; } - let mut data_to_fetch = vec![]; + debug!( + "Number of records still to be retrieved: {:?}", + self.to_be_fetched.len() + ); + + // Pre-allocate vectors with known capacity + let remaining_capacity = MAX_PARALLEL_FETCH - self.on_going_fetches.len(); + let mut data_to_fetch = Vec::with_capacity(remaining_capacity); + // Sort to_be_fetched by key closeness to our PeerId let mut to_be_fetched_sorted: Vec<_> = self.to_be_fetched.iter_mut().collect(); @@ -428,8 +449,12 @@ mod tests { incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = - replication_fetcher.add_keys(PeerId::random(), incoming_keys, &locally_stored_keys); + let keys_to_fetch = replication_fetcher.add_keys( + PeerId::random(), + incoming_keys, + &locally_stored_keys, + &[], + ); assert_eq!(keys_to_fetch.len(), MAX_PARALLEL_FETCH); // we should not 
fetch anymore keys @@ -441,6 +466,7 @@ mod tests { PeerId::random(), vec![(key_1, RecordType::Chunk), (key_2, RecordType::Chunk)], &locally_stored_keys, + &[], ); assert!(keys_to_fetch.is_empty()); @@ -451,6 +477,7 @@ mod tests { PeerId::random(), vec![(key, RecordType::Chunk)], &locally_stored_keys, + &[], ); assert!(!keys_to_fetch.is_empty()); @@ -476,34 +503,41 @@ mod tests { let mut replication_fetcher = ReplicationFetcher::new(peer_id, event_sender); // Set distance range + // way to update this test let distance_target = NetworkAddress::from_peer(PeerId::random()); - let distance_range = self_address.distance(&distance_target).ilog2().unwrap_or(1); + let distance_range = self_address.distance(&distance_target); replication_fetcher.set_replication_distance_range(distance_range); + // generate a list of close peers + let close_peers = (0..100).map(|_| PeerId::random()).collect::>(); + let mut incoming_keys = Vec::new(); let mut in_range_keys = 0; (0..100).for_each(|_| { let random_data: Vec = (0..50).map(|_| rand::random::()).collect(); let key = NetworkAddress::from_record_key(&RecordKey::from(random_data)); - if key.distance(&self_address).ilog2().unwrap_or(0) <= distance_range { + if key.distance(&self_address).ilog2() <= distance_range.ilog2() { in_range_keys += 1; } incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = - replication_fetcher.add_keys(PeerId::random(), incoming_keys, &Default::default()); + let keys_to_fetch = replication_fetcher.add_keys( + PeerId::random(), + incoming_keys, + &Default::default(), + &close_peers, + ); assert_eq!( keys_to_fetch.len(), replication_fetcher.on_going_fetches.len(), "keys to fetch and ongoing fetches should match" ); - assert_eq!( - in_range_keys, - keys_to_fetch.len() + replication_fetcher.to_be_fetched.len(), - "all keys should be in range and in the fetcher" + assert!( + keys_to_fetch.len() + replication_fetcher.to_be_fetched.len() >= in_range_keys, + "at least all keys in range should be in the fetcher" ); } } diff --git a/sn_networking/src/target_arch.rs b/sn_networking/src/target_arch.rs index 264ef2dc11..35a1b62092 100644 --- a/sn_networking/src/target_arch.rs +++ b/sn_networking/src/target_arch.rs @@ -7,19 +7,22 @@ // permissions and limitations relating to use of the SAFE Network Software. #[cfg(not(target_arch = "wasm32"))] -pub use std::time::Instant; +pub use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; /// Wasm32 target arch does not support `time` or spawning via tokio /// so we shim in alternatives here when building for that architecture #[cfg(not(target_arch = "wasm32"))] pub use tokio::{ spawn, - time::{interval, sleep, timeout, Duration, Interval}, + time::{interval, sleep, timeout, Interval}, }; +#[cfg(target_arch = "wasm32")] +pub use std::time::Duration; + #[cfg(target_arch = "wasm32")] pub use wasmtimer::{ - std::Instant, + std::{Instant, SystemTime, UNIX_EPOCH}, tokio::{interval, sleep, timeout, Interval}, }; diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 76b6349ce1..40c6182f94 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -6,9 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
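The `target_arch` changes above widen the shim so that `Duration`, `SystemTime`, and `UNIX_EPOCH` resolve on both native and wasm32 builds from a single import path. A sketch of that pattern (the wasm arm is left as a comment, since it assumes the `wasmtimer` crate is available):

```rust
// One module re-exports the time primitives; the rest of the crate imports
// them from here and never branches on the target itself.
#[cfg(not(target_arch = "wasm32"))]
mod target_arch {
    pub use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
}

// #[cfg(target_arch = "wasm32")]
// mod target_arch {
//     pub use std::time::Duration;
//     pub use wasmtimer::std::{Instant, SystemTime, UNIX_EPOCH};
// }

fn main() {
    use target_arch::{Duration, Instant, SystemTime, UNIX_EPOCH};
    let started = Instant::now();
    let since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).expect("clock before epoch");
    assert!(since_epoch > Duration::ZERO && started.elapsed() < Duration::from_secs(1));
}
```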
-use crate::{ - close_group_majority, driver::GetRecordCfg, GetRecordError, Network, NetworkError, Result, -}; +use crate::{driver::GetRecordCfg, Network, NetworkError, Result}; use libp2p::kad::{Quorum, Record}; use sn_protocol::{ storage::{try_deserialize_record, RecordHeader, RecordKind, RetryStrategy, SpendAddress}, @@ -39,7 +37,7 @@ impl Network { }; let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( - "Got record from the network, {:?}", + "Got raw spends from the network, {:?}", PrettyPrintRecordKey::from(&record.key) ); get_raw_signed_spends_from_record(&record) @@ -51,38 +49,14 @@ impl Network { /// If we get a quorum error, we increase the RetryStrategy pub async fn get_spend(&self, address: SpendAddress) -> Result { let key = NetworkAddress::from_spend_address(address).to_record_key(); - let mut get_cfg = GetRecordCfg { + let get_cfg = GetRecordCfg { get_quorum: Quorum::All, retry_strategy: Some(RetryStrategy::Quick), target_record: None, expected_holders: Default::default(), is_register: false, }; - let record = match self.get_record_from_network(key.clone(), &get_cfg).await { - Ok(record) => record, - Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { - record, - expected, - got, - })) => { - // if majority holds the spend, it might be worth to be trusted. - if got >= close_group_majority() { - debug!("At least a majority nodes hold the spend {address:?}, going to trust it if can fetch with majority again."); - get_cfg.get_quorum = Quorum::Majority; - get_cfg.retry_strategy = Some(RetryStrategy::Balanced); - self.get_record_from_network(key, &get_cfg).await? - } else { - return Err(NetworkError::GetRecordError( - GetRecordError::NotEnoughCopies { - record, - expected, - got, - }, - )); - } - } - Err(err) => return Err(err), - }; + let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( "Got record from the network, {:?}", PrettyPrintRecordKey::from(&record.key) diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 58285aa642..5903b68729 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.112.1" +version = "0.112.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -52,15 +52,15 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_logging = { path = "../sn_logging", version = "0.2.37" } -sn_networking = { path = "../sn_networking", version = "0.19.0" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12" } -sn_registers = { path = "../sn_registers", version = "0.4.0" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0" } -sn_service_management = { path = "../sn_service_management", version = "0.4.0" } -sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_networking = { path = "../sn_networking", version = "0.19.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } +sn_registers = { path = "../sn_registers", version = "0.4.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } 
+sn_service_management = { path = "../sn_service_management", version = "0.4.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } sysinfo = { version = "0.30.8", default-features = false } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ @@ -83,16 +83,16 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -evmlib = { path = "../evmlib", version = "0.1.1" } -autonomi = { path = "../autonomi", version = "0.2.0", features = ["registers"] } +evmlib = { path = "../evmlib", version = "0.1.2" } +autonomi = { path = "../autonomi", version = "0.2.2", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.12", features = [ +sn_protocol = { path = "../sn_protocol", version = "0.17.13", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.20.0", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.20.1", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 4bb21c720c..204067879a 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -45,7 +45,7 @@ use sn_evm::EvmNetwork; /// Interval to trigger replication of all records to all peers. /// This is the max time it should take. Minimum interval at any node will be half this -pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 45; +pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 180; /// Interval to trigger bad node detection. /// This is the max time it should take. Minimum interval at any node will be half this @@ -272,9 +272,9 @@ impl Node { tokio::time::interval(UPTIME_METRICS_UPDATE_INTERVAL); let _ = uptime_metrics_update_interval.tick().await; // first tick completes immediately - let mut unrelevant_records_cleanup_interval = + let mut irrelevant_records_cleanup_interval = tokio::time::interval(UNRELEVANT_RECORDS_CLEANUP_INTERVAL); - let _ = unrelevant_records_cleanup_interval.tick().await; // first tick completes immediately + let _ = irrelevant_records_cleanup_interval.tick().await; // first tick completes immediately loop { let peers_connected = &peers_connected; @@ -333,11 +333,11 @@ impl Node { let _ = metrics_recorder.uptime.set(metrics_recorder.started_instant.elapsed().as_secs() as i64); } } - _ = unrelevant_records_cleanup_interval.tick() => { + _ = irrelevant_records_cleanup_interval.tick() => { let network = self.network().clone(); let _handle = spawn(async move { - Self::trigger_unrelevant_record_cleanup(network); + Self::trigger_irrelevant_record_cleanup(network); }); } } @@ -485,12 +485,12 @@ impl Node { } NetworkEvent::ChunkProofVerification { peer_id, - keys_to_verify, + key_to_verify, } => { event_header = "ChunkProofVerification"; let network = self.network().clone(); - debug!("Going to verify chunk {keys_to_verify:?} against peer {peer_id:?}"); + debug!("Going to verify chunk {key_to_verify} against peer {peer_id:?}"); let _handle = spawn(async move { // To avoid the peer is in the process of getting the copy via replication, @@ -498,7 +498,7 @@ impl Node { // Only report the node as bad when ALL the verification attempts failed. let mut attempts = 0; while attempts < MAX_CHUNK_PROOF_VERIFY_ATTEMPTS { - if chunk_proof_verify_peer(&network, peer_id, &keys_to_verify).await { + if chunk_proof_verify_peer(&network, peer_id, &key_to_verify).await { return; } // Replication interval is 22s - 45s. 
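With `PERIODIC_REPLICATION_INTERVAL_MAX_S` raised to 180, the constant's doc comment (minimum interval at any node is half the max) puts each node's replication period in the 90s-180s window, so the 22s-45s figure quoted in the comment above predates this change. A sketch of picking such a period, assuming a uniform draw via the `rand` crate:

```rust
use rand::Rng; // requires the `rand` crate
use std::time::Duration;

const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 180;

// Each node picks a random period between half the max and the max,
// so replication rounds across the network stay de-synchronised.
fn replication_interval() -> Duration {
    let max = PERIODIC_REPLICATION_INTERVAL_MAX_S;
    let secs = rand::thread_rng().gen_range(max / 2..=max);
    Duration::from_secs(secs)
}

fn main() {
    let period = replication_interval();
    assert!(period >= Duration::from_secs(90) && period <= Duration::from_secs(180));
}
```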
@@ -528,7 +528,7 @@ impl Node { async fn close_nodes_shunning_peer(network: &Network, peer_id: PeerId) -> bool { // using `client` to exclude self let closest_peers = match network - .client_get_closest_peers(&NetworkAddress::from_peer(peer_id)) + .client_get_all_close_peers_in_range_or_close_group(&NetworkAddress::from_peer(peer_id)) .await { Ok(peers) => peers, @@ -768,44 +768,36 @@ impl Node { } } -async fn chunk_proof_verify_peer( - network: &Network, - peer_id: PeerId, - keys: &[NetworkAddress], -) -> bool { - for key in keys.iter() { - let check_passed = if let Ok(Some(record)) = - network.get_local_record(&key.to_record_key()).await - { - let nonce = thread_rng().gen::(); - let expected_proof = ChunkProof::new(&record.value, nonce); - debug!("To verify peer {peer_id:?}, chunk_proof for {key:?} is {expected_proof:?}"); - - let request = Request::Query(Query::GetChunkExistenceProof { - key: key.clone(), - nonce, - }); - let responses = network - .send_and_get_responses(&[peer_id], &request, true) - .await; - let n_verified = responses - .into_iter() - .filter_map(|(peer, resp)| { - received_valid_chunk_proof(key, &expected_proof, peer, resp) - }) - .count(); - - n_verified >= 1 - } else { - error!( +async fn chunk_proof_verify_peer(network: &Network, peer_id: PeerId, key: &NetworkAddress) -> bool { + let check_passed = if let Ok(Some(record)) = + network.get_local_record(&key.to_record_key()).await + { + let nonce = thread_rng().gen::(); + let expected_proof = ChunkProof::new(&record.value, nonce); + debug!("To verify peer {peer_id:?}, chunk_proof for {key:?} is {expected_proof:?}"); + + let request = Request::Query(Query::GetChunkExistenceProof { + key: key.clone(), + nonce, + }); + let responses = network + .send_and_get_responses(&[peer_id], &request, true) + .await; + let n_verified = responses + .into_iter() + .filter_map(|(peer, resp)| received_valid_chunk_proof(key, &expected_proof, peer, resp)) + .count(); + + n_verified >= 1 + } else { + error!( "To verify peer {peer_id:?} Could not get ChunkProof for {key:?} as we don't have the record locally." ); - true - }; + true + }; - if !check_passed { - return false; - } + if !check_passed { + return false; } true diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 3f3343f403..224fc3bcb9 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -137,10 +137,22 @@ impl Node { store_scratchpad_result } RecordKind::Scratchpad => { - error!("Scratchpad should not be validated at this point"); - Err(Error::InvalidPutWithoutPayment( - PrettyPrintRecordKey::from(&record.key).into_owned(), - )) + // make sure we already have this scratchpad locally, else reject it as first time upload needs payment + let key = record.key.clone(); + let scratchpad = try_deserialize_record::(&record)?; + let net_addr = NetworkAddress::ScratchpadAddress(*scratchpad.address()); + let pretty_key = PrettyPrintRecordKey::from(&key); + trace!("Got record to store without payment for scratchpad at {pretty_key:?}"); + if !self.validate_key_and_existence(&net_addr, &key).await? 
{ + warn!("Ignore store without payment for scratchpad at {pretty_key:?}"); + return Err(Error::InvalidPutWithoutPayment( + PrettyPrintRecordKey::from(&record.key).into_owned(), + )); + } + + // store the scratchpad + self.validate_and_store_scratchpad_record(scratchpad, key, false) + .await } RecordKind::Spend => { let record_key = record.key.clone(); @@ -387,7 +399,6 @@ impl Node { /// Check Counter: It MUST ensure that the new counter value is strictly greater than the currently stored value to prevent replay attacks. /// Verify Signature: It MUST use the public key to verify the BLS12-381 signature against the content hash and the counter. /// Accept or Reject: If all verifications succeed, the node MUST accept the packet and replace any previous version. Otherwise, it MUST reject the update. - pub(crate) async fn validate_and_store_scratchpad_record( &self, scratchpad: Scratchpad, @@ -396,7 +407,8 @@ impl Node { ) -> Result<()> { // owner PK is defined herein, so as long as record key and this match, we're good let addr = scratchpad.address(); - debug!("Validating and storing scratchpad {addr:?}"); + let count = scratchpad.count(); + debug!("Validating and storing scratchpad {addr:?} with count {count}"); // check if the deserialized value's RegisterAddress matches the record's key let scratchpad_key = NetworkAddress::ScratchpadAddress(*addr).to_record_key(); @@ -552,7 +564,7 @@ impl Node { }; debug!( - "Got {} validated spends with key: {unique_pubkey:?} at {pretty_key:?}", + "Found {} spends with key: {unique_pubkey:?} at {pretty_key:?}", validated_spends.len() ); @@ -564,14 +576,12 @@ impl Node { expires: None, }; self.network().put_local_record(record); - debug!( - "Successfully stored validated spends with key: {unique_pubkey:?} at {pretty_key:?}" - ); + debug!("Successfully stored spends with key: {unique_pubkey:?} at {pretty_key:?}"); // Just log the double spend attempt. DoubleSpend error during PUT is not used and would just lead to // RecordRejected marker (which is incorrect, since we store double spends). if validated_spends.len() > 1 { - warn!("Got double spend(s) of len {} for the Spend PUT with unique_pubkey {unique_pubkey}", validated_spends.len()); + warn!("Got Burnt SpendAttempts of len {} for the Spend PUT with unique_pubkey {unique_pubkey} at {pretty_key:?}", validated_spends.len()); } self.record_metrics(Marker::ValidSpendRecordPutFromNetwork(&pretty_key)); @@ -630,9 +640,13 @@ impl Node { #[cfg(feature = "open-metrics")] if let Some(metrics_recorder) = self.metrics_recorder() { + // FIXME: We would reach the MAX if the storecost is scaled up. + let current_value = metrics_recorder.current_reward_wallet_balance.get(); + let new_value = + current_value.saturating_add(storecost.as_atto().try_into().unwrap_or(i64::MAX)); let _ = metrics_recorder .current_reward_wallet_balance - .inc_by(storecost.as_atto().try_into().unwrap_or(i64::MAX)); // TODO maybe metrics should be in u256 too? + .set(new_value); } self.events_channel() .broadcast(crate::NodeEvent::RewardReceived(storecost, address.clone())); @@ -745,6 +759,8 @@ impl Node { // get spends from the network at the address for that unique pubkey let network_spends = match self.network().get_raw_spends(spend_addr).await { Ok(spends) => spends, + // Fixme: We don't return SplitRecord Error for spends, instead we return NetworkError::DoubleSpendAttempt. + // The fix should also consider/change all the places we try to get spends, for eg `get_raw_signed_spends_from_record` etc. 
Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { warn!("Got a split record (double spend) for {unique_pubkey:?} from the network"); let mut spends = vec![]; @@ -756,13 +772,14 @@ impl Node { } spends } - Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { + Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopiesInRange { record, got, + range, .. })) => { info!( - "Retrieved {got} copies of the record for {unique_pubkey:?} from the network" + "Retrieved {got} copies of the record for {unique_pubkey:?} from the network in range {range}" ); match get_raw_signed_spends_from_record(&record) { Ok(spends) => spends, diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 59e0cff078..bc3496b750 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -6,15 +6,18 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{error::Result, node::Node}; +use crate::{ + error::{Error, Result}, + node::Node, +}; use libp2p::{ kad::{Quorum, Record, RecordKey}, PeerId, }; -use sn_networking::{sort_peers_by_address, GetRecordCfg, Network, REPLICATION_PEERS_COUNT}; +use sn_networking::{GetRecordCfg, Network}; use sn_protocol::{ - messages::{Cmd, Query, QueryResponse, Request, Response}, - storage::RecordType, + messages::{Query, QueryResponse, Request, Response}, + storage::{try_serialize_record, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; use tokio::task::spawn; @@ -26,8 +29,8 @@ impl Node { } /// Cleanup unrelevant records if accumulated too many. - pub(crate) fn trigger_unrelevant_record_cleanup(network: Network) { - network.trigger_unrelevant_record_cleanup() + pub(crate) fn trigger_irrelevant_record_cleanup(network: Network) { + network.trigger_irrelevant_record_cleanup() } /// Get the Record from a peer or from the network without waiting. @@ -79,12 +82,27 @@ impl Node { // Hence value of the flag actually doesn't matter. 
is_register: false, }; - match node.network().get_record_from_network(key, &get_cfg).await { + match node + .network() + .get_record_from_network(key.clone(), &get_cfg) + .await + { Ok(record) => record, - Err(err) => { - error!("During replication fetch of {pretty_key:?}, failed in re-attempt of get from network {err:?}"); - return; - } + Err(error) => match error { + sn_networking::NetworkError::DoubleSpendAttempt(spends) => { + debug!("Failed to fetch record {pretty_key:?} from the network, double spend attempt {spends:?}"); + + let bytes = try_serialize_record(&spends, RecordKind::Spend)?; + + Record { + key, + value: bytes.to_vec(), + publisher: None, + expires: None, + } + } + other_error => return Err(other_error.into()), + }, } }; @@ -96,6 +114,7 @@ impl Node { } else { debug!("Completed storing Replication Record {pretty_key:?} from network."); } + Ok::<(), Error>(()) }); } Ok(()) @@ -111,86 +130,9 @@ impl Node { let network = self.network().clone(); let _handle = spawn(async move { - let start = std::time::Instant::now(); - let pretty_key = PrettyPrintRecordKey::from(&paid_key); - - // first we wait until our own network store can return the record - // otherwise it may not be fully written yet - let mut retry_count = 0; - debug!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); - loop { - let record = match network.get_local_record(&paid_key).await { - Ok(record) => record, - Err(err) => { - error!( - "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}" - ); - None - } - }; - - if record.is_some() { - break; - } - - if retry_count > 10 { - error!( - "Could not get record from store for replication: {pretty_key:?} after 10 retries" - ); - return; - } - - retry_count += 1; - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - } - - debug!("Start replication of fresh record {pretty_key:?} from store"); - - // Already contains self_peer_id - let mut closest_k_peers = match network.get_closest_k_value_local_peers().await { - Ok(peers) => peers, - Err(err) => { - error!("Replicating fresh record {pretty_key:?} get_closest_local_peers errored: {err:?}"); - return; - } - }; - - // remove ourself from these calculations - closest_k_peers.retain(|peer_id| peer_id != &network.peer_id()); - - let data_addr = NetworkAddress::from_record_key(&paid_key); - - let sorted_based_on_addr = match sort_peers_by_address( - &closest_k_peers, - &data_addr, - REPLICATION_PEERS_COUNT, - ) { - Ok(result) => result, - Err(err) => { - error!( - "When replicating fresh record {pretty_key:?}, having error when sort {err:?}" - ); - return; - } - }; - - let our_peer_id = network.peer_id(); - let our_address = NetworkAddress::from_peer(our_peer_id); - let keys = vec![(data_addr.clone(), record_type.clone())]; - - for peer_id in sorted_based_on_addr { - debug!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); - let request = Request::Cmd(Cmd::Replicate { - holder: our_address.clone(), - keys: keys.clone(), - }); - - network.send_req_ignore_reply(request, *peer_id); - } - debug!( - "Completed replicate fresh record {pretty_key:?} on store, in {:?}", - start.elapsed() - ); + network + .replicate_valid_fresh_record(paid_key, record_type) + .await; }); } } diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 8d06a87187..21ba72d619 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -13,18 +13,19 @@ // use common::client::{get_client_and_funded_wallet, 
get_wallet}; // use eyre::{bail, Result}; // use itertools::Itertools; -// use sn_transfers::{ -// get_genesis_sk, rng, NanoTokens, DerivationIndex, HotWallet, SignedTransaction, -// SpendReason, WalletError, GENESIS_CASHNOTE, -// }; // use sn_logging::LogBuilder; // use sn_networking::NetworkError; +// use sn_transfers::{ +// get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, SignedTransaction, SpendReason, +// WalletError, GENESIS_CASHNOTE, +// }; // use std::time::Duration; // use tracing::*; // #[tokio::test] // async fn cash_note_transfer_double_spend_fail() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let _log_guards = +// LogBuilder::init_single_threaded_tokio_test("cash_note_transfer_double_spend_fail", true); // // create 1 wallet add money from faucet // let first_wallet_dir = TempDir::new()?; @@ -40,7 +41,7 @@ // assert_eq!(third_wallet.balance(), NanoTokens::zero()); // // manually forge two transfers of the same source -// let amount = first_wallet_balance / 3; +// let amount = NanoTokens::from(first_wallet_balance / 3); // let to1 = first_wallet.address(); // let to2 = second_wallet.address(); // let to3 = third_wallet.address(); @@ -70,31 +71,50 @@ // )?; // // send both transfers to the network -// // upload won't error out, only error out during verification. + // info!("Sending both transfers to the network..."); -// let res = client.send_spends(transfer_to_2.spends.iter(), false).await; -// assert!(res.is_ok()); -// let res = client.send_spends(transfer_to_3.spends.iter(), false).await; -// assert!(res.is_ok()); +// // These may error (but may not depending on network speed) +// // so we're not going to rely on it here. +// let _ = client.send_spends(transfer_to_2.spends.iter(), true).await; -// // we wait 5s to ensure that the double spend attempt is detected and accumulated -// info!("Verifying the transfers from first wallet... Sleeping for 10 seconds."); -// tokio::time::sleep(Duration::from_secs(10)).await; +// let _ = client.send_spends(transfer_to_3.spends.iter(), true).await; + +// // check the CashNotes, it should fail +// info!("Verifying the transfers from first wallet..."); // let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); -// // check the CashNotes, it should fail -// let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; -// let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; -// info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); +// let mut should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// let mut should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; + +// for i in 0..5 { +// if should_err1.is_err() && should_err2.is_err() { +// break; +// } + +// tokio::time::sleep(Duration::from_secs(1)).await; +// info!("Retrying verification.{i}... for should_err1+2"); +// println!("Retrying verification{i} ... 
for should_err1+2"); +// should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; +// } + +// info!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); +// println!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); // assert!(should_err1.is_err() && should_err2.is_err()); -// assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); -// assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); + +// assert_eq!( +// format!("{should_err1:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {should_err1:?}" +// ); + +// assert_eq!( +// format!("{should_err2:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {should_err2:?}" +// ); // Ok(()) // } @@ -168,7 +188,7 @@ // )?; // // send the transfer to the network which should reject it -// let res = client.send_spends(transfer2.spends.iter(), false).await; +// let res = client.send_spends(transfer2.spends.iter(), true).await; // std::mem::drop(exclusive_access); // assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); @@ -184,8 +204,8 @@ // let wallet_dir_1 = TempDir::new()?; // let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; -// let balance_1 = wallet_1.balance(); -// let amount = balance_1 / 2; +// let balance_1 = wallet_1.balance().as_nano(); +// let amount = NanoTokens::from(balance_1 / 2); // let to1 = wallet_1.address(); // // Send from 1 -> 2 @@ -262,14 +282,18 @@ // reason.clone(), // wallet_1.key(), // )?; // reuse the old cash notes -// client -// .send_spends(transfer_to_3.spends.iter(), false) -// .await?; +// // ignore response in case it errors out early, we verify below +// let _res = client.send_spends(transfer_to_3.spends.iter(), true).await; // info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); -// assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned + +// let res = client.verify_cashnote(&cash_notes_for_3[0]).await; +// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned + // info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); -// assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned + +// let res = client.verify_cashnote(&cash_notes_for_2[0]).await; +// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned // // The old spend has been poisoned, but spends from 22 -> 222 should still work // let wallet_dir_222 = TempDir::new()?; @@ -300,16 +324,16 @@ // client.verify_cashnote(&cash_notes_for_222[0]).await?; // // finally assert that we have a double spend attempt error here -// // we wait 1s to ensure that the double spend attempt is detected and accumulated +// // we wait to ensure that the double spend attempt is detected and accumulated // tokio::time::sleep(Duration::from_secs(5)).await; // match client.verify_cashnote(&cash_notes_for_2[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert!( -// e.to_string() -// .contains("Network Error Double spend(s) attempt was detected"), -// "error should reflect double spend attempt", +// assert_eq!( +// e.to_string(), +// format!("{}", WalletError::BurntSpend), +// "error should reflect double spend attempt, was: {e:?}", // ); // } // } @@ -317,10 +341,10 @@ // match client.verify_cashnote(&cash_notes_for_3[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert!( -// e.to_string() -// .contains("Network Error Double spend(s) attempt was detected"), -// "error should reflect double spend attempt", +// assert_eq!( +// e.to_string(), +// format!("{}", WalletError::BurntSpend), +// "error should reflect double spend attempt, was: {e:?}", // ); // } // } @@ -339,7 +363,7 @@ // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; // let balance_a = wallet_a.balance().as_nano(); -// let amount = balance_a / 2; +// let amount = NanoTokens::from(balance_a / 2); // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -428,12 +452,10 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// // sleep for a bit to allow the network to process and accumulate the double spend -// tokio::time::sleep(Duration::from_secs(10)).await; - -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); // poisoned +// assert!( +// format!("{result:?}").starts_with("Err(UnexpectedParentSpends"), +// "Should have been UnexpectedParentSpends error, was: {result:?}" +// ); // // Try to double spend from B -> Y // let wallet_dir_y = TempDir::new()?; @@ -470,32 +492,48 @@ // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from B -> Y: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {result:?}" +// ); // info!("Verifying the original cashnote of A -> B"); + +// // arbitrary time sleep to allow for network accumulation of double spend. 
+// tokio::time::sleep(Duration::from_secs(1)).await; + // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; // info!("Got result while verifying the original spend from A -> B: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {result:?}" +// ); + +// println!("Verifying the original cashnote of B -> C"); -// info!("Verifying the original cashnote of B -> C"); // let result = client.verify_cashnote(&cash_notes_for_c[0]).await; // info!("Got result while verifying the original spend from B -> C: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {result:?}" +// ); // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {result:?}" +// ); + // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {result:?}" +// ); // Ok(()) // } @@ -511,8 +549,8 @@ // let wallet_dir_a = TempDir::new()?; // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; -// let balance_a = wallet_a.balance(); -// let amount = balance_a / 2; +// let balance_a = wallet_a.balance().as_nano(); +// let amount = NanoTokens::from(balance_a / 2); // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -574,7 +612,7 @@ // )?; // client -// .send_spends(transfer_to_c.spends.iter(), false) +// .send_spends(transfer_to_c.spends.iter(), true) // .await?; // info!("Verifying the transfers from B -> C wallet..."); @@ -611,9 +649,10 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend) +// ); // // the original A should still be present as one of the double spends // let res = client @@ -649,20 +688,23 @@ // reason.clone(), // wallet_a.key(), // )?; // reuse the old cash notes -// client -// 
.send_spends(transfer_to_y.spends.iter(), false) -// .await?; + +// // we actually don't care about the result here, we just want to spam the network with double spends +// let _ = client.send_spends(transfer_to_y.spends.iter(), false).await; + +// // and then we verify the double spend attempt // info!("Verifying the transfers from A -> Y wallet... It should error out."); // let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); // // sleep for a bit to allow the network to process and accumulate the double spend -// tokio::time::sleep(Duration::from_millis(500)).await; +// tokio::time::sleep(Duration::from_millis(1500)).await; // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from A -> Y: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend) +// ); // // the original A should still be present as one of the double spends // let res = client diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index 23fe9c53b0..d36f680ca2 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -14,7 +14,6 @@ // use libp2p::PeerId; // use rand::Rng; // use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; -// use sn_evm::{Amount, AttoTokens, PaymentQuote}; // use sn_logging::LogBuilder; // use sn_networking::{GetRecordError, NetworkError}; // use sn_protocol::{ @@ -23,6 +22,7 @@ // NetworkAddress, // }; // use sn_registers::Permissions; +// use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote}; // use std::collections::BTreeMap; // use tokio::time::{sleep, Duration}; // use tracing::info; @@ -80,7 +80,7 @@ // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // let subset_len = chunks.len() / 3; -// let _storage_cost = wallet_client +// let res = wallet_client // .pay_for_storage( // chunks // .clone() @@ -88,7 +88,15 @@ // .take(subset_len) // .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), // ) -// .await?; +// .await; + +// // if the payment failed, we can log that +// if let Err(error) = res { +// tracing::warn!( +// "Payment failed, (though that doesn't really break this test): {:?}", +// error +// ); +// } // // now let's request to upload all addresses, even that we've already paid for a subset of them // let verify_store = false; @@ -111,7 +119,7 @@ // let paying_wallet_dir: TempDir = TempDir::new()?; // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let wallet_original_balance = paying_wallet.balance().as_atto(); +// let wallet_original_balance = paying_wallet.balance().as_nano(); // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // // generate a random number (between 50 and 100) of random addresses @@ -135,10 +143,10 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for the subset of addresses, 1 nano per addr -// let new_balance = AttoTokens::from_atto(wallet_original_balance - total_cost.as_atto()); +// let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano()); // info!("Verifying new balance on paying wallet is {new_balance} ..."); // let paying_wallet = 
wallet_client.into_wallet(); -// // assert_eq!(paying_wallet.balance(), new_balance);// TODO adapt to evm +// assert_eq!(paying_wallet.balance(), new_balance); // // let's verify payment proofs for the subset have been cached in the wallet // assert!(random_content_addrs @@ -160,13 +168,12 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for addresses we haven't previously paid for, 1 nano per addr -// let new_balance = AttoTokens::from_atto( -// wallet_original_balance - (Amount::from(random_content_addrs.len()) * total_cost.as_atto()), +// let new_balance = NanoTokens::from( +// wallet_original_balance - (random_content_addrs.len() as u64 * total_cost.as_nano()), // ); // println!("Verifying new balance on paying wallet is now {new_balance} ..."); // let paying_wallet = wallet_client.into_wallet(); -// // TODO adapt to evm -// // assert_eq!(paying_wallet.balance(), new_balance); +// assert_eq!(paying_wallet.balance(), new_balance); // // let's verify payment proofs now for all addresses have been cached in the wallet // // assert!(random_content_addrs @@ -229,18 +236,16 @@ // no_data_payments.insert( // *chunk_name, // ( -// sn_evm::utils::dummy_address(), -// PaymentQuote::test_dummy(*chunk_name, AttoTokens::from_u64(0)), +// MainPubkey::new(bls::SecretKey::random().public_key()), +// PaymentQuote::test_dummy(*chunk_name, NanoTokens::from(0)), // PeerId::random().to_bytes(), // ), // ); // } -// // TODO adapt to evm -// // let _ = wallet_client -// // .mut_wallet() -// // .send_storage_payment(&no_data_payments) -// // .await?; +// let _ = wallet_client +// .mut_wallet() +// .local_send_storage_payment(&no_data_payments)?; // sleep(Duration::from_secs(5)).await; @@ -248,131 +253,131 @@ // .upload_test_bytes(content_bytes.clone(), false) // .await?; -// info!("Reading {content_addr:?} expected to fail"); -// let mut files_download = FilesDownload::new(files_api); -// assert!( -// matches!( -// files_download.download_file(content_addr, None).await, -// Err(ClientError::Network(NetworkError::GetRecordError( -// GetRecordError::RecordNotFound -// ))) -// ), -// "read bytes should fail as we didn't store them" -// ); +// // info!("Reading {content_addr:?} expected to fail"); +// // let mut files_download = FilesDownload::new(files_api); +// // assert!( +// // matches!( +// // files_download.download_file(content_addr, None).await, +// // Err(ClientError::Network(NetworkError::GetRecordError( +// // GetRecordError::RecordNotFound +// // ))) +// // ), +// // "read bytes should fail as we didn't store them" +// // ); -// Ok(()) -// } +// // Ok(()) +// // } -// #[tokio::test] -// async fn storage_payment_register_creation_succeeds() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// // #[tokio::test] +// // async fn storage_payment_register_creation_succeeds() -> Result<()> { +// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// let paying_wallet_dir = TempDir::new()?; +// // let paying_wallet_dir = TempDir::new()?; -// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// let mut rng = rand::thread_rng(); -// let xor_name = 
XorName::random(&mut rng); -// let address = RegisterAddress::new(xor_name, client.signer_pk()); -// let net_addr = NetworkAddress::from_register_address(address); -// info!("Paying for random Register address {net_addr:?} ..."); +// // let mut rng = rand::thread_rng(); +// // let xor_name = XorName::random(&mut rng); +// // let address = RegisterAddress::new(xor_name, client.signer_pk()); +// // let net_addr = NetworkAddress::from_register_address(address); +// // info!("Paying for random Register address {net_addr:?} ..."); -// let _cost = wallet_client -// .pay_for_storage(std::iter::once(net_addr)) -// .await?; +// // let _cost = wallet_client +// // .pay_for_storage(std::iter::once(net_addr)) +// // .await?; -// let (mut register, _cost, _royalties_fees) = client -// .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) -// .await?; +// // let (mut register, _cost, _royalties_fees) = client +// // .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) +// // .await?; -// println!("Newly created register has {} ops", register.read().len()); +// // println!("Newly created register has {} ops", register.read().len()); -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// assert_eq!(register.read(), retrieved_reg.read()); +// // assert_eq!(register.read(), retrieved_reg.read()); -// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// register.write(&random_entry)?; +// // register.write(&random_entry)?; -// println!( -// "Register has {} ops after first write", -// register.read().len() -// ); +// // println!( +// // "Register has {} ops after first write", +// // register.read().len() +// // ); -// register.sync(&mut wallet_client, true, None).await?; +// // register.sync(&mut wallet_client, true, None).await?; -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// assert_eq!(retrieved_reg.read().len(), 1); +// // assert_eq!(retrieved_reg.read().len(), 1); -// for index in 1..10 { -// println!("current index is {index}"); -// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// // for index in 1..10 { +// // println!("current index is {index}"); +// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// register.write(&random_entry)?; -// register.sync(&mut wallet_client, true, None).await?; +// // register.write(&random_entry)?; +// // register.sync(&mut wallet_client, true, None).await?; -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// println!( -// "current retrieved register entry length is {}", -// retrieved_reg.read().len() -// ); -// println!("current expected entry length is {}", register.read().len()); +// // println!( +// // "current retrieved register entry length is {}", +// // retrieved_reg.read().len() +// // ); +// // println!("current expected entry length is {}", register.read().len()); -// println!( -// "current retrieved register ops length is {}", -// retrieved_reg.ops.len() -// ); -// println!("current local cached ops length is {}", register.ops.len()); +// // println!( +// // "current retrieved register ops length is {}", +// // retrieved_reg.ops.len() 
+// // ); +// // println!("current local cached ops length is {}", register.ops.len()); -// assert_eq!(retrieved_reg.read().len(), register.read().len()); +// // assert_eq!(retrieved_reg.read().len(), register.read().len()); -// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// println!("Current fetched register is {:?}", retrieved_reg.register); -// println!( -// "Fetched register has update history of {}", -// retrieved_reg.register.log_update_history() -// ); +// // println!("Current fetched register is {:?}", retrieved_reg.register); +// // println!( +// // "Fetched register has update history of {}", +// // retrieved_reg.register.log_update_history() +// // ); -// std::thread::sleep(std::time::Duration::from_millis(1000)); -// } +// // std::thread::sleep(std::time::Duration::from_millis(1000)); +// // } -// Ok(()) -// } +// // Ok(()) +// // } -// #[tokio::test] -// #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] -// async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// // #[tokio::test] +// // #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] +// // async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { +// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// let paying_wallet_dir = TempDir::new()?; +// // let paying_wallet_dir = TempDir::new()?; -// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// let mut rng = rand::thread_rng(); -// let xor_name = XorName::random(&mut rng); -// let address = RegisterAddress::new(xor_name, client.signer_pk()); -// let net_address = -// NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); +// // let mut rng = rand::thread_rng(); +// // let xor_name = XorName::random(&mut rng); +// // let address = RegisterAddress::new(xor_name, client.signer_pk()); +// // let net_address = +// // NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); -// let mut no_data_payments = BTreeMap::default(); -// no_data_payments.insert( -// net_address -// .as_xorname() -// .expect("RegisterAddress should convert to XorName"), -// ( -// sn_evm::utils::dummy_address(), -// PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), -// vec![], -// ), -// ); +// // let mut no_data_payments = BTreeMap::default(); +// // no_data_payments.insert( +// // net_address +// // .as_xorname() +// // .expect("RegisterAddress should convert to XorName"), +// // ( +// // sn_evm::utils::dummy_address(), +// // PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), +// // vec![], +// // ), +// // ); // println!( // "current retrieved register entry length is {}", @@ -395,16 +400,16 @@ // // .send_storage_payment(&no_data_payments) // // .await?; -// // this should fail to store as the amount paid is not enough -// let (mut register, _cost, _royalties_fees) = 
client -// .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) -// .await?; +// // // this should fail to store as the amount paid is not enough +// // let (mut register, _cost, _royalties_fees) = client +// // .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) +// // .await?; -// sleep(Duration::from_secs(5)).await; -// assert!(matches!( -// client.get_register(address).await, -// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address -// )); +// // sleep(Duration::from_secs(5)).await; +// // assert!(matches!( +// // client.get_register(address).await, +// // Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// // )); // println!("Current fetched register is {:?}", retrieved_reg.address()); // println!( @@ -415,11 +420,11 @@ // let random_entry = rng.gen::<[u8; 32]>().to_vec(); // register.write(&random_entry)?; -// sleep(Duration::from_secs(5)).await; -// assert!(matches!( -// register.sync(&mut wallet_client, false, None).await, -// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address -// )); +// // sleep(Duration::from_secs(5)).await; +// // assert!(matches!( +// // register.sync(&mut wallet_client, false, None).await, +// // Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// // )); -// Ok(()) -// } +// // Ok(()) +// // } diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 641756fa2c..8649d07909 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -16,13 +16,10 @@ use common::{ get_all_peer_ids, get_safenode_rpc_client, NodeRestart, }; use eyre::{eyre, Result}; -use libp2p::{ - kad::{KBucketKey, RecordKey}, - PeerId, -}; +use libp2p::{kad::RecordKey, PeerId}; use rand::{rngs::OsRng, Rng}; use sn_logging::LogBuilder; -use sn_networking::{sleep, sort_peers_by_key}; +use sn_networking::{sleep, sort_peers_by_address_and_limit, sort_peers_by_key_and_limit}; use sn_protocol::{ safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, @@ -160,8 +157,8 @@ fn print_node_close_groups(all_peers: &[PeerId]) { for (node_index, peer) in all_peers.iter().enumerate() { let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); - let closest_peers = - sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); + let closest_peers = sort_peers_by_key_and_limit(&all_peers, &key, CLOSE_GROUP_SIZE) + .expect("failed to sort peer"); let closest_peers_idx = closest_peers .iter() .map(|&&peer| { @@ -212,11 +209,12 @@ async fn verify_location(all_peers: &Vec<PeerId>, node_rpc_addresses: &[SocketAd for (key, actual_holders_idx) in record_holders.iter() { println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); - let record_key = KBucketKey::from(key.to_vec()); - let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? - .into_iter() - .cloned() - .collect::<HashSet<_>>(); + let record_address = NetworkAddress::from_record_key(key); + let expected_holders = + sort_peers_by_address_and_limit(all_peers, &record_address, CLOSE_GROUP_SIZE)? 
+ .into_iter() + .cloned() + .collect::<HashSet<_>>(); let actual_holders = actual_holders_idx .iter() diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs index da19270b69..85dc2e3a09 100644 --- a/sn_node/tests/verify_routing_table.rs +++ b/sn_node/tests/verify_routing_table.rs @@ -26,7 +26,7 @@ use tracing::{error, info, trace}; /// Sleep for some time for the nodes to discover each other before verification /// Also can be set through the env variable of the same name. -const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5); +const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(60); #[tokio::test(flavor = "multi_thread")] async fn verify_routing_table() -> Result<()> { diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 4b152994c4..b3e651927e 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.11.0" +version = "0.11.1" [[bin]] name = "safenode-manager" @@ -46,14 +46,14 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_logging = { path = "../sn_logging", version = "0.2.37" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12" } -sn_service_management = { path = "../sn_service_management", version = "0.4.0" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } +sn_service_management = { path = "../sn_service_management", version = "0.4.1" } sn-releases = "0.2.6" -sn_evm = { path = "../sn_evm", version = "0.1.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index c83938137f..bb29f6be3a 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -170,7 +170,9 @@ pub async fn run( // been killed, which clears it out. let local_node_reg_path = &get_local_node_registry_path()?; let mut local_node_registry: NodeRegistry = if clean { - debug!("Clean set to true, removing client, node dir and killing the network."); + debug!( + "Clean set to true, removing client, node dir, local registry and killing the network." + ); let client_data_path = dirs_next::data_dir() .ok_or_else(|| eyre!("Could not obtain user's data directory"))? .join("safe") @@ -178,6 +180,9 @@ if client_data_path.is_dir() { std::fs::remove_dir_all(client_data_path)?; } + if local_node_reg_path.exists() { + std::fs::remove_file(local_node_reg_path)?; + } kill(false, verbosity)?; NodeRegistry::load(local_node_reg_path)? 
} else { diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 721015ed2f..a71e7b6b4e 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -424,12 +424,7 @@ pub async fn status_report( node.reward_balance .map_or("-".to_string(), |b| b.to_string()) ); - println!( - "Owner: {}", - node.owner - .as_ref() - .map_or("-".to_string(), |o| o.to_string()) - ); + println!("Rewards address: {}", node.rewards_address); println!(); } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 3b4fd86564..cdeb4a2dc1 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.32" +version = "0.6.33" [[bin]] name = "safenode_rpc_client" @@ -26,13 +26,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_logging = { path = "../sn_logging", version = "0.2.37" } -sn_node = { path = "../sn_node", version = "0.112.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.4.0" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_node = { path = "../sn_node", version = "0.112.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.4.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index c8e46ee8be..2d40d10161 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.4" +version = "0.5.5" [features] local = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.12", optional = true} +sn_protocol = { path = "../sn_protocol", version = "0.17.13", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 622ed3dd4d..832a832206 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.12" +version = "0.17.13" [features] default = [] @@ -28,10 +28,10 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = 
"../sn_build_info", version = "0.1.16" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0" } -sn_registers = { path = "../sn_registers", version = "0.4.0" } -sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } +sn_registers = { path = "../sn_registers", version = "0.4.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs index f73c356b53..2d24feb0d9 100644 --- a/sn_protocol/src/error.rs +++ b/sn_protocol/src/error.rs @@ -51,6 +51,9 @@ pub enum Error { /// The provided SecretyKey failed to decrypt the data #[error("Failed to derive CipherText from encrypted_data")] ScratchpadCipherTextFailed, + /// The provided cypher text is invalid + #[error("Provided cypher text is invalid")] + ScratchpadCipherTextInvalid, // ---------- payment errors #[error("There was an error getting the storecost from kademlia store")] @@ -78,4 +81,7 @@ pub enum Error { // The record already exists at this node #[error("The record already exists, so do not charge for it: {0:?}")] RecordExists(PrettyPrintRecordKey<'static>), + + #[error("Record header is incorrect")] + IncorrectRecordHeader, } diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs index 4d3b92628d..f397173ca1 100644 --- a/sn_protocol/src/lib.rs +++ b/sn_protocol/src/lib.rs @@ -32,7 +32,10 @@ pub use error::Error; use storage::ScratchpadAddress; use self::storage::{ChunkAddress, RegisterAddress, SpendAddress}; -use bytes::Bytes; + +/// Re-export of Bytes used throughout the protocol +pub use bytes::Bytes; + use libp2p::{ kad::{KBucketDistance as Distance, KBucketKey as Key, RecordKey}, multiaddr::Protocol, diff --git a/sn_protocol/src/storage.rs b/sn_protocol/src/storage.rs index 2935e43fce..3a6b4ba6a8 100644 --- a/sn_protocol/src/storage.rs +++ b/sn_protocol/src/storage.rs @@ -18,7 +18,10 @@ use std::{str::FromStr, time::Duration}; pub use self::{ address::{ChunkAddress, RegisterAddress, ScratchpadAddress, SpendAddress}, chunks::Chunk, - header::{try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType}, + header::{ + get_type_from_record, try_deserialize_record, try_serialize_record, RecordHeader, + RecordKind, RecordType, + }, scratchpad::Scratchpad, }; diff --git a/sn_protocol/src/storage/header.rs b/sn_protocol/src/storage/header.rs index 96a4515526..af43c21256 100644 --- a/sn_protocol/src/storage/header.rs +++ b/sn_protocol/src/storage/header.rs @@ -84,6 +84,33 @@ impl Display for RecordKind { } } +/// Return the RecordType +pub fn get_type_from_record(record: &Record) -> Result { + let key = record.key.clone(); + let record_key = PrettyPrintRecordKey::from(&key); + + match RecordHeader::from_record(record) { + Ok(record_header) => match record_header.kind { + RecordKind::Chunk => Ok(RecordType::Chunk), + RecordKind::Scratchpad => Ok(RecordType::Scratchpad), + RecordKind::Spend | RecordKind::Register => { + let content_hash = XorName::from_content(&record.value); + Ok(RecordType::NonChunk(content_hash)) + } + RecordKind::ChunkWithPayment + | RecordKind::RegisterWithPayment + | RecordKind::ScratchpadWithPayment => { + error!("Record {record_key:?} with payment shall not be stored locally."); + Err(Error::IncorrectRecordHeader) + } + }, + Err(err) => { + error!("For record {record_key:?}, failed 
to parse record_header {err:?}"); + Err(Error::IncorrectRecordHeader) + } + } +} + impl RecordHeader { pub const SIZE: usize = 2; diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs index ea38d2e686..94b5c633a5 100644 --- a/sn_protocol/src/storage/scratchpad.rs +++ b/sn_protocol/src/storage/scratchpad.rs @@ -8,9 +8,9 @@ use super::ScratchpadAddress; use crate::error::{Error, Result}; +use crate::Bytes; use crate::NetworkAddress; use bls::{Ciphertext, PublicKey, SecretKey, Signature}; -use bytes::Bytes; use serde::{Deserialize, Serialize}; use xor_name::XorName; @@ -23,6 +23,8 @@ pub struct Scratchpad { /// Network address. Omitted when serialising and /// calculated from the `encrypted_data` when deserialising. address: ScratchpadAddress, + /// Data encoding: custom apps using scratchpad should use this so they can identify the type of data they are storing + data_encoding: u64, /// Contained data. This should be encrypted #[debug(skip)] encrypted_data: Bytes, @@ -35,10 +37,11 @@ impl Scratchpad { /// Creates a new instance of `Scratchpad`. - pub fn new(owner: PublicKey) -> Self { + pub fn new(owner: PublicKey, data_encoding: u64) -> Self { Self { address: ScratchpadAddress::new(owner), encrypted_data: Bytes::new(), + data_encoding, counter: 0, signature: None, } @@ -49,6 +52,11 @@ impl Scratchpad { self.counter } + /// Return the current data encoding + pub fn data_encoding(&self) -> u64 { + self.data_encoding + } + /// Increments the counter value. pub fn increment(&mut self) -> u64 { self.counter += 1; @@ -94,13 +102,13 @@ impl Scratchpad { } /// Returns the encrypted_data, decrypted via the passed SecretKey - pub fn decrypt_data(&self, sk: &SecretKey) -> Result<Option<Bytes>> { - Ok(sk - .decrypt( - &Ciphertext::from_bytes(&self.encrypted_data) - .map_err(|_| Error::ScratchpadCipherTextFailed)?, - ) - .map(Bytes::from)) + pub fn decrypt_data(&self, sk: &SecretKey) -> Result<Bytes> { + let cipher = Ciphertext::from_bytes(&self.encrypted_data) + .map_err(|_| Error::ScratchpadCipherTextFailed)?; + let bytes = sk + .decrypt(&cipher) + .ok_or(Error::ScratchpadCipherTextInvalid)?; + Ok(Bytes::from(bytes)) } /// Returns the encrypted_data hash @@ -133,3 +141,17 @@ impl Scratchpad { self.encrypted_data.len() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_scratchpad_is_valid() { + let sk = SecretKey::random(); + let pk = sk.public_key(); + let mut scratchpad = Scratchpad::new(pk, 42); + scratchpad.update_and_sign(Bytes::from_static(b"data to be encrypted"), &sk); + assert!(scratchpad.is_valid()); + } +} diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs index 04921730ef..e1c952976c 100644 --- a/sn_protocol/src/version.rs +++ b/sn_protocol/src/version.rs @@ -58,7 +58,7 @@ fn get_truncate_version_str() -> String { /// Get the PKs version string. 
/// If the public key is mis-configured via env variable, /// the node shall be rejected from joining the network -fn get_key_version_str() -> String { +pub fn get_key_version_str() -> String { let mut f_k_str = FOUNDATION_PK.to_hex(); let _ = f_k_str.split_off(6); let mut g_k_str = GENESIS_PK.to_hex(); diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index fd68714064..596ce700ed 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.0" +version = "0.4.1" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index b0f60bc453..5cdfd7cd8f 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.0" +version = "0.4.1" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.37" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13", features = [ "rpc", ] } -sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index cbd6206fba..f156f93de9 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.20.0" +version = "0.20.1" [features] reward-forward = [] diff --git a/sn_transfers/src/wallet/error.rs b/sn_transfers/src/wallet/error.rs index 5a57b7434a..f60b718f42 100644 --- a/sn_transfers/src/wallet/error.rs +++ b/sn_transfers/src/wallet/error.rs @@ -40,9 +40,19 @@ pub enum Error { /// A general error when receiving a transfer fails #[error("Failed to receive transfer due to {0}")] CouldNotReceiveMoney(String), + /// A spend has been burnt (i.e. there was a DoubleSpendAttempt) + #[error("Failed to verify transfer validity in the network, a burnt spend was found")] + BurntSpend, + /// Parents of a spend were not as expected in a provided cash note + #[error("Failed to verify transfer's parents in the network, transfer could be invalid or a parent double spent")] + UnexpectedParentSpends(crate::SpendAddress), + /// No valid unspent cashnotes found + #[error("All the redeemed CashNotes are already spent")] + AllRedeemedCashnotesSpent, /// A general error when verifying a transfer validity in the network #[error("Failed to verify transfer validity in the network {0}")] CouldNotVerifyTransfer(String), + /// Failed to fetch spend from network #[error("Failed to fetch spend from network: {0}")] FailedToGetSpend(String), diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 48955e7e8c..5acb11e414 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.8" +version 
= "0.4.9" [features] local = ["sn_peers_acquisition/local"] @@ -16,9 +16,9 @@ local = ["sn_peers_acquisition/local"] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.1" } +evmlib = { path = "../evmlib", version = "0.1.2" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index 22cdd87d1c..cf18a18ec8 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.55" +version = "0.1.56" [dependencies]