diff --git a/.github/workflows/benchmark-prs.yml b/.github/workflows/benchmark-prs.yml
index af1b3ce0fe..13da75ef2d 100644
--- a/.github/workflows/benchmark-prs.yml
+++ b/.github/workflows/benchmark-prs.yml
@@ -5,390 +5,378 @@ on: pull_request
env:
CARGO_INCREMENTAL: "0"
RUST_BACKTRACE: 1
- CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
+ CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi
NODE_DATA_PATH: /home/runner/.local/share/safe/node
jobs:
- # benchmark-cli:
- # name: Compare sn_cli benchmarks to main
- # # right now only ubuntu, running on multiple systems would require many pushes...\
- # # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing
- # # once to the branch..
- # runs-on: ubuntu-latest
- # steps:
- # - uses: actions/checkout@v4
-
- # - uses: dtolnay/rust-toolchain@stable
- # with:
- # components: rustfmt, clippy
-
- # - uses: Swatinem/rust-cache@v2
- # continue-on-error: true
-
- # ########################
- # ### Setup ###
- # ########################
- # - run: cargo install cargo-criterion
-
- # - name: install ripgrep
- # run: sudo apt-get -y install ripgrep
-
- # - name: Download 95mb file to be uploaded with the safe client
- # shell: bash
- # run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
-
- # # As normal user won't care much about initial client startup,
- # # but be more alerted on communication speed during transmission.
- # # Meanwhile the criterion testing code includes the client startup as well,
- # # it will be better to execute bench test with `local`,
- # # to make the measurement results reflect speed improvement or regression more accurately.
- # - name: Build sn bins
- # run: cargo build --release --bin safe --bin safenode --features local
- # timeout-minutes: 30
-
- # - name: Build faucet bin
- # run: cargo build --release --bin faucet --features local --features gifting --no-default-features
- # timeout-minutes: 30
-
- # - name: Start a local network
- # uses: maidsafe/sn-local-testnet-action@main
- # env:
- # SN_LOG: "all"
- # with:
- # action: start
- # interval: 2000
- # node-path: target/release/safenode
- # faucet-path: target/release/faucet
- # platform: ubuntu-latest
- # build: true
-
- # - name: Check SAFE_PEERS was set
- # shell: bash
- # run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS"
-
- # #########################
- # ### Upload large file ###
- # #########################
-
- # - name: Fund cli wallet
- # shell: bash
- # run: target/release/safe --log-output-dest=data-dir wallet get-faucet 127.0.0.1:8000
- # env:
- # SN_LOG: "all"
-
- # - name: Start a client instance to compare memory usage
- # shell: bash
- # run: target/release/safe --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick
- # env:
- # SN_LOG: "all"
-
- # - name: Cleanup uploaded_files folder to avoid pollute download benchmark
- # shell: bash
- # run: rm -rf $CLIENT_DATA_PATH/uploaded_files
-
- # ###########################
- # ### Client Mem Analysis ###
- # ###########################
-
- # - name: Check client memory usage
- # shell: bash
- # run: |
- # client_peak_mem_limit_mb="1024" # mb
- # client_avg_mem_limit_mb="512" # mb
-
- # peak_mem_usage=$(
- # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
- # awk -F':' '/"memory_used_mb":/{print $2}' |
- # sort -n |
- # tail -n 1
- # )
- # echo "Peak memory usage: $peak_mem_usage MB"
- # if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
- # echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
- # exit 1
- # fi
-
- # total_mem=$(
- # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
- # awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
- # )
- # num_of_times=$(
- # rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats |
- # rg "(\d+) matches" |
- # rg "\d+" -o
- # )
- # echo "num_of_times: $num_of_times"
- # echo "Total memory is: $total_mem"
- # average_mem=$(($total_mem/$(($num_of_times))))
- # echo "Average memory is: $average_mem"
-
- # if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
- # echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
- # exit 1
- # fi
- # # Write the client memory usage to a file
- # echo '[
- # {
- # "name": "client-peak-memory-usage-during-upload",
- # "value": '$peak_mem_usage',
- # "unit": "MB"
- # },
- # {
- # "name": "client-average-memory-usage-during-upload",
- # "value": '$average_mem',
- # "unit": "MB"
- # }
- # ]' > client_memory_usage.json
-
- # - name: check client_memory_usage.json
- # shell: bash
- # run: cat client_memory_usage.json
-
- # - name: Alert for client memory usage
- # uses: benchmark-action/github-action-benchmark@v1
- # with:
- # name: "Memory Usage of Client during uploading large file"
- # tool: "customSmallerIsBetter"
- # output-file-path: client_memory_usage.json
- # # Where the previous data file is stored
- # external-data-json-path: ./cache/client-mem-usage.json
- # # Workflow will fail when an alert happens
- # fail-on-alert: true
- # # GitHub API token to make a commit comment
- # github-token: ${{ secrets.GITHUB_TOKEN }}
- # # Enable alert commit comment
- # comment-on-alert: true
- # # 200% regression will result in alert
- # alert-threshold: "200%"
- # # Enable Job Summary for PRs
- # summary-always: true
-
- # ########################
- # ### Benchmark ###
- # ########################
- # - name: Bench `safe` cli
- # shell: bash
- # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr,
- # # passes to tee which displays it in the terminal and writes to output.txt
- # run: |
- # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt
- # cat output.txt | rg benchmark-complete | jq -s 'map({
- # name: (.id | split("/"))[-1],
- # unit: "MiB/s",
- # value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9))
- # })' > files-benchmark.json
- # timeout-minutes: 15
-
- # - name: Confirming the number of files uploaded and downloaded during the benchmark test
- # shell: bash
- # run: |
- # ls -l $CLIENT_DATA_PATH
- # ls -l $CLIENT_DATA_PATH/uploaded_files
- # ls -l $CLIENT_DATA_PATH/safe_files
-
- # - name: Store benchmark result
- # uses: benchmark-action/github-action-benchmark@v1
- # with:
- # # What benchmark tool the output.txt came from
- # tool: "customBiggerIsBetter"
- # output-file-path: files-benchmark.json
- # # Where the previous data file is stored
- # external-data-json-path: ./cache/benchmark-data.json
- # # Workflow will fail when an alert happens
- # fail-on-alert: true
- # # GitHub API token to make a commit comment
- # github-token: ${{ secrets.GITHUB_TOKEN }}
- # # Enable alert commit comment
- # comment-on-alert: true
- # # 200% regression will result in alert
- # alert-threshold: "200%"
- # # Enable Job Summary for PRs
- # summary-always: true
-
- # - name: Start a client to carry out download to output the logs
- # shell: bash
- # run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick
-
- # - name: Start a client to simulate criterion upload
- # shell: bash
- # run: |
- # ls -l target/release
- # target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick
-
- # #########################
- # ### Stop Network ###
- # #########################
-
- # - name: Stop the local network
- # if: always()
- # uses: maidsafe/sn-local-testnet-action@main
- # with:
- # action: stop
- # log_file_prefix: safe_test_logs_benchmark
- # platform: ubuntu-latest
- # build: true
-
- # - name: Upload Faucet folder
- # uses: actions/upload-artifact@main
- # with:
- # name: faucet_folder
- # path: /home/runner/.local/share/safe/test_faucet
- # continue-on-error: true
- # if: always()
-
- # #########################
- # ### Node Mem Analysis ###
- # #########################
-
- # # The large file uploaded will increase node's peak mem usage a lot
- # - name: Check node memory usage
- # shell: bash
- # run: |
- # node_peak_mem_limit_mb="250" # mb
- # peak_mem_usage=$(
- # rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
- # awk -F':' '/"memory_used_mb":/{print $2}' |
- # sort -n |
- # tail -n 1
- # )
-
- # echo "Memory usage: $peak_mem_usage MB"
- # if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
- # echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
- # exit 1
- # fi
- # # Write the node memory usage to a file
- # echo '[
- # {
- # "name": "node-memory-usage-through-safe-benchmark",
- # "value": '$peak_mem_usage',
- # "unit": "MB"
- # }
- # ]' > node_memory_usage.json
-
- # - name: check node_memory_usage.json
- # shell: bash
- # run: cat node_memory_usage.json
-
- # - name: Alert for node memory usage
- # uses: benchmark-action/github-action-benchmark@v1
- # with:
- # tool: "customSmallerIsBetter"
- # output-file-path: node_memory_usage.json
- # # Where the previous data file is stored
- # external-data-json-path: ./cache/node-mem-usage.json
- # # Workflow will fail when an alert happens
- # fail-on-alert: true
- # # GitHub API token to make a commit comment
- # github-token: ${{ secrets.GITHUB_TOKEN }}
- # # Enable alert commit comment
- # comment-on-alert: true
- # # Comment on the PR
- # comment-always: true
- # # 200% regression will result in alert
- # alert-threshold: "200%"
- # # Enable Job Summary for PRs
- # summary-always: true
-
- # ###########################################
- # ### Swarm_driver handling time Analysis ###
- # ###########################################
-
- # - name: Check swarm_driver handling time
- # shell: bash
- # run: |
- # num_of_times=$(
- # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
- # rg "(\d+) matches" |
- # rg "\d+" -o
- # )
- # echo "Number of long cmd handling times: $num_of_times"
- # total_long_handling_ms=$(
- # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
- # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
- # )
- # echo "Total cmd long handling time is: $total_long_handling_ms ms"
- # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
- # echo "Average cmd long handling time is: $average_handling_ms ms"
- # total_long_handling=$(($total_long_handling_ms))
- # total_num_of_times=$(($num_of_times))
- # num_of_times=$(
- # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
- # rg "(\d+) matches" |
- # rg "\d+" -o
- # )
- # echo "Number of long event handling times: $num_of_times"
- # total_long_handling_ms=$(
- # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
- # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
- # )
- # echo "Total event long handling time is: $total_long_handling_ms ms"
- # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
- # echo "Average event long handling time is: $average_handling_ms ms"
- # total_long_handling=$(($total_long_handling_ms+$total_long_handling))
- # total_num_of_times=$(($num_of_times+$total_num_of_times))
- # average_handling_ms=$(($total_long_handling/$(($total_num_of_times))))
- # echo "Total swarm_driver long handling times is: $total_num_of_times"
- # echo "Total swarm_driver long handling duration is: $total_long_handling ms"
- # echo "Total average swarm_driver long handling duration is: $average_handling_ms ms"
- # total_num_of_times_limit_hits="30000" # hits
- # total_long_handling_limit_ms="400000" # ms
- # average_handling_limit_ms="20" # ms
- # if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then
- # echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits"
- # exit 1
- # fi
- # if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then
- # echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms"
- # exit 1
- # fi
- # if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then
- # echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms"
- # exit 1
- # fi
-
- # # Write the node memory usage to a file
- # echo '[
- # {
- # "name": "swarm_driver long handling times",
- # "value": '$total_num_of_times',
- # "unit": "hits"
- # },
- # {
- # "name": "swarm_driver long handling total_time",
- # "value": '$total_long_handling',
- # "unit": "ms"
- # },
- # {
- # "name": "swarm_driver average long handling time",
- # "value": '$average_handling_ms',
- # "unit": "ms"
- # }
- # ]' > swarm_driver_long_handlings.json
-
- # - name: check swarm_driver_long_handlings.json
- # shell: bash
- # run: cat swarm_driver_long_handlings.json
-
- # - name: Alert for swarm_driver long handlings
- # uses: benchmark-action/github-action-benchmark@v1
- # with:
- # tool: "customSmallerIsBetter"
- # output-file-path: swarm_driver_long_handlings.json
- # # Where the previous data file is stored
- # external-data-json-path: ./cache/swarm_driver_long_handlings.json
- # # Workflow will fail when an alert happens
- # fail-on-alert: true
- # # GitHub API token to make a commit comment
- # github-token: ${{ secrets.GITHUB_TOKEN }}
- # # Enable alert commit comment
- # comment-on-alert: true
- # # Comment on the PR
- # comment-always: true
- # # 200% regression will result in alert
- # alert-threshold: "200%"
- # # Enable Job Summary for PRs
- # summary-always: true
+ benchmark-cli:
+ name: Compare autonomi_cli benchmarks to main
+    # Right now only Ubuntu; running on multiple systems would require many pushes.
+    # Perhaps this can be done with one consolidating action in the future,
+    # pulling down all results and pushing once to the branch.
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: dtolnay/rust-toolchain@stable
+ with:
+ components: rustfmt, clippy
+
+ - uses: Swatinem/rust-cache@v2
+ continue-on-error: true
+
+ ########################
+ ### Setup ###
+ ########################
+ - run: cargo install cargo-criterion
+
+ - name: install ripgrep
+ run: sudo apt-get -y install ripgrep
+
+      - name: Download the 95 MB file to be uploaded with the client
+ shell: bash
+ run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
+
+      # A normal user won't care much about initial client startup,
+      # but will be more sensitive to communication speed during transmission.
+      # Since the criterion test code includes the client startup as well,
+      # it is better to execute the bench test with `local`,
+      # so the measurement results reflect speed improvements or regressions more accurately.
+ - name: Build binaries
+ run: cargo build --release --features local --bin safenode --bin autonomi
+ timeout-minutes: 30
+
+ - name: Start a local network
+ uses: maidsafe/sn-local-testnet-action@main
+ env:
+ SN_LOG: "all"
+ with:
+ action: start
+ enable-evm-testnet: true
+ node-path: target/release/safenode
+ platform: ubuntu-latest
+ build: true
+
+ - name: Check SAFE_PEERS was set
+ shell: bash
+ run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS"
+
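+      # Note: this is the well-known first dev-account private key that local EVM
+      # test chains (e.g. Anvil/Hardhat) ship with, so it is presumably safe to commit.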
+ - name: export default secret key
+ run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV
+ shell: bash
+
+ #########################
+ ### Upload large file ###
+ #########################
+
+ - name: Start a client instance to compare memory usage
+ shell: bash
+ run: ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data.zip"
+ env:
+ SN_LOG: "all"
+ timeout-minutes: 5
+
+      - name: Clean up the uploaded_files folder to avoid polluting the download benchmark
+ shell: bash
+ run: |
+ ls -l $CLIENT_DATA_PATH
+ rm -rf $CLIENT_DATA_PATH/uploaded_files
+
+ ###########################
+ ### Client Mem Analysis ###
+ ###########################
+
+ - name: Check client memory usage
+ shell: bash
+ run: |
+ client_peak_mem_limit_mb="1024" # mb
+ client_avg_mem_limit_mb="512" # mb
+
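+          # The metrics log lines contain JSON fragments shaped roughly like
+          # {"memory_used_mb":42.0, ...} (values hypothetical); rg pulls out the
+          # `"memory_used_mb":<value>` pairs and awk keeps just the number.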
+ peak_mem_usage=$(
+ rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename |
+ awk -F':' '/"memory_used_mb":/{print $2}' |
+ sort -n |
+ tail -n 1
+ )
+ echo "Peak memory usage: $peak_mem_usage MB"
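+          # bc -l performs the float comparison (the MB values may be fractional);
+          # it prints 1 or 0, which the arithmetic (( ... )) context treats as true/false.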
+ if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
+ echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
+ exit 1
+ fi
+
+ total_mem=$(
+ rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename |
+ awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
+ )
+ num_of_times=$(
+ rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob autonomi.* -c --stats |
+ rg "(\d+) matches" |
+ rg "\d+" -o
+ )
+ echo "num_of_times: $num_of_times"
+ echo "Total memory is: $total_mem"
+ average_mem=$(($total_mem/$(($num_of_times))))
+ echo "Average memory is: $average_mem"
+
+ if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
+ echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
+ exit 1
+ fi
+ # Write the client memory usage to a file
+ echo '[
+ {
+ "name": "client-peak-memory-usage-during-upload",
+ "value": '$peak_mem_usage',
+ "unit": "MB"
+ },
+ {
+ "name": "client-average-memory-usage-during-upload",
+ "value": '$average_mem',
+ "unit": "MB"
+ }
+ ]' > client_memory_usage.json
+
+ - name: check client_memory_usage.json
+ shell: bash
+ run: cat client_memory_usage.json
+
+ - name: Alert for client memory usage
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ name: "Memory Usage of Client during uploading large file"
+ tool: "customSmallerIsBetter"
+ output-file-path: client_memory_usage.json
+ # Where the previous data file is stored
+ external-data-json-path: ./cache/client-mem-usage.json
+ # Workflow will fail when an alert happens
+ fail-on-alert: true
+ # GitHub API token to make a commit comment
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ # Enable alert commit comment
+ comment-on-alert: true
+ # 200% regression will result in alert
+ alert-threshold: "200%"
+ # Enable Job Summary for PRs
+ summary-always: true
+
+ # ########################
+ # ### Benchmark ###
+ # ########################
+ # - name: Bench `safe` cli
+ # shell: bash
+ # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr,
+ # # passes to tee which displays it in the terminal and writes to output.txt
+ # run: |
+ # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt
+ # cat output.txt | rg benchmark-complete | jq -s 'map({
+ # name: (.id | split("/"))[-1],
+ # unit: "MiB/s",
+ # value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9))
+ # })' > files-benchmark.json
+ # timeout-minutes: 15
+
+ # - name: Confirming the number of files uploaded and downloaded during the benchmark test
+ # shell: bash
+ # run: |
+ # ls -l $CLIENT_DATA_PATH
+ # ls -l $CLIENT_DATA_PATH/uploaded_files
+ # ls -l $CLIENT_DATA_PATH/safe_files
+
+ # - name: Store benchmark result
+ # uses: benchmark-action/github-action-benchmark@v1
+ # with:
+ # # What benchmark tool the output.txt came from
+ # tool: "customBiggerIsBetter"
+ # output-file-path: files-benchmark.json
+ # # Where the previous data file is stored
+ # external-data-json-path: ./cache/benchmark-data.json
+ # # Workflow will fail when an alert happens
+ # fail-on-alert: true
+ # # GitHub API token to make a commit comment
+ # github-token: ${{ secrets.GITHUB_TOKEN }}
+ # # Enable alert commit comment
+ # comment-on-alert: true
+ # # 200% regression will result in alert
+ # alert-threshold: "200%"
+ # # Enable Job Summary for PRs
+ # summary-always: true
+
+ # - name: Start a client to carry out download to output the logs
+ # shell: bash
+ # run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick
+
+ # - name: Start a client to simulate criterion upload
+ # shell: bash
+ # run: |
+ # ls -l target/release
+ # target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick
+
+ #########################
+ ### Stop Network ###
+ #########################
+
+ - name: Stop the local network
+ if: always()
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: stop
+ log_file_prefix: safe_test_logs_benchmark
+ platform: ubuntu-latest
+ build: true
+
+ #########################
+ ### Node Mem Analysis ###
+ #########################
+
+      # The large uploaded file will increase the node's peak memory usage considerably.
+ - name: Check node memory usage
+ shell: bash
+ run: |
+ node_peak_mem_limit_mb="250" # mb
+ peak_mem_usage=$(
+ rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
+ awk -F':' '/"memory_used_mb":/{print $2}' |
+ sort -n |
+ tail -n 1
+ )
+
+ echo "Memory usage: $peak_mem_usage MB"
+ if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
+ echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
+ exit 1
+ fi
+ # Write the node memory usage to a file
+ echo '[
+ {
+ "name": "node-memory-usage-through-safe-benchmark",
+ "value": '$peak_mem_usage',
+ "unit": "MB"
+ }
+ ]' > node_memory_usage.json
+
+ - name: check node_memory_usage.json
+ shell: bash
+ run: cat node_memory_usage.json
+
+ - name: Alert for node memory usage
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ tool: "customSmallerIsBetter"
+ output-file-path: node_memory_usage.json
+ # Where the previous data file is stored
+ external-data-json-path: ./cache/node-mem-usage.json
+ # Workflow will fail when an alert happens
+ fail-on-alert: true
+ # GitHub API token to make a commit comment
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ # Enable alert commit comment
+ comment-on-alert: true
+ # Comment on the PR
+ comment-always: true
+ # 200% regression will result in alert
+ alert-threshold: "200%"
+ # Enable Job Summary for PRs
+ summary-always: true
+
+ ###########################################
+ ### Swarm_driver handling time Analysis ###
+ ###########################################
+
+ - name: Check swarm_driver handling time
+ shell: bash
+ run: |
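+          # The matched fragments look like "SwarmCmd handled in 12.3ms:" (value hypothetical);
+          # awk splits on a space or on "ms:", so the duration lands in field $4.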
+ num_of_times=$(
+ rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats |
+ rg "(\d+) matches" |
+ rg "\d+" -o
+ )
+ echo "Number of long cmd handling times: $num_of_times"
+ total_long_handling_ms=$(
+ rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename |
+ awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
+ )
+ echo "Total cmd long handling time is: $total_long_handling_ms ms"
+ average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
+ echo "Average cmd long handling time is: $average_handling_ms ms"
+ total_long_handling=$(($total_long_handling_ms))
+ total_num_of_times=$(($num_of_times))
+ num_of_times=$(
+ rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats |
+ rg "(\d+) matches" |
+ rg "\d+" -o
+ )
+ echo "Number of long event handling times: $num_of_times"
+ total_long_handling_ms=$(
+ rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename |
+ awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
+ )
+ echo "Total event long handling time is: $total_long_handling_ms ms"
+ average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
+ echo "Average event long handling time is: $average_handling_ms ms"
+ total_long_handling=$(($total_long_handling_ms+$total_long_handling))
+ total_num_of_times=$(($num_of_times+$total_num_of_times))
+ average_handling_ms=$(($total_long_handling/$(($total_num_of_times))))
+ echo "Total swarm_driver long handling times is: $total_num_of_times"
+ echo "Total swarm_driver long handling duration is: $total_long_handling ms"
+ echo "Total average swarm_driver long handling duration is: $average_handling_ms ms"
+ total_num_of_times_limit_hits="30000" # hits
+ total_long_handling_limit_ms="400000" # ms
+ average_handling_limit_ms="20" # ms
+ if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then
+ echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits"
+ exit 1
+ fi
+ if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then
+ echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms"
+ exit 1
+ fi
+ if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then
+ echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms"
+ exit 1
+ fi
+
+          # Write the swarm_driver long handling stats to a file
+ echo '[
+ {
+ "name": "swarm_driver long handling times",
+ "value": '$total_num_of_times',
+ "unit": "hits"
+ },
+ {
+ "name": "swarm_driver long handling total_time",
+ "value": '$total_long_handling',
+ "unit": "ms"
+ },
+ {
+ "name": "swarm_driver average long handling time",
+ "value": '$average_handling_ms',
+ "unit": "ms"
+ }
+ ]' > swarm_driver_long_handlings.json
+
+ - name: check swarm_driver_long_handlings.json
+ shell: bash
+ run: cat swarm_driver_long_handlings.json
+
+ - name: Alert for swarm_driver long handlings
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ tool: "customSmallerIsBetter"
+ output-file-path: swarm_driver_long_handlings.json
+ # Where the previous data file is stored
+ external-data-json-path: ./cache/swarm_driver_long_handlings.json
+ # Workflow will fail when an alert happens
+ fail-on-alert: true
+ # GitHub API token to make a commit comment
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ # Enable alert commit comment
+ comment-on-alert: true
+ # Comment on the PR
+ comment-always: true
+ # 200% regression will result in alert
+ alert-threshold: "200%"
+ # Enable Job Summary for PRs
+ summary-always: true
benchmark-cash:
name: Compare sn_transfer benchmarks to main
diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml
index 27a737a7a7..b8c6a10ffe 100644
--- a/.github/workflows/generate-benchmark-charts.yml
+++ b/.github/workflows/generate-benchmark-charts.yml
@@ -67,7 +67,7 @@ jobs:
# Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr,
# passes to tee which displays it in the terminal and writes to output.txt
run: |
- cargo criterion --features=local --message-format=json 2>&1 -p autonomi | tee -a output.txt
+ cargo criterion --features=local --message-format=json 2>&1 -p autonomi-cli | tee -a output.txt
cat output.txt | rg benchmark-complete | jq -s 'map({
name: (.id | split("/"))[-1],
unit: "MiB/s",
@@ -158,17 +158,17 @@ jobs:
shell: bash
run: |
peak_mem_usage=$(
- rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
+ rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs/*/*.log -o --no-line-number --no-filename |
awk -F':' '/"memory_used_mb":/{print $2}' |
sort -n |
tail -n 1
)
total_mem=$(
- rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
+ rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs/*/*.log -o --no-line-number --no-filename |
awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
)
num_of_times=$(
- rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats |
+ rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs/*/*.log -c --stats |
rg "(\d+) matches" |
rg "\d+" -o
)
diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml
index 55d3790bb5..d16b417fca 100644
--- a/.github/workflows/memcheck.yml
+++ b/.github/workflows/memcheck.yml
@@ -5,517 +5,293 @@ on:
# on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors
# the merge run checks should show on master and enable this clear test/passing history
merge_group:
- branches: [ main, alpha*, beta*, rc* ]
+ branches: [main, alpha*, beta*, rc*]
pull_request:
- branches: [ "*" ]
+ branches: ["*"]
env:
SAFE_DATA_PATH: /home/runner/.local/share/safe
- CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
+ CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi
NODE_DATA_PATH: /home/runner/.local/share/safe/node
- BOOTSTRAP_NODE_DATA_PATH: /home/runner/.local/share/safe/bootstrap_node
RESTART_TEST_NODE_DATA_PATH: /home/runner/.local/share/safe/restart_node
- FAUCET_LOG_PATH: /home/runner/.local/share/safe/test_faucet/logs
-
-# jobs:
-# memory-check:
-# runs-on: ubuntu-latest
-# steps:
-# - name: Checkout code
-# uses: actions/checkout@v4
-
-# - name: Check we're on the right commit
-# run: git log -1 --oneline
-
-# - name: Install Rust
-# uses: dtolnay/rust-toolchain@stable
-
-# - uses: Swatinem/rust-cache@v2
-# continue-on-error: true
-
-# - name: install ripgrep
-# shell: bash
-# run: sudo apt-get install -y ripgrep
-
-# - name: Build binaries
-# run: cargo build --release --bin safe --bin safenode
-# timeout-minutes: 30
-
-# - name: Build faucet binary with gifting
-# run: cargo build --release --bin faucet --features gifting
-# timeout-minutes: 30
-
-# - name: Build tests
-# run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run
-# timeout-minutes: 30
-
-# - name: Start a node instance that does not undergo churn
-# run: |
-# mkdir -p $BOOTSTRAP_NODE_DATA_PATH
-# ./target/release/safenode --first \
-# --root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap &
-# sleep 10
-# env:
-# SN_LOG: "all"
-
-# - name: Set SAFE_PEERS
-# run: |
-# safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \
-# rg '/ip4.*$' -m1 -o | rg '"' -r '')
-# echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV
-
-# - name: Check SAFE_PEERS was set
-# shell: bash
-# run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS"
-
-# - name: Start a node instance to be restarted
-# run: |
-# mkdir -p $RESTART_TEST_NODE_DATA_PATH
-# ./target/release/safenode \
-# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart &
-# sleep 10
-# env:
-# SN_LOG: "all"
-
-# - name: Start a local network
-# env:
-# SN_LOG: "all"
-# uses: maidsafe/sn-local-testnet-action@main
-# with:
-# action: start
-# build: true
-# faucet-path: target/release/faucet
-# interval: 2000
-# join: true
-# node-path: target/release/safenode
-# owner-prefix: node
-# platform: ubuntu-latest
-# set-safe-peers: false
-
-# # In this case we did *not* want SAFE_PEERS to be set to another value by starting the testnet
-# - name: Check SAFE_PEERS was not changed
-# shell: bash
-# run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}"
-
-# - name: Create and fund a wallet to pay for files storage
-# run: |
-# echo "Obtaining address for use with the faucet..."
-# ./target/release/safe --log-output-dest=data-dir wallet create --no-password
-# address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1)
-# echo "Sending tokens to the faucet at $address"
-# ./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt
-# cat initial_balance_from_faucet.txt
-# cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex
-# cat transfer_hex
-# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
-# env:
-# SN_LOG: "all"
-# timeout-minutes: 15
-
-# - name: Move faucet log to the working folder
-# run: |
-# echo "SAFE_DATA_PATH has: "
-# ls -l $SAFE_DATA_PATH
-# echo "test_faucet foder has: "
-# ls -l $SAFE_DATA_PATH/test_faucet
-# echo "logs folder has: "
-# ls -l $SAFE_DATA_PATH/test_faucet/logs
-# mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log
-# continue-on-error: true
-# if: always()
-# timeout-minutes: 1
-
-# - name: Download 95mb file to be uploaded with the safe client
-# shell: bash
-# run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
-
-# # The resources file we upload may change, and with it mem consumption.
-# # Be aware!
-# - name: Start a client to upload files
-# # -p makes files public
-# run: |
-# ls -l
-# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p
-# env:
-# SN_LOG: "all"
-# timeout-minutes: 25
-
-# # this check needs to be after some transfer activity
-# - name: Check we're warned about using default genesis
-# run: |
-# git log -1 --oneline
-# ls -la $RESTART_TEST_NODE_DATA_PATH
-# cat $RESTART_TEST_NODE_DATA_PATH/safenode.log
-# - name: Check we're warned about using default genesis
-# run: |
-# git log -1 --oneline
-# ls -la $BOOTSTRAP_NODE_DATA_PATH
-# cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log
-
-# - name: Check we're warned about using default genesis
-# run: |
-# git log -1 --oneline
-# ls -la $NODE_DATA_PATH
-# rg "USING DEFAULT" "$NODE_DATA_PATH" -u
-# shell: bash
-
-# # Uploading same file using different client shall not incur any payment neither uploads
-# # Note rg will throw an error directly in case of failed to find a matching pattern.
-# - name: Start a different client to upload the same file
-# run: |
-# pwd
-# mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first
-# ls -l $SAFE_DATA_PATH
-# ls -l $SAFE_DATA_PATH/client_first
-# mkdir $SAFE_DATA_PATH/client
-# ls -l $SAFE_DATA_PATH
-# mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs
-# ls -l $CLIENT_DATA_PATH
-# cp ./the-test-data.zip ./the-test-data_1.zip
-# ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password
-# ./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt
-# cat initial_balance_from_faucet_1.txt
-# cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex
-# cat transfer_hex
-# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
-# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt
-# cat second_upload.txt
-# rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats
-# env:
-# SN_LOG: "all"
-# timeout-minutes: 25
-
-# - name: Stop the restart node
-# run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid )
-
-# - name: Start the restart node again
-# run: |
-# ./target/release/safenode \
-# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted &
-# sleep 10
-# env:
-# SN_LOG: "all"
-
-# - name: Assert we've reloaded some chunks
-# run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH
-
-# - name: Chunks data integrity during nodes churn
-# run: cargo test --release -p sn_node --test data_with_churn -- --nocapture
-# env:
-# TEST_DURATION_MINS: 5
-# TEST_TOTAL_CHURN_CYCLES: 15
-# SN_LOG: "all"
-# timeout-minutes: 30
-
-# - name: Check current files
-# run: ls -la
-# - name: Check safenode file
-# run: ls /home/runner/work/safe_network/safe_network/target/release
-
-# - name: Check there was no restart issues
-# run: |
-# if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then
-# echo "Restart issues detected"
-# exit 1
-# else
-# echo "No restart issues detected"
-# fi
-
-# - name: Verify the routing tables of the nodes
-# run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture
-# env:
-# SLEEP_BEFORE_VERIFICATION: 300
-# timeout-minutes: 10
-
-# - name: Verify restart of nodes using rg
-# shell: bash
-# timeout-minutes: 1
-# # get the counts, then the specific line, and then the digit count only
-# # then check we have an expected level of restarts
-# # TODO: make this use an env var, or relate to testnet size
-# run: |
-# restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \
-# rg "(\d+) matches" | rg "\d+" -o)
-# echo "Restart $restart_count nodes"
-# peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \
-# rg "(\d+) matches" | rg "\d+" -o)
-# echo "PeerRemovedFromRoutingTable $peer_removed times"
-# if [ $peer_removed -lt $restart_count ]; then
-# echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count"
-# exit 1
-# fi
-# node_count=$(ls $NODE_DATA_PATH | wc -l)
-# echo "Node dir count is $node_count"
-# # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here
-# # if [ $restart_count -lt $node_count ]; then
-# # echo "Restart count of: $restart_count is less than the node count of: $node_count"
-# # exit 1
-# # fi
-
-# - name: Verify data replication using rg
-# shell: bash
-# timeout-minutes: 1
-# # get the counts, then the specific line, and then the digit count only
-# # then check we have an expected level of replication
-# # TODO: make this use an env var, or relate to testnet size
-# # As the bootstrap_node using separate folder for logging,
-# # hence the folder input to rg needs to cover that as well.
-# run: |
-# sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \
-# rg "(\d+) matches" | rg "\d+" -o)
-# echo "Sent $sending_list_count replication lists"
-# received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \
-# rg "(\d+) matches" | rg "\d+" -o)
-# echo "Received $received_list_count replication lists"
-# fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \
-# rg "(\d+) matches" | rg "\d+" -o)
-# echo "Carried out $fetching_attempt_count fetching attempts"
-# if: always()
-
-# - name: Start a client to download files
-# run: |
-# ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick
-# ls -l $CLIENT_DATA_PATH/safe_files
-# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l)
-# if [ $downloaded_files -lt 1 ]; then
-# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded"
-# exit 1
-# fi
-# env:
-# SN_LOG: "all"
-# timeout-minutes: 10
-
-# # Download the same files again to ensure files won't get corrupted.
-# - name: Start a client to download the same files again
-# run: |
-# ./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick
-# ls -l $CLIENT_DATA_PATH/safe_files
-# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l)
-# if [ $downloaded_files -lt 1 ]; then
-# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded"
-# exit 1
-# fi
-# file_size1=$(stat -c "%s" ./the-test-data_1.zip)
-# file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip)
-# if [ $file_size1 != $file_size2 ]; then
-# echo "The downloaded file has a different size $file_size2 to the original $file_size1."
-# exit 1
-# fi
-# env:
-# SN_LOG: "all"
-# timeout-minutes: 10
-
-# - name: Audit from genesis to collect entire spend DAG and dump to a dot file
-# run: |
-# ./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt
-# echo "=============================================================================="
-# cat spend_dag_and_statistics.txt
-# env:
-# SN_LOG: "all"
-# timeout-minutes: 5
-# if: always()
-
-# - name: Ensure discord_ids decrypted
-# run: |
-# rg 'node_' ./spend_dag_and_statistics.txt -o
-# timeout-minutes: 1
-# if: always()
-
-# - name: Check nodes running
-# shell: bash
-# timeout-minutes: 1
-# continue-on-error: true
-# run: pgrep safenode | wc -l
-# if: always()
-
-# - name: Wait before verifying reward forwarding
-# run: sleep 300
-
-# - name: Stop the local network and upload logs
-# if: always()
-# uses: maidsafe/sn-local-testnet-action@main
-# with:
-# action: stop
-# log_file_prefix: safe_test_logs_memcheck
-# platform: ubuntu-latest
-# build: true
-
-# - name: Check node memory usage
-# shell: bash
-# # The resources file and churning chunk_size we upload may change, and with it mem consumption.
-# # This is set to a value high enough to allow for some variation depending on
-# # resources and node location in the network, but hopefully low enough to catch
-# # any wild memory issues
-# # Any changes to this value should be carefully considered and tested!
-# # As we have a bootstrap node acting as an access point for churning nodes and client,
-# # The memory usage here will be significantly higher here than in the benchmark test,
-# # where we don't have a bootstrap node.
-# run: |
-# node_peak_mem_limit_mb="300" # mb
-
-# peak_mem_usage=$(
-# rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
-# awk -F':' '/"memory_used_mb":/{print $2}' |
-# sort -n |
-# tail -n 1
-# )
-# echo "Node memory usage: $peak_mem_usage MB"
-
-# if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
-# echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
-# exit 1
-# fi
-# if: always()
-
-# - name: Check client memory usage
-# shell: bash
-# # limits here are lower that benchmark tests as there is less going on.
-# run: |
-# client_peak_mem_limit_mb="1024" # mb
-# client_avg_mem_limit_mb="512" # mb
-
-# peak_mem_usage=$(
-# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
-# awk -F':' '/"memory_used_mb":/{print $2}' |
-# sort -n |
-# tail -n 1
-# )
-# echo "Peak memory usage: $peak_mem_usage MB"
-# if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
-# echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
-# exit 1
-# fi
-
-# total_mem=$(
-# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
-# awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
-# )
-# num_of_times=$(
-# rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats |
-# rg "(\d+) matches" |
-# rg "\d+" -o
-# )
-# echo "num_of_times: $num_of_times"
-# echo "Total memory is: $total_mem"
-# average_mem=$(($total_mem/$(($num_of_times))))
-# echo "Average memory is: $average_mem"
-
-# if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
-# echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
-# exit 1
-# fi
-
-# - name: Check node swarm_driver handling statistics
-# shell: bash
-# # With the latest improvements, swarm_driver will be in high chance
-# # has no super long handling (longer than 1s).
-# # As the `rg` cmd will fail the shell directly if no entry find,
-# # hence not covering it.
-# # Be aware that if do need to looking for handlings longer than second, it shall be:
-# # rg "SwarmCmd handled in [^m,ยต,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats
-# run: |
-# num_of_times=$(
-# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
-# rg "(\d+) matches" |
-# rg "\d+" -o
-# )
-# echo "Number of long cmd handling times: $num_of_times"
-# total_long_handling_ms=$(
-# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
-# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
-# )
-# echo "Total cmd long handling time is: $total_long_handling_ms ms"
-# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
-# echo "Average cmd long handling time is: $average_handling_ms ms"
-# total_long_handling=$(($total_long_handling_ms))
-# total_num_of_times=$(($num_of_times))
-# num_of_times=$(
-# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
-# rg "(\d+) matches" |
-# rg "\d+" -o
-# )
-# echo "Number of long event handling times: $num_of_times"
-# total_long_handling_ms=$(
-# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
-# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
-# )
-# echo "Total event long handling time is: $total_long_handling_ms ms"
-# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
-# echo "Average event long handling time is: $average_handling_ms ms"
-# total_long_handling=$(($total_long_handling_ms+$total_long_handling))
-# total_num_of_times=$(($num_of_times+$total_num_of_times))
-# average_handling_ms=$(($total_long_handling/$(($total_num_of_times))))
-# echo "Total swarm_driver long handling times is: $total_num_of_times"
-# echo "Total swarm_driver long handling duration is: $total_long_handling ms"
-# echo "Total average swarm_driver long handling duration is: $average_handling_ms ms"
-
-# - name: Verify reward forwarding using rg
-# shell: bash
-# timeout-minutes: 1
-# run: |
-# min_reward_forwarding_times="100"
-# reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \
-# rg "(\d+) matches" | rg "\d+" -o)
-# echo "Carried out $reward_forwarding_count reward forwardings"
-# if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then
-# echo "Reward forwarding times below the threshold: $min_reward_forwarding_times"
-# exit 1
-# fi
-# if: always()
-
-# - name: Upload payment wallet initialization log
-# uses: actions/upload-artifact@main
-# with:
-# name: payment_wallet_initialization_log
-# path: initial_balance_from_faucet.txt
-# continue-on-error: true
-# if: always()
-
-# - name: Move faucet log to the working folder
-# run: |
-# echo "current folder is:"
-# pwd
-# echo "SAFE_DATA_PATH has: "
-# ls -l $SAFE_DATA_PATH
-# echo "test_faucet foder has: "
-# ls -l $SAFE_DATA_PATH/test_faucet
-# echo "logs folder has: "
-# ls -l $SAFE_DATA_PATH/test_faucet/logs
-# mv $FAUCET_LOG_PATH/*.log ./faucet_log.log
-# env:
-# SN_LOG: "all"
-# continue-on-error: true
-# if: always()
-# timeout-minutes: 1
-
-# - name: Move bootstrap_node log to the working directory
-# run: |
-# ls -l $BOOTSTRAP_NODE_DATA_PATH
-# mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log
-# continue-on-error: true
-# if: always()
-# timeout-minutes: 1
-
-# - name: Upload faucet log
-# uses: actions/upload-artifact@main
-# with:
-# name: memory_check_faucet_log
-# path: faucet_log.log
-# continue-on-error: true
-# if: always()
-
-# - name: Upload bootstrap_node log
-# uses: actions/upload-artifact@main
-# with:
-# name: memory_check_bootstrap_node_log
-# path: bootstrap_node.log
-# continue-on-error: true
-# if: always()
-
-# - name: Upload spend DAG and statistics
-# uses: actions/upload-artifact@main
-# with:
-# name: memory_check_spend_dag_and_statistics
-# path: spend_dag_and_statistics.txt
-# continue-on-error: true
-# if: always()
+
+jobs:
+ memory-check:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Check we're on the right commit
+ run: git log -1 --oneline
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@stable
+
+ - uses: Swatinem/rust-cache@v2
+ continue-on-error: true
+
+ - name: install ripgrep
+ shell: bash
+ run: sudo apt-get install -y ripgrep
+
+ - name: Build binaries
+ run: cargo build --release --features local --bin safenode --bin autonomi
+ timeout-minutes: 30
+
+ - name: Start a local network
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: start
+ enable-evm-testnet: true
+ node-path: target/release/safenode
+ platform: ubuntu-latest
+ build: true
+
+ - name: Check SAFE_PEERS was set
+ shell: bash
+ run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS"
+
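+      # The rewards address below is just a test address for the local network;
+      # presumably any valid EVM address would do here.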
+ - name: Start a node instance to be restarted
+ run: |
+ mkdir -p $RESTART_TEST_NODE_DATA_PATH
+ ./target/release/safenode \
+ --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --rewards-address "0x03B770D9cD32077cC0bF330c13C114a87643B124" &
+ sleep 10
+ env:
+ SN_LOG: "all"
+
+      - name: Download the 95 MB file to be uploaded with the client
+ shell: bash
+ run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
+
+ - name: export default secret key
+ run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV
+ shell: bash
+
+ - name: File upload
+ run: ./target/release/autonomi --log-output-dest=data-dir file upload --public "./the-test-data.zip" > ./upload_output 2>&1
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 5
+
+      - name: Show the upload terminal output
+ run: cat upload_output
+ shell: bash
+ if: always()
+
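+      # The upload output is expected to contain a line of the form
+      # "At address: <hex>"; the rg capture below extracts the hex part.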
+ - name: parse address
+ run: |
+ UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output)
+ echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV
+ shell: bash
+
+      # Uploading the same file with a different client shall incur neither payment nor upload.
+      # Note rg will throw an error directly if it fails to find a matching pattern.
+ - name: Start a different client to upload the same file
+ run: |
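+          # Move the first client's data aside so this upload starts from a fresh client dir.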
+ pwd
+ ls -l $SAFE_DATA_PATH
+ mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first
+ ls -l $SAFE_DATA_PATH
+ ls -l $SAFE_DATA_PATH/client_first
+ ls -l $SAFE_DATA_PATH/client_first/logs
+ mkdir $SAFE_DATA_PATH/client
+ ls -l $SAFE_DATA_PATH
+ cp ./the-test-data.zip ./the-test-data_1.zip
+ ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data_1.zip" > ./second_upload 2>&1
+ env:
+ SN_LOG: "all"
+ timeout-minutes: 25
+
+      - name: Show the second upload terminal output
+ run: cat second_upload
+ shell: bash
+ if: always()
+
+ - name: Stop the restart node
+ run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid )
+
+ - name: Start the restart node again
+ run: |
+ ./target/release/safenode \
+ --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --rewards-address "0x03B770D9cD32077cC0bF330c13C114a87643B124" &
+ sleep 10
+ env:
+ SN_LOG: "all"
+
+      # Records are encrypted, and the seed changes after a restart.
+      # Currently there will be `Existing record found`, but NO `Existing record loaded`,
+      # due to decryption failing (a different seed is used).
+ - name: Assert we've reloaded some chunks
+ run: rg "Existing record found" $RESTART_TEST_NODE_DATA_PATH
+
+      - name: Wait at least 1 min for replication to happen # it is throttled to once per 30s
+ run: sleep 60
+
+ - name: Verify data replication using rg
+ shell: bash
+ timeout-minutes: 1
+ # get the counts, then the specific line, and then the digit count only
+ # then check we have an expected level of replication
+ run: |
+ sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \
+ rg "(\d+) matches" | rg "\d+" -o)
+ echo "Sent $sending_list_count replication lists"
+ received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \
+ rg "(\d+) matches" | rg "\d+" -o)
+ echo "Received $received_list_count replication lists"
+ fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \
+ rg "(\d+) matches" | rg "\d+" -o)
+ echo "Carried out $fetching_attempt_count fetching attempts"
+ if: always()
+
+ - name: File Download
+ run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 2
+
+ - name: Check nodes running
+ shell: bash
+ timeout-minutes: 1
+ continue-on-error: true
+ run: pgrep safenode | wc -l
+ if: always()
+
+ - name: Stop the local network and upload logs
+ if: always()
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: stop
+ log_file_prefix: safe_test_logs_memcheck
+ platform: ubuntu-latest
+ build: true
+
+ - name: Check node memory usage
+ shell: bash
+        # The resources file and churning chunk_size we upload may change, and with them mem consumption.
+        # This is set to a value high enough to allow for some variation depending on
+        # resources and node location in the network, but hopefully low enough to catch
+        # any wild memory issues.
+        # Any changes to this value should be carefully considered and tested!
+        # As we have a bootstrap node acting as an access point for churning nodes and the client,
+        # the memory usage here will be significantly higher than in the benchmark test,
+        # where we don't have a bootstrap node.
+ run: |
+ node_peak_mem_limit_mb="300" # mb
+
+ peak_mem_usage=$(
+ rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
+ awk -F':' '/"memory_used_mb":/{print $2}' |
+ sort -n |
+ tail -n 1
+ )
+ echo "Node memory usage: $peak_mem_usage MB"
+
+ if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
+ echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
+ exit 1
+ fi
+ if: always()
+
+ - name: Check client memory usage
+ shell: bash
+        # Limits here are lower than in the benchmark tests as there is less going on.
+ run: |
+ client_peak_mem_limit_mb="1024" # mb
+ client_avg_mem_limit_mb="512" # mb
+
+ peak_mem_usage=$(
+ rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename |
+ awk -F':' '/"memory_used_mb":/{print $2}' |
+ sort -n |
+ tail -n 1
+ )
+ echo "Peak memory usage: $peak_mem_usage MB"
+ if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
+ echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
+ exit 1
+ fi
+
+ total_mem=$(
+ rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename |
+ awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
+ )
+ num_of_times=$(
+ rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob autonomi.* -c --stats |
+ rg "(\d+) matches" |
+ rg "\d+" -o
+ )
+ echo "num_of_times: $num_of_times"
+ echo "Total memory is: $total_mem"
+ average_mem=$(($total_mem/$(($num_of_times))))
+ echo "Average memory is: $average_mem"
+
+ if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
+ echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
+ exit 1
+ fi
+
+      # Logging of handling time is at Trace level,
+      # while the local_network startup tool sets the logging level to Debug.
+ #
+      # - name: Check node swarm_driver handling statistics
+      #   shell: bash
+      #   # With the latest improvements, there is a high chance that swarm_driver
+      #   # has no super-long handling (longer than 1s).
+      #   # As the `rg` cmd will fail the shell directly if no entry is found,
+      #   # we are not covering that case here.
+      #   # Be aware that if you do need to look for handlings longer than a second, it shall be:
+      #   # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats
+ # run: |
+ # num_of_times=$(
+ # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats |
+ # rg "(\d+) matches" |
+ # rg "\d+" -o
+ # )
+ # echo "Number of long cmd handling times: $num_of_times"
+ # total_long_handling_ms=$(
+ # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename |
+ # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
+ # )
+ # echo "Total cmd long handling time is: $total_long_handling_ms ms"
+ # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
+ # echo "Average cmd long handling time is: $average_handling_ms ms"
+ # total_long_handling=$(($total_long_handling_ms))
+ # total_num_of_times=$(($num_of_times))
+ # num_of_times=$(
+ # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats |
+ # rg "(\d+) matches" |
+ # rg "\d+" -o
+ # )
+ # echo "Number of long event handling times: $num_of_times"
+ # total_long_handling_ms=$(
+ # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename |
+ # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
+ # )
+ # echo "Total event long handling time is: $total_long_handling_ms ms"
+ # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
+ # echo "Average event long handling time is: $average_handling_ms ms"
+ # total_long_handling=$(($total_long_handling_ms+$total_long_handling))
+ # total_num_of_times=$(($num_of_times+$total_num_of_times))
+ # average_handling_ms=$(($total_long_handling/$(($total_num_of_times))))
+ # echo "Total swarm_driver long handling times is: $total_num_of_times"
+ # echo "Total swarm_driver long handling duration is: $total_long_handling ms"
+ # echo "Total average swarm_driver long handling duration is: $average_handling_ms ms"
+
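For reference, the duration arithmetic in the disabled block relies on awk splitting each match on a space or on the literal `ms:`, which leaves the numeric duration in field 4. A self-contained sketch with fabricated sample lines:

```bash
#!/usr/bin/env bash
# Sketch of the disabled handling-time aggregation; the two sample lines are
# fabricated, matching the `SwarmCmd handled in <n>ms:` format.
printf '%s\n' \
    'SwarmCmd handled in 12.5ms:' \
    'SwarmCmd handled in 3.1ms:' |
    awk -F' |ms:' '{sum += $4; n++} END {printf "total=%.1fms avg=%.1fms\n", sum, sum/n}'
```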
+ - name: Move restart_node log to the working directory
+ run: |
+ ls -l $RESTART_TEST_NODE_DATA_PATH
+ mv $RESTART_TEST_NODE_DATA_PATH/safenode.log ./restart_node.log
+ continue-on-error: true
+ if: always()
+ timeout-minutes: 1
+
+ - name: Upload restart_node log
+ uses: actions/upload-artifact@main
+ with:
+ name: memory_check_restart_node_log
+ path: restart_node.log
+ continue-on-error: true
+ if: always()
diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index 98ee999b06..db89c867be 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -110,6 +110,10 @@ jobs:
- uses: Swatinem/rust-cache@v2
+ - name: Run autonomi tests
+ timeout-minutes: 25
+ run: cargo test --release --package autonomi --lib --features="full,fs"
+
- name: Run node tests
timeout-minutes: 25
run: cargo test --release --package sn_node --lib
@@ -186,7 +190,7 @@ jobs:
echo "EVM_NETWORK has been set to $EVM_NETWORK"
fi
- # only these unit tests require a network, the rest are run above
+        # only these unit tests require a network; the rest are run above in the unit test section
- name: Run autonomi --tests
run: cargo test --package autonomi --tests -- --nocapture
env:
@@ -313,7 +317,7 @@ jobs:
- name: Delete current register signing key
shell: bash
- run: rm -rf ${{ matrix.safe_path }}/client
+ run: rm -rf ${{ matrix.safe_path }}/autonomi
- name: Generate new register signing key
run: ./target/release/autonomi --log-output-dest=data-dir register generate-key
@@ -531,15 +535,19 @@ jobs:
# platform: ${{ matrix.os }}
# build: true
- # - name: Check SAFE_PEERS was set
- # shell: bash
- # run: |
- # if [[ -z "$SAFE_PEERS" ]]; then
- # echo "The SAFE_PEERS variable has not been set"
- # exit 1
- # else
- # echo "SAFE_PEERS has been set to $SAFE_PEERS"
- # fi
+      # # in case the faucet is not ready yet
+ # - name: 30s sleep for faucet completion
+ # run: sleep 30
+
+ # - name: Check SAFE_PEERS was set
+ # shell: bash
+ # run: |
+ # if [[ -z "$SAFE_PEERS" ]]; then
+ # echo "The SAFE_PEERS variable has not been set"
+ # exit 1
+ # else
+ # echo "SAFE_PEERS has been set to $SAFE_PEERS"
+ # fi
# - name: execute token_distribution tests
# run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1
@@ -623,6 +631,10 @@ jobs:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 30
+      # Sleep for a while to allow restarted nodes to be detected by others
+ - name: Sleep a while
+ run: sleep 300
+
- name: Stop the local network and upload logs
if: always()
uses: maidsafe/sn-local-testnet-action@main
@@ -631,7 +643,14 @@ jobs:
log_file_prefix: safe_test_logs_churn
platform: ${{ matrix.os }}
- - name: Verify restart of nodes using rg
+ - name: Get total node count
+ shell: bash
+ timeout-minutes: 1
+ run: |
+ node_count=$(ls "${{ matrix.node_data_path }}" | wc -l)
+ echo "Node dir count is $node_count"
+
+ - name: Get restart of nodes using rg
shell: bash
timeout-minutes: 1
# get the counts, then the specific line, and then the digit count only
@@ -640,16 +659,23 @@ jobs:
run: |
restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
- echo "Restart $restart_count nodes"
+ echo "Restarted $restart_count nodes"
+
+      # `PeerRemovedFromRoutingTable` now only happens when a peer is reported as `BadNode`.
+      # Otherwise kad will remove a `dropped out node` directly from the RT.
+      # So, detecting the removal explicitly now has a much lower chance,
+      # due to the removal of connection_issue tracking.
+ - name: Get peers removed from nodes using rg
+ shell: bash
+ timeout-minutes: 1
+ run: |
peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \
- rg "(\d+) matches" | rg "\d+" -o)
- echo "PeerRemovedFromRoutingTable $peer_removed times"
- if [ $peer_removed -lt $restart_count ]; then
- echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count"
+ rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 0; }
+ if [ -z "$peer_removed" ]; then
+ echo "No peer removal count found"
exit 1
fi
- node_count=$(ls "${{ matrix.node_data_path }}" | wc -l)
- echo "Node dir count is $node_count"
+ echo "PeerRemovedFromRoutingTable $peer_removed times"
# TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here
# if [ $restart_count -lt $node_count ]; then
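Note the shape of the guard above: `rg` exits non-zero when a pattern has zero matches, so the `|| { ...; exit 0; }` chain turns an absent `PeerRemovedFromRoutingTable` entry into a pass rather than a shell failure, while the empty-string check still catches a pipeline that ran but extracted nothing. A minimal sketch of the same idiom, with `LOG_DIR` as a placeholder:

```bash
#!/usr/bin/env bash
# Sketch of the zero-matches-tolerant counting idiom used above.
# LOG_DIR is a placeholder; rg exits 1 when nothing matches.
LOG_DIR="${1:?usage: $0 <log-dir>}"

peer_removed=$(rg "PeerRemovedFromRoutingTable" "$LOG_DIR" -c --stats |
    rg '(\d+) matches' | rg '\d+' -o) \
    || { echo "no matches found; treating as acceptable"; exit 0; }

if [ -z "$peer_removed" ]; then
    echo "no match count extracted"
    exit 1
fi
echo "PeerRemovedFromRoutingTable $peer_removed times"
```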
@@ -763,12 +789,16 @@ jobs:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 5
+      # Sleep for a while to allow restarted nodes to be detected by others
+ - name: Sleep a while
+ run: sleep 300
+
- name: Stop the local network and upload logs
if: always()
uses: maidsafe/sn-local-testnet-action@main
with:
action: stop
- log_file_prefix: safe_test_logs_data_location
+ log_file_prefix: safe_test_logs_data_location_routing_table
platform: ${{ matrix.os }}
- name: Verify restart of nodes using rg
@@ -776,20 +806,29 @@ jobs:
timeout-minutes: 1
# get the counts, then the specific line, and then the digit count only
# then check we have an expected level of restarts
- # TODO: make this use an env var, or relate to testnet size
+ #
+      # `PeerRemovedFromRoutingTable` now only happens when a peer is reported as `BadNode`.
+      # Otherwise kad will remove a `dropped out node` directly from the RT.
+      # So, detecting the removal explicitly now has a much lower chance,
+      # due to the removal of connection_issue tracking.
+      #
+      # With the further reduction of replication frequency,
+      # it now becomes harder to detect a `dropped out node` as a `failed to replicate` node.
+      # Hence the assertion check is now removed and replaced with a printout only.
run: |
+ node_count=$(ls "${{ matrix.node_data_path }}" | wc -l)
+ echo "Node dir count is $node_count"
restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "Restart $restart_count nodes"
+ if ! rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats
+ then
+ echo "No peer removal count found"
+ exit 0
+ fi
peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "PeerRemovedFromRoutingTable $peer_removed times"
- if [ $peer_removed -lt $restart_count ]; then
- echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count"
- exit 1
- fi
- node_count=$(ls "${{ matrix.node_data_path }}" | wc -l)
- echo "Node dir count is $node_count"
# Only error out after uploading the logs
- name: Don't log raw data
@@ -860,15 +899,15 @@ jobs:
# echo "SAFE_PEERS has been set to $SAFE_PEERS"
# fi
- # - name: Create and fund a wallet first time
- # run: |
- # ~/safe --log-output-dest=data-dir wallet create --no-password
- # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt
- # echo "----------"
- # cat first.txt
- # env:
- # SN_LOG: "all"
- # timeout-minutes: 5
+ # - name: Create and fund a wallet first time
+ # run: |
+ # ~/safe --log-output-dest=data-dir wallet create --no-password
+ # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt
+ # echo "----------"
+ # cat first.txt
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
# - name: Move faucet log to the working folder
# run: |
@@ -894,44 +933,64 @@ jobs:
# continue-on-error: true
# if: always()
- # - name: Create and fund a wallet second time
- # run: |
- # ls -l /home/runner/.local/share
- # ls -l /home/runner/.local/share/safe
- # rm -rf /home/runner/.local/share/safe/test_faucet
- # rm -rf /home/runner/.local/share/safe/test_genesis
- # rm -rf /home/runner/.local/share/safe/client
- # ~/safe --log-output-dest=data-dir wallet create --no-password
- # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt
- # echo "----------"
- # cat second.txt
- # if grep "genesis is already spent" second.txt; then
- # echo "Duplicated faucet rejected"
- # else
- # echo "Duplicated faucet not rejected!"
- # exit 1
- # fi
- # env:
- # SN_LOG: "all"
- # timeout-minutes: 5
-
- # - name: Create and fund a wallet with different keypair
- # run: |
- # ls -l /home/runner/.local/share
- # ls -l /home/runner/.local/share/safe
- # rm -rf /home/runner/.local/share/safe/test_faucet
- # rm -rf /home/runner/.local/share/safe/test_genesis
- # rm -rf /home/runner/.local/share/safe/client
- # ~/safe --log-output-dest=data-dir wallet create --no-password
- # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then
- # echo "Faucet with different genesis key not rejected!"
- # exit 1
- # else
- # echo "Faucet with different genesis key rejected"
- # fi
- # env:
- # SN_LOG: "all"
- # timeout-minutes: 5
+ # - name: Cleanup prior faucet and cashnotes
+ # run: |
+ # ls -l /home/runner/.local/share
+ # ls -l /home/runner/.local/share/safe
+ # rm -rf /home/runner/.local/share/safe/test_faucet
+ # rm -rf /home/runner/.local/share/safe/test_genesis
+ # rm -rf /home/runner/.local/share/safe/autonomi
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
+
+ # - name: Create a new wallet
+ # run: ~/safe --log-output-dest=data-dir wallet create --no-password
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
+
+ # - name: Attempt second faucet genesis disbursement
+ # run: ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) > second.txt 2>&1 || true
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
+
+ # - name: cat second.txt
+ # run: cat second.txt
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
+
+ # - name: Verify a second disbursement is rejected
+ # run: |
+ # if grep "Faucet disbursement has already occured" second.txt; then
+ # echo "Duplicated faucet rejected"
+ # else
+ # echo "Duplicated faucet not rejected!"
+ # exit 1
+ # fi
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
+
+ # - name: Create and fund a wallet with different keypair
+ # run: |
+ # ls -l /home/runner/.local/share
+ # ls -l /home/runner/.local/share/safe
+ # rm -rf /home/runner/.local/share/safe/test_faucet
+ # rm -rf /home/runner/.local/share/safe/test_genesis
+ # rm -rf /home/runner/.local/share/safe/autonomi
+ # ~/safe --log-output-dest=data-dir wallet create --no-password
+ # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then
+ # echo "Faucet with different genesis key not rejected!"
+ # exit 1
+ # else
+ # echo "Faucet with different genesis key rejected"
+ # fi
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
# - name: Build faucet binary again without the gifting feature
# run: cargo build --release --bin faucet
@@ -943,7 +1002,7 @@ jobs:
# ls -l /home/runner/.local/share/safe
# rm -rf /home/runner/.local/share/safe/test_faucet
# rm -rf /home/runner/.local/share/safe/test_genesis
- # rm -rf /home/runner/.local/share/safe/client
+ # rm -rf /home/runner/.local/share/safe/autonomi
# target/release/faucet server &
# sleep 60
# env:
@@ -970,152 +1029,135 @@ jobs:
# platform: ubuntu-latest
# log_file_prefix: safe_test_logs_faucet
- # large_file_upload_test:
- # if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
- # name: Large file upload
- # runs-on: ubuntu-latest
- # steps:
- # - uses: actions/checkout@v4
-
- # - name: Install Rust
- # uses: dtolnay/rust-toolchain@stable
- # - uses: Swatinem/rust-cache@v2
+ large_file_upload_test:
+ if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
+ name: Large file upload
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
- # - name: install ripgrep
- # shell: bash
- # run: sudo apt-get install -y ripgrep
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@stable
+ - uses: Swatinem/rust-cache@v2
- # - name: Check the available space
- # run: |
- # df
- # echo "Home dir:"
- # du -sh /home/runner/
- # echo "Home subdirs:"
- # du -sh /home/runner/*/
- # echo "PWD:"
- # du -sh .
- # echo "PWD subdirs:"
- # du -sh */
-
- # - name: Download material, 1.1G
- # shell: bash
- # run: |
- # wget https://releases.ubuntu.com/14.04.6/ubuntu-14.04.6-desktop-i386.iso
- # ls -l
+ - name: install ripgrep
+ shell: bash
+ run: sudo apt-get install -y ripgrep
- # - name: Build binaries
- # run: cargo build --release --bin safenode --bin safe
- # timeout-minutes: 30
+ - name: Check the available space
+ run: |
+ df
+ echo "Home dir:"
+ du -sh /home/runner/
+ echo "Home subdirs:"
+ du -sh /home/runner/*/
+ echo "PWD:"
+ du -sh .
+ echo "PWD subdirs:"
+ du -sh */
+
+ - name: Download material (135MB)
+ shell: bash
+ run: |
+ mkdir test_data_1
+ cd test_data_1
+ wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safe-qiWithListeners-x86_64.tar.gz
+ wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode-qiWithListeners-x86_64.tar.gz
+ ls -l
+ cd ..
+ tar -cvzf test_data_1.tar.gz test_data_1
+ ls -l
- # - name: Build faucet binary
- # run: cargo build --release --bin faucet --features gifting
- # timeout-minutes: 30
+ - name: Build binaries
+ run: cargo build --release --features local --bin safenode --bin autonomi
+ timeout-minutes: 30
- # - name: Start a local network
- # uses: maidsafe/sn-local-testnet-action@main
- # with:
- # action: start
- # interval: 2000
- # node-path: target/release/safenode
- # faucet-path: target/release/faucet
- # platform: ubuntu-latest
- # build: true
+ - name: Start a local network
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: start
+ enable-evm-testnet: true
+ node-path: target/release/safenode
+ platform: ubuntu-latest
+ build: true
- # - name: Check we're _not_ warned about using default genesis
- # run: |
- # if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then
- # exit 1
- # fi
- # shell: bash
+ - name: Check if SAFE_PEERS and EVM_NETWORK are set
+ shell: bash
+ run: |
+ if [[ -z "$SAFE_PEERS" ]]; then
+ echo "The SAFE_PEERS variable has not been set"
+ exit 1
+ elif [[ -z "$EVM_NETWORK" ]]; then
+ echo "The EVM_NETWORK variable has not been set"
+ exit 1
+ else
+ echo "SAFE_PEERS has been set to $SAFE_PEERS"
+ echo "EVM_NETWORK has been set to $EVM_NETWORK"
+ fi
- # # The test currently fails because the GH runner runs out of disk space. So we clear out the target dir here.
- # # Might be related to additional deps used in the codebase.
- # - name: Move built binaries and clear out target dir
- # shell: bash
- # run: |
- # mv target/release/faucet ~/faucet
- # mv target/release/safe ~/safe
- # rm -rf target
+ - name: Check the available space post download
+ run: |
+ df
+ echo "Home dir:"
+ du -sh /home/runner/
+ echo "Home subdirs:"
+ du -sh /home/runner/*/
+ echo "PWD:"
+ du -sh .
+ echo "PWD subdirs:"
+ du -sh */
- # - name: Check SAFE_PEERS was set
- # shell: bash
- # run: |
- # if [[ -z "$SAFE_PEERS" ]]; then
- # echo "The SAFE_PEERS variable has not been set"
- # exit 1
- # else
- # echo "SAFE_PEERS has been set to $SAFE_PEERS"
- # fi
+ - name: export default secret key
+ run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV
+ shell: bash
- # - name: Check the available space post download
- # run: |
- # df
- # echo "Home dir:"
- # du -sh /home/runner/
- # echo "Home subdirs:"
- # du -sh /home/runner/*/
- # echo "PWD:"
- # du -sh .
- # echo "PWD subdirs:"
- # du -sh */
-
- # - name: Create and fund a wallet to pay for files storage
- # run: |
- # ~/safe --log-output-dest=data-dir wallet create --no-password
- # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
- # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex
- # env:
- # SN_LOG: "all"
- # timeout-minutes: 5
+ - name: File upload
+ run: ./target/release/autonomi --log-output-dest=data-dir file upload "./test_data_1.tar.gz" > ./upload_output 2>&1
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 5
- # - name: Start a client to upload
- # run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick
- # env:
- # SN_LOG: "all"
- # timeout-minutes: 30
+      - name: Show the upload terminal output
+ run: cat upload_output
+ shell: bash
+ if: always()
- # - name: Stop the local network and upload logs
- # if: always()
- # uses: maidsafe/sn-local-testnet-action@main
- # with:
- # action: stop
- # platform: ubuntu-latest
- # log_file_prefix: safe_test_logs_large_file_upload
- # build: true
+ - name: parse address
+ run: |
+ UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output)
+ echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV
+ shell: bash
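The `-o -r '$1'` combination makes ripgrep print only the first capture group of each match, which is how the upload address is lifted straight out of the CLI output, and appending `NAME=value` to the file behind `$GITHUB_ENV` is how GitHub Actions hands the value to later steps. A sketch against a fabricated output line:

```bash
#!/usr/bin/env bash
# Sketch of the address parsing above; the sample output line is fabricated.
echo 'At address: deadbeef0123' > upload_output

# -o prints only the match; -r '$1' rewrites it to capture group 1.
UPLOAD_ADDRESS=$(rg 'At address: ([0-9a-f]*)' -o -r '$1' ./upload_output)
echo "parsed: $UPLOAD_ADDRESS"

# Inside a workflow step, persisting it for later steps would look like:
# echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> "$GITHUB_ENV"
```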
- # - name: check there is no failed replication fetch
- # shell: bash
- # run: |
- # if grep -r "failed to fetch" $NODE_DATA_PATH
- # then
- # echo "We find failed replication fetch"
- # exit 1
- # fi
- # env:
- # NODE_DATA_PATH: /home/runner/.local/share/safe/node
- # timeout-minutes: 1
+ - name: File Download
+ run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources > ./download_output 2>&1
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 5
- # - name: Check the home dir leftover space
- # run: |
- # df
- # du -sh /home/runner/
+      - name: Show the download terminal output
+ run: |
+ cat download_output
+ ls -l
+ cd downloaded_resources
+ ls -l
+ shell: bash
+ if: always()
- # - name: Confirm the wallet files (cash_notes, confirmed_spends)
- # run: |
- # pwd
- # ls $CLIENT_DATA_PATH/ -l
- # ls $CLIENT_DATA_PATH/wallet -l
- # ls $CLIENT_DATA_PATH/wallet/cash_notes -l
- # ls $CLIENT_DATA_PATH/wallet/confirmed_spends -l
- # ls $CLIENT_DATA_PATH/logs -l
- # env:
- # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
- # timeout-minutes: 1
+ - name: Stop the local network and upload logs
+ if: always()
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: stop
+ platform: ubuntu-latest
+ log_file_prefix: safe_test_logs_large_file_upload
+ build: true
# replication_bench_with_heavy_upload:
# if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
# name: Replication bench with heavy upload
# runs-on: ubuntu-latest
+ # env:
+ # CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi
# steps:
# - uses: actions/checkout@v4
@@ -1192,14 +1234,28 @@ jobs:
# echo "SAFE_PEERS has been set to $SAFE_PEERS"
# fi
- # - name: Create and fund a wallet to pay for files storage
- # run: |
- # ./target/release/safe --log-output-dest=data-dir wallet create --no-password
- # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
- # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
- # env:
- # SN_LOG: "all"
- # timeout-minutes: 5
+ # - name: Sleep 15s
+ # shell: bash
+ # run: sleep 15
+
+ # - name: Check faucet has been funded
+ # shell: bash
+ # run: |
+ # cash_note_count=$(ls -l /home/runner/.local/share/safe/test_faucet/wallet/cash_notes/ | wc -l)
+ # echo $cash_note_count
+ # if [ "$cash_note_count" -eq 0 ]; then
+ # echo "Error: Expected at least 1 cash note, but found $cash_note_count"
+ # exit 1
+ # fi
+
+ # - name: Create and fund a wallet to pay for files storage
+ # run: |
+ # ./target/release/safe --log-output-dest=data-dir wallet create --no-password
+ # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
+ # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 5
# - name: Start a client to upload first file
# run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick
@@ -1207,29 +1263,32 @@ jobs:
# SN_LOG: "all"
# timeout-minutes: 5
- # - name: Ensure no leftover cash_notes and payment files
- # run: |
- # expected_cash_notes_files="1"
- # expected_payment_files="0"
- # pwd
- # ls $CLIENT_DATA_PATH/ -l
- # ls $CLIENT_DATA_PATH/wallet -l
- # ls $CLIENT_DATA_PATH/wallet/cash_notes -l
- # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l)
- # echo "Find $cash_note_files cash_note files"
- # if [ $expected_cash_notes_files -lt $cash_note_files ]; then
- # echo "Got too many cash_note files leftover: $cash_note_files"
- # exit 1
- # fi
- # ls $CLIENT_DATA_PATH/wallet/payments -l
- # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l)
- # if [ $expected_payment_files -lt $payment_files ]; then
- # echo "Got too many payment files leftover: $payment_files"
- # exit 1
- # fi
- # env:
- # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
- # timeout-minutes: 10
+ # - name: Check current directories
+ # run: |
+ # pwd
+ # ls $CLIENT_DATA_PATH/ -l
+ # ls $CLIENT_DATA_PATH/wallet -l
+ # ls $CLIENT_DATA_PATH/wallet/cash_notes -l
+ # timeout-minutes: 1
+
+ # - name: Ensure no leftover cash_notes and payment files
+ # run: |
+ # expected_cash_notes_files="1"
+ # expected_payment_files="0"
+ # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l)
+ # echo "Find $cash_note_files cash_note files"
+ # if [ $expected_cash_notes_files -lt $cash_note_files ]; then
+ # echo "Got too many cash_note files leftover: $cash_note_files"
+ # exit 1
+ # fi
+ # ls $CLIENT_DATA_PATH/wallet/payments -l
+ # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l)
+ # if [ $expected_payment_files -lt $payment_files ]; then
+ # echo "Got too many payment files leftover: $payment_files"
+ # exit 1
+ # fi
+
+ # timeout-minutes: 10
# - name: Wait for certain period
# run: sleep 300
@@ -1241,52 +1300,49 @@ jobs:
# SN_LOG: "all"
# timeout-minutes: 10
- # - name: Ensure no leftover cash_notes and payment files
- # run: |
- # expected_cash_notes_files="1"
- # expected_payment_files="0"
- # pwd
- # ls $CLIENT_DATA_PATH/ -l
- # ls $CLIENT_DATA_PATH/wallet -l
- # ls $CLIENT_DATA_PATH/wallet/cash_notes -l
- # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l)
- # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then
- # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files"
- # exit 1
- # fi
- # ls $CLIENT_DATA_PATH/wallet/payments -l
- # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l)
- # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then
- # echo "Got too many payment files leftover: $payment_files"
- # exit 1
- # fi
- # env:
- # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
- # timeout-minutes: 10
+ # - name: Ensure no leftover cash_notes and payment files
+ # run: |
+ # expected_cash_notes_files="1"
+ # expected_payment_files="0"
+ # pwd
+ # ls $CLIENT_DATA_PATH/ -l
+ # ls $CLIENT_DATA_PATH/wallet -l
+ # ls $CLIENT_DATA_PATH/wallet/cash_notes -l
+ # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l)
+ # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then
+ # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files"
+ # exit 1
+ # fi
+ # ls $CLIENT_DATA_PATH/wallet/payments -l
+ # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l)
+ # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then
+ # echo "Got too many payment files leftover: $payment_files"
+ # exit 1
+ # fi
+ # timeout-minutes: 10
# - name: Wait for certain period
# run: sleep 300
# timeout-minutes: 6
- # # Start a different client to avoid local wallet slow down with more payments handled.
- # - name: Start a different client
- # run: |
- # pwd
- # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first
- # ls -l $SAFE_DATA_PATH
- # ls -l $SAFE_DATA_PATH/client_first
- # mkdir $SAFE_DATA_PATH/client
- # ls -l $SAFE_DATA_PATH
- # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs
- # ls -l $CLIENT_DATA_PATH
- # ./target/release/safe --log-output-dest=data-dir wallet create --no-password
- # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
- # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
- # env:
- # SN_LOG: "all"
- # SAFE_DATA_PATH: /home/runner/.local/share/safe
- # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
- # timeout-minutes: 25
+ # # Start a different client to avoid local wallet slow down with more payments handled.
+ # - name: Start a different client
+ # run: |
+ # pwd
+ # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first
+ # ls -l $SAFE_DATA_PATH
+ # ls -l $SAFE_DATA_PATH/client_first
+ # mkdir $SAFE_DATA_PATH/client
+ # ls -l $SAFE_DATA_PATH
+ # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs
+ # ls -l $CLIENT_DATA_PATH
+ # ./target/release/safe --log-output-dest=data-dir wallet create --no-password
+ # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
+ # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
+ # env:
+ # SN_LOG: "all"
+ # SAFE_DATA_PATH: /home/runner/.local/share/safe
+ # timeout-minutes: 25
# - name: Use second client to upload third file
# run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick
@@ -1294,29 +1350,27 @@ jobs:
# SN_LOG: "all"
# timeout-minutes: 10
- # - name: Ensure no leftover cash_notes and payment files
- # run: |
- # expected_cash_notes_files="1"
- # expected_payment_files="0"
- # pwd
- # ls $CLIENT_DATA_PATH/ -l
- # ls $CLIENT_DATA_PATH/wallet -l
- # ls $CLIENT_DATA_PATH/wallet/cash_notes -l
- # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l)
- # echo "Find $cash_note_files cash_note files"
- # if [ $expected_cash_notes_files -lt $cash_note_files ]; then
- # echo "Got too many cash_note files leftover: $cash_note_files"
- # exit 1
- # fi
- # ls $CLIENT_DATA_PATH/wallet/payments -l
- # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l)
- # if [ $expected_payment_files -lt $payment_files ]; then
- # echo "Got too many payment files leftover: $payment_files"
- # exit 1
- # fi
- # env:
- # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
- # timeout-minutes: 10
+ # - name: Ensure no leftover cash_notes and payment files
+ # run: |
+ # expected_cash_notes_files="1"
+ # expected_payment_files="0"
+ # pwd
+ # ls $CLIENT_DATA_PATH/ -l
+ # ls $CLIENT_DATA_PATH/wallet -l
+ # ls $CLIENT_DATA_PATH/wallet/cash_notes -l
+ # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l)
+ # echo "Find $cash_note_files cash_note files"
+ # if [ $expected_cash_notes_files -lt $cash_note_files ]; then
+ # echo "Got too many cash_note files leftover: $cash_note_files"
+ # exit 1
+ # fi
+ # ls $CLIENT_DATA_PATH/wallet/payments -l
+ # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l)
+ # if [ $expected_payment_files -lt $payment_files ]; then
+ # echo "Got too many payment files leftover: $payment_files"
+ # exit 1
+ # fi
+ # timeout-minutes: 10
# - name: Stop the local network and upload logs
# if: always()
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index aac0ac9ad4..843507abff 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -15,7 +15,13 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
+ include:
+ - os: ubuntu-latest
+ safe_path: /home/runner/.local/share/safe
+ - os: windows-latest
+ safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe
+ - os: macos-latest
+ safe_path: /Users/runner/Library/Application\ Support/safe
steps:
- uses: actions/checkout@v4
@@ -26,77 +32,181 @@ jobs:
continue-on-error: true
- name: Build binaries
- run: cargo build --release --bin safenode --bin safe --bin faucet
+ run: cargo build --release --features local --bin safenode --bin autonomi
timeout-minutes: 30
- name: Start a local network
uses: maidsafe/sn-local-testnet-action@main
with:
action: start
- interval: 2000
+ enable-evm-testnet: true
node-path: target/release/safenode
- faucet-path: target/release/faucet
platform: ${{ matrix.os }}
build: true
- - name: Check contact peer
+ - name: Check if SAFE_PEERS and EVM_NETWORK are set
shell: bash
- run: echo "Peer is $SAFE_PEERS"
+ run: |
+ if [[ -z "$SAFE_PEERS" ]]; then
+ echo "The SAFE_PEERS variable has not been set"
+ exit 1
+ elif [[ -z "$EVM_NETWORK" ]]; then
+ echo "The EVM_NETWORK variable has not been set"
+ exit 1
+ else
+ echo "SAFE_PEERS has been set to $SAFE_PEERS"
+ echo "EVM_NETWORK has been set to $EVM_NETWORK"
+ fi
# only these unit tests require a network, the rest are run above in unit test section
- - name: Run sn_client --tests
- run: cargo test --package sn_client --release --tests
+ - name: Run autonomi --tests
+ run: cargo test --package autonomi --tests -- --nocapture
env:
- SN_LOG: "all"
+ SN_LOG: "v"
# only set the target dir for windows to bypass the linker issue.
# happens if we build the node manager via testnet action
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 15
- - name: Create and fund a wallet to pay for files storage
+
+ # FIXME: do this in a generic way for localtestnets
+ - name: export default secret key
+ if: matrix.os != 'windows-latest'
+ run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV
+ shell: bash
+ - name: Set secret key for Windows
+ if: matrix.os == 'windows-latest'
+ run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+ shell: pwsh
+
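The key exported here is not a leaked secret: it is the well-known first pre-funded dev account of Anvil/Hardhat-style local EVM testnets. Two steps are needed only because `$GITHUB_ENV` must be appended with shell-appropriate syntax on each OS; a sketch of the pattern, meaningful only inside an Actions step:

```bash
#!/usr/bin/env bash
# Sketch: lines appended to $GITHUB_ENV become environment variables for all
# later steps in the job. The key is the public Anvil/Hardhat dev account,
# not a real secret.
: "${GITHUB_ENV:?only meaningful inside a GitHub Actions step}"
echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> "$GITHUB_ENV"

# On Windows runners the same append goes through PowerShell:
#   "SECRET_KEY=0x..." | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
```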
+ - name: Get file cost
+ run: ./target/release/autonomi --log-output-dest=data-dir file cost "./resources"
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 15
+
+ - name: File upload
+ run: ./target/release/autonomi --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 15
+
+ - name: parse address (unix)
+ if: matrix.os != 'windows-latest'
run: |
- cargo run --bin faucet --release -- --log-output-dest=data-dir send 1000000 $(cargo run --bin safe --release -- --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
- cargo run --bin safe --release -- --log-output-dest=data-dir wallet receive --file transfer_hex
+ UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output)
+ echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV
+ shell: bash
+
+ - name: parse address (win)
+ if: matrix.os == 'windows-latest'
+ run: |
+ $UPLOAD_ADDRESS = rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output
+ echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+ shell: pwsh
+
+ - name: File Download
+ run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources
env:
- SN_LOG: "all"
- timeout-minutes: 2
+ SN_LOG: "v"
+ timeout-minutes: 5
+
+ - name: Generate register signing key
+ run: ./target/release/autonomi --log-output-dest=data-dir register generate-key
- - name: Start a client to carry out chunk actions
- run: cargo run --bin safe --release -- --log-output-dest=data-dir files upload "./resources" --retry-strategy quick
+ - name: Create register (writeable by owner)
+ run: ./target/release/autonomi --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1
env:
- SN_LOG: "all"
- timeout-minutes: 2
+ SN_LOG: "v"
+ timeout-minutes: 10
+
+ - name: parse register address (unix)
+ if: matrix.os != 'windows-latest'
+ run: |
+ REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output)
+ echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV
+ shell: bash
+
+ - name: parse register address (win)
+ if: matrix.os == 'windows-latest'
+ run: |
+ $REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output
+ echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+ shell: pwsh
- # Client FoldersApi tests against local network
- - name: Client FoldersApi tests against local network
- run: cargo test --release --package sn_client --test folders_api
+ - name: Get register
+ run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }}
env:
- SN_LOG: "all"
+ SN_LOG: "v"
+ timeout-minutes: 5
+
+ - name: Edit register
+ run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456
+ env:
+ SN_LOG: "v"
timeout-minutes: 10
- # CLI Acc-Packet files and folders tests against local network
- - name: CLI Acc-Packet files and folders tests
- run: cargo test --release -p sn_cli test_acc_packet -- --nocapture
+ - name: Get register (after edit)
+ run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }}
env:
- SN_LOG: "all"
+ SN_LOG: "v"
+ timeout-minutes: 5
+
+ - name: Create Public Register (writeable by anyone)
+ run: ./target/release/autonomi --log-output-dest=data-dir register create bao 111 --public > ./register_public_create_output 2>&1
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 5
+
+ - name: parse public register address (unix)
+ if: matrix.os != 'windows-latest'
+ run: |
+ PUBLIC_REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output)
+ echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" >> $GITHUB_ENV
+ shell: bash
+
+ - name: parse public register address (win)
+ if: matrix.os == 'windows-latest'
+ run: |
+ $PUBLIC_REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output
+ echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+ shell: pwsh
+
+ - name: Get Public Register (current key is the owner)
+ run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }}
+ env:
+ SN_LOG: "v"
+ timeout-minutes: 5
+
+ - name: Edit Public Register (current key is the owner)
+ run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 222
+ env:
+ SN_LOG: "v"
timeout-minutes: 10
- - name: Start a client to create a register
- run: cargo run --bin safe --release -- --log-output-dest=data-dir register create -n baobao
+ - name: Delete current register signing key
+ shell: bash
+ run: rm -rf ${{ matrix.safe_path }}/autonomi
+
+ - name: Generate new register signing key
+ run: ./target/release/autonomi --log-output-dest=data-dir register generate-key
+
+ - name: Get Public Register (new signing key is not the owner)
+ run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }}
env:
- SN_LOG: "all"
+ SN_LOG: "v"
timeout-minutes: 2
- - name: Start a client to get a register
- run: cargo run --bin safe --release -- --log-output-dest=data-dir register get -n baobao
+ - name: Edit Public Register (new signing key is not the owner)
+ run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 333
env:
- SN_LOG: "all"
- timeout-minutes: 2
+ SN_LOG: "v"
+ timeout-minutes: 10
- - name: Start a client to edit a register
- run: cargo run --bin safe --release -- --log-output-dest=data-dir register edit -n baobao wood
+ - name: Get Public Register (new signing key is not the owner)
+ run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }}
env:
- SN_LOG: "all"
+ SN_LOG: "v"
timeout-minutes: 2
- name: Stop the local network and upload logs
@@ -134,31 +244,17 @@ jobs:
run: cargo test --release --lib --bins --no-run
timeout-minutes: 30
- - name: Run CLI tests
- timeout-minutes: 25
- run: cargo test --release --package sn_cli -- --skip test_acc_packet_
-
- - name: Run client tests
- timeout-minutes: 25
- # we do not run the `--tests` here are they are run in the e2e job
- # as they rquire a network
- run: |
- cargo test --release --package sn_client --doc
- cargo test --release --package sn_client --lib
- cargo test --release --package sn_client --bins
- cargo test --release --package sn_client --examples
-
- name: Run node tests
timeout-minutes: 25
run: cargo test --release --package sn_node --lib
- name: Run network tests
timeout-minutes: 25
- run: cargo test --release -p sn_networking --features="open-metrics"
+ run: cargo test --release --package sn_networking --features="open-metrics"
- name: Run protocol tests
timeout-minutes: 25
- run: cargo test --release -p sn_protocol
+ run: cargo test --release --package sn_protocol
- name: Run transfers tests
timeout-minutes: 25
@@ -167,13 +263,12 @@ jobs:
- name: Run logging tests
timeout-minutes: 25
run: cargo test --release --package sn_logging
-
+
- name: Run register tests
- shell: bash
timeout-minutes: 50
+ run: cargo test --release --package sn_registers
env:
PROPTEST_CASES: 512
- run: cargo test --release -p sn_registers
- name: post notification to slack on failure
if: ${{ failure() }}
@@ -183,210 +278,6 @@ jobs:
SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}"
SLACK_TITLE: "Nightly Unit Test Run Failed"
- spend_test:
- name: spend tests against network
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
- steps:
- - uses: actions/checkout@v4
-
- - name: Install Rust
- uses: dtolnay/rust-toolchain@stable
-
- - uses: Swatinem/rust-cache@v2
- continue-on-error: true
-
- - name: Build binaries
- run: cargo build --release --features=local --bin safenode --bin faucet
- timeout-minutes: 30
-
- - name: Build testing executable
- run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --no-run
- env:
- # only set the target dir for windows to bypass the linker issue.
- # happens if we build the node manager via testnet action
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- timeout-minutes: 30
-
- - name: Start a local network
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: start
- interval: 2000
- node-path: target/release/safenode
- faucet-path: target/release/faucet
- platform: ${{ matrix.os }}
- build: true
-
- - name: execute the sequential transfers test
- run: cargo test --release -p sn_node --features="local" --test sequential_transfers -- --nocapture --test-threads=1
- env:
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- SN_LOG: "all"
- timeout-minutes: 10
-
- - name: execute the storage payment tests
- run: cargo test --release -p sn_node --features="local" --test storage_payments -- --nocapture --test-threads=1
- env:
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- SN_LOG: "all"
- timeout-minutes: 10
-
- - name: execute the double spend tests
- run: cargo test --release -p sn_node --features="local" --test double_spend -- --nocapture --test-threads=1
- env:
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- timeout-minutes: 25
-
- - name: Small wait to allow reward receipt
- run: sleep 30
- timeout-minutes: 1
-
- - name: Stop the local network and upload logs
- if: always()
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: stop
- log_file_prefix: safe_test_logs_spend
- platform: ${{ matrix.os }}
-
- - name: post notification to slack on failure
- if: ${{ failure() }}
- uses: bryannice/gitactions-slack-notification@2.0.0
- env:
- SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }}
- SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}"
- SLACK_TITLE: "Nightly Spend Test Run Failed"
-
- # runs with increased node count
- spend_simulation:
- name: spend simulation
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
- steps:
- - uses: actions/checkout@v4
-
- - name: Install Rust
- uses: dtolnay/rust-toolchain@stable
-
- - uses: Swatinem/rust-cache@v2
- continue-on-error: true
-
- - name: Build binaries
- run: cargo build --release --features=local --bin safenode --bin faucet
- timeout-minutes: 30
-
- - name: Build testing executable
- run: cargo test --release -p sn_node --features=local --test spend_simulation --no-run
- env:
- # only set the target dir for windows to bypass the linker issue.
- # happens if we build the node manager via testnet action
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- timeout-minutes: 30
-
- - name: Start a local network
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: start
- interval: 2000
- node-count: 50
- node-path: target/release/safenode
- faucet-path: target/release/faucet
- platform: ${{ matrix.os }}
- build: true
-
- - name: execute the spend simulation test
- run: cargo test --release -p sn_node --features="local" --test spend_simulation -- --nocapture
- env:
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- timeout-minutes: 25
-
- - name: Small wait to allow reward receipt
- run: sleep 30
- timeout-minutes: 1
-
- - name: Stop the local network and upload logs
- if: always()
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: stop
- log_file_prefix: safe_test_logs_spend_simulation
- platform: ${{ matrix.os }}
-
- - name: post notification to slack on failure
- if: ${{ failure() }}
- uses: bryannice/gitactions-slack-notification@2.0.0
- env:
- SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }}
- SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}"
- SLACK_TITLE: "Nightly Spend Test Run Failed"
-
- token_distribution_test:
- if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
- name: token distribution test
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
- steps:
- - uses: actions/checkout@v4
-
- - name: Install Rust
- uses: dtolnay/rust-toolchain@stable
-
- - uses: Swatinem/rust-cache@v2
-
- - name: Build binaries
- run: cargo build --release --features=local,distribution --bin safenode --bin faucet
- timeout-minutes: 30
-
- - name: Build testing executable
- run: cargo test --release --features=local,distribution --no-run
- env:
- # only set the target dir for windows to bypass the linker issue.
- # happens if we build the node manager via testnet action
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- timeout-minutes: 30
-
- - name: Start a local network
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: start
- interval: 2000
- node-path: target/release/safenode
- faucet-path: target/release/faucet
- platform: ${{ matrix.os }}
- build: true
-
- - name: Check SAFE_PEERS was set
- shell: bash
- run: |
- if [[ -z "$SAFE_PEERS" ]]; then
- echo "The SAFE_PEERS variable has not been set"
- exit 1
- else
- echo "SAFE_PEERS has been set to $SAFE_PEERS"
- fi
-
- - name: execute token_distribution tests
- run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1
- env:
- SN_LOG: "all"
- CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
- timeout-minutes: 25
-
- - name: Stop the local network and upload logs
- if: always()
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: stop
- log_file_prefix: safe_test_logs_token_distribution
- platform: ${{ matrix.os }}
-
churn:
name: Network churning tests
runs-on: ${{ matrix.os }}
@@ -412,7 +303,7 @@ jobs:
continue-on-error: true
- name: Build binaries
- run: cargo build --release --features local --bin safenode --bin faucet
+ run: cargo build --release --features local --bin safenode
timeout-minutes: 30
- name: Build churn tests
@@ -427,14 +318,13 @@ jobs:
uses: maidsafe/sn-local-testnet-action@main
with:
action: start
- interval: 2000
+ enable-evm-testnet: true
node-path: target/release/safenode
- faucet-path: target/release/faucet
platform: ${{ matrix.os }}
build: true
- name: Chunks data integrity during nodes churn (during 10min) (in theory)
- run: cargo test --release -p sn_node --features="local" --test data_with_churn -- --nocapture
+ run: cargo test --release -p sn_node --features=local --test data_with_churn -- --nocapture
env:
TEST_DURATION_MINS: 60
TEST_CHURN_CYCLES: 6
@@ -442,7 +332,46 @@ jobs:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 90
- - name: Verify restart of nodes using rg
+ - name: Stop the local network and upload logs
+ if: always()
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: stop
+ log_file_prefix: safe_test_logs_churn
+ platform: ${{ matrix.os }}
+
+
+ - name: Get total node count
+ shell: bash
+ timeout-minutes: 1
+ run: |
+ node_count=$(ls "${{ matrix.node_data_path }}" | wc -l)
+ echo "Node dir count is $node_count"
+
+ - name: Get restart of nodes using rg
+ shell: bash
+ timeout-minutes: 1
+ # get the counts, then the specific line, and then the digit count only
+ # then check we have an expected level of restarts
+ # TODO: make this use an env var, or relate to testnet size
+ run: |
+ restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \
+ rg "(\d+) matches" | rg "\d+" -o)
+ echo "Restarted $restart_count nodes"
+
+ - name: Get peers removed from nodes using rg
+ shell: bash
+ timeout-minutes: 1
+ run: |
+ peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \
+ rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 1; }
+ if [ -z "$peer_removed" ]; then
+ echo "No peer removal count found"
+ exit 1
+ fi
+ echo "PeerRemovedFromRoutingTable $peer_removed times"
+
+ - name: Verify peers removed exceed restarted node counts
shell: bash
timeout-minutes: 1
# get the counts, then the specific line, and then the digit count only
@@ -459,8 +388,6 @@ jobs:
echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count"
exit 1
fi
- node_count=$(ls "${{ matrix.node_data_path }}" | wc -l)
- echo "Node dir count is $node_count"
# TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here
# if [ $restart_count -lt $node_count ]; then
@@ -484,14 +411,6 @@ jobs:
exit 1
fi
- - name: Stop the local network and upload logs
- if: always()
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: stop
- log_file_prefix: safe_test_logs_churn
- platform: ${{ matrix.os }}
-
- name: post notification to slack on failure
if: ${{ failure() }}
uses: bryannice/gitactions-slack-notification@2.0.0
@@ -537,7 +456,7 @@ jobs:
continue-on-error: true
- name: Build binaries
- run: cargo build --release --features local --bin safenode --bin faucet
+ run: cargo build --release --features local --bin safenode
timeout-minutes: 30
- name: Build data location and routing table tests
@@ -552,31 +471,38 @@ jobs:
uses: maidsafe/sn-local-testnet-action@main
with:
action: start
- interval: 2000
+ enable-evm-testnet: true
node-path: target/release/safenode
- faucet-path: target/release/faucet
platform: ${{ matrix.os }}
build: true
- name: Verify the Routing table of the nodes
- run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture
+ run: cargo test --release -p sn_node --features=local --test verify_routing_table -- --nocapture
env:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 5
- name: Verify the location of the data on the network
- run: cargo test --release -p sn_node --features="local" --test verify_data_location -- --nocapture
+ run: cargo test --release -p sn_node --features=local --test verify_data_location -- --nocapture
env:
SN_LOG: "all"
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 90
- name: Verify the routing tables of the nodes
- run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture
+ run: cargo test --release -p sn_node --features=local --test verify_routing_table -- --nocapture
env:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 5
+ - name: Stop the local network and upload logs
+ if: always()
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: stop
+ log_file_prefix: safe_test_logs_data_location
+ platform: ${{ matrix.os }}
+
- name: Verify restart of nodes using rg
shell: bash
timeout-minutes: 1
@@ -597,14 +523,6 @@ jobs:
node_count=$(ls "${{ matrix.node_data_path }}" | wc -l)
echo "Node dir count is $node_count"
- - name: Stop the local network and upload logs
- if: always()
- uses: maidsafe/sn-local-testnet-action@main
- with:
- action: stop
- log_file_prefix: safe_test_logs_data_location
- platform: ${{ matrix.os }}
-
- name: post notification to slack on failure
if: ${{ failure() }}
uses: bryannice/gitactions-slack-notification@2.0.0
diff --git a/.github/workflows/nightly_wan.yml b/.github/workflows/nightly_wan.yml
index 9c84f58488..e5f4a42511 100644
--- a/.github/workflows/nightly_wan.yml
+++ b/.github/workflows/nightly_wan.yml
@@ -129,7 +129,7 @@ jobs:
path: |
~/.local/share/safe/node/*/logs/*.log*
~/.local/share/safe/*/*/*.log*
- ~/.local/share/safe/client/logs/*/*.log*
+ ~/.local/share/safe/autonomi/logs/*/*.log*
- name: destroy network
if: always()
@@ -235,7 +235,7 @@ jobs:
# path: |
# ~/.local/share/safe/node/*/logs/*.log*
# ~/.local/share/safe/*/*/*.log*
- # ~/.local/share/safe/client/logs/*/*.log*
+ # ~/.local/share/safe/autonomi/logs/*/*.log*
# - name: destroy network
# uses: maidsafe/sn-testnet-control-action/destroy-network@main
@@ -349,7 +349,7 @@ jobs:
# path: |
# ~/.local/share/safe/node/*/logs/*.log*
# ~/.local/share/safe/*/*/*.log*
- # ~/.local/share/safe/client/logs/*/*.log*
+ # ~/.local/share/safe/autonomi/logs/*/*.log*
#
# - name: Stop the WAN network
# if: always()
@@ -555,7 +555,7 @@ jobs:
# path: |
# ~/.local/share/safe/node/*/logs/*.log*
# ~/.local/share/safe/*/*/*.log*
- # ~/.local/share/safe/client/logs/*/*.log*
+ # ~/.local/share/safe/autonomi/logs/*/*.log*
#
# - name: Stop the WAN network
# if: always()
diff --git a/.github/workflows/nightly_wan_churn.yml b/.github/workflows/nightly_wan_churn.yml
index 5101f8fdd6..2cbf72fd8c 100644
--- a/.github/workflows/nightly_wan_churn.yml
+++ b/.github/workflows/nightly_wan_churn.yml
@@ -127,7 +127,7 @@ jobs:
path: |
~/.local/share/safe/node/*/logs/*.log*
~/.local/share/safe/*/*/*.log*
- ~/.local/share/safe/client/logs/*/*.log*
+ ~/.local/share/safe/autonomi/logs/*/*.log*
- name: Stop the WAN network
if: always()
diff --git a/.github/workflows/node_man_tests.yml b/.github/workflows/node_man_tests.yml
index 55cd701cbf..54d6d3d625 100644
--- a/.github/workflows/node_man_tests.yml
+++ b/.github/workflows/node_man_tests.yml
@@ -25,7 +25,7 @@ jobs:
- uses: Swatinem/rust-cache@v2
- name: cargo cache registry, index and build
- uses: actions/cache@v4.0.2
+ uses: actions/cache@v4.1.2
with:
path: |
~/.cargo/registry
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1f4d77a9c6..d68be75785 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,80 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
*When editing this file, please respect a line length of 100.*
+## 2024-10-28
+
+### Autonomi API/CLI
+
+#### Added
+
+- Private data support.
+- Local user data support.
+- Network Vault containing encrypted user data.
+- Archives with Metadata.
+- Prepaid upload support for data_put using receipts.
+
+#### Changed
+
+- Contract token approval amount set to infinite before doing data payments.
+
+### Client
+
+#### Added
+
+- Expose APIs in WASM (e.g. archives, vault and user data within vault).
+- Uploads are now run in parallel.
+- Support for local wallets.
+- Provide `wallet create` command.
+- Provide `wallet balance` command.
+
+#### Changed
+
+- Take metadata from the file system and add `uploaded` field for time of upload.
+
+#### Fixed
+
+- Make sure we use the new client path throughout the codebase.
+
+### Network
+
+#### Added
+
+- Get range used for store cost and register queries.
+- Re-enabled large_file_upload, memcheck, benchmark CI tests.
+
+#### Changed
+
+- Scratchpad modifications to support multiple data encodings.
+- Registers are now merged at the network level, preventing failures during update and during
+ replication.
+- Libp2p config and get range tweaks reduce intensity of operations. Brings down CPU usage
+ considerably.
+- Libp2p's native kad bootstrap interval introduced in 0.54.1 is intensive, and as we roll our own,
+  we significantly reduce the kad period to lighten the CPU load.
+- Wipe node's storage dir when restarting for a new network.
+
+#### Fixed
+
+- Fixes in networking code for WASM compatibility (replacing `std::time` with a compatible
+  alternative).
+- Event dropped errors should not happen if the event is not dropped.
+- Reduce outdated connection pruning frequency.
+
+### Node Manager
+
+#### Fixed
+
+- Local node register is cleaned up when the --clean flag is applied (prevents some errors when
+  the register changes).
+
+### Launchpad
+
+#### Fixed
+
+- Status screen is updated after nodes have been reset.
+- Rewards Address is required before starting nodes. User input is required.
+- Spinner does not stop spinning after two minutes when nodes are running.
+
## 2024-10-24
### Network
diff --git a/Cargo.lock b/Cargo.lock
index a5c06f7ce9..dfcaa5e8c7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -118,9 +118,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
[[package]]
name = "alloy"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2"
+checksum = "d8cbebb817e6ada1abb27e642592a39eebc963eb0b9e78f66c467549f3903770"
dependencies = [
"alloy-consensus",
"alloy-contract",
@@ -151,9 +151,9 @@ dependencies = [
[[package]]
name = "alloy-consensus"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e"
+checksum = "cdf02dfacfc815214f9b54ff50d54900ba527a68fd73e2c5637ced3460005045"
dependencies = [
"alloy-eips",
"alloy-primitives",
@@ -167,9 +167,9 @@ dependencies = [
[[package]]
name = "alloy-contract"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad"
+checksum = "d45354c6946d064827d3b85041876aad9490b634f1761139934f8b1f65686b09"
dependencies = [
"alloy-dyn-abi",
"alloy-json-abi",
@@ -228,20 +228,21 @@ dependencies = [
[[package]]
name = "alloy-eip7702"
-version = "0.1.1"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04"
+checksum = "c15873ee28dfe5a1aeddd762483bc7f378b465ec49bdce8165c4c46b4f55cb0a"
dependencies = [
"alloy-primitives",
"alloy-rlp",
+ "derive_more",
"serde",
]
[[package]]
name = "alloy-eips"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85"
+checksum = "769da342b6bcd945013925ef4c40763cc82f11e002c60702dba8b444bb60e5a7"
dependencies = [
"alloy-eip2930",
"alloy-eip7702",
@@ -257,9 +258,9 @@ dependencies = [
[[package]]
name = "alloy-genesis"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3"
+checksum = "c698ce0ada980b17f0323e1a28c7da8a2e9abc6dff5be9ee33d1525b28ac46b6"
dependencies = [
"alloy-primitives",
"alloy-serde",
@@ -280,9 +281,9 @@ dependencies = [
[[package]]
name = "alloy-json-rpc"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7"
+checksum = "c1050e1d65524c030b17442b6546b564da51fdab7f71bd534b001ba65f2ebb16"
dependencies = [
"alloy-primitives",
"alloy-sol-types",
@@ -294,9 +295,9 @@ dependencies = [
[[package]]
name = "alloy-network"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd"
+checksum = "da34a18446a27734473af3d77eb21c5ebbdf97ea8eb65c39c0b50916bc659023"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -315,9 +316,9 @@ dependencies = [
[[package]]
name = "alloy-network-primitives"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd"
+checksum = "9a968c063fcfcb937736665c865a71fc2242b68916156f5ffa41fee7b44bb695"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -328,9 +329,9 @@ dependencies = [
[[package]]
name = "alloy-node-bindings"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454"
+checksum = "439fc6a933b9f8e8b272a8cac35dbeabaf2b2eaf9590482bebedb5782153118e"
dependencies = [
"alloy-genesis",
"alloy-primitives",
@@ -372,9 +373,9 @@ dependencies = [
[[package]]
name = "alloy-provider"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6"
+checksum = "c45dbc0e3630becef9e988b69d43339f68d67e32a854e3c855bc28bd5031895b"
dependencies = [
"alloy-chains",
"alloy-consensus",
@@ -387,6 +388,7 @@ dependencies = [
"alloy-rpc-client",
"alloy-rpc-types-anvil",
"alloy-rpc-types-eth",
+ "alloy-signer",
"alloy-signer-local",
"alloy-transport",
"alloy-transport-http",
@@ -397,14 +399,17 @@ dependencies = [
"futures",
"futures-utils-wasm",
"lru",
+ "parking_lot",
"pin-project",
"reqwest 0.12.7",
+ "schnellru",
"serde",
"serde_json",
"thiserror",
"tokio",
"tracing",
"url",
+ "wasmtimer",
]
[[package]]
@@ -431,9 +436,9 @@ dependencies = [
[[package]]
name = "alloy-rpc-client"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc"
+checksum = "917e5504e4f8f7e39bdc322ff81589ed54c1e462240adaeb58162c2d986a5a2b"
dependencies = [
"alloy-json-rpc",
"alloy-primitives",
@@ -449,13 +454,14 @@ dependencies = [
"tower 0.5.1",
"tracing",
"url",
+ "wasmtimer",
]
[[package]]
name = "alloy-rpc-types"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06"
+checksum = "07c7eb2dc6db1dd41e5e7bd2b98a38813854efc30e034afd90d1e420e7f3de2b"
dependencies = [
"alloy-primitives",
"alloy-rpc-types-anvil",
@@ -466,9 +472,9 @@ dependencies = [
[[package]]
name = "alloy-rpc-types-anvil"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07"
+checksum = "2640928d9b1d43bb1cec7a0d615e10c2b407c5bd8ff1fcbe49e6318a2b62d731"
dependencies = [
"alloy-primitives",
"alloy-serde",
@@ -477,9 +483,9 @@ dependencies = [
[[package]]
name = "alloy-rpc-types-eth"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87"
+checksum = "e855b0daccf2320ba415753c3fed422abe9d3ad5d77b2d6cafcc9bcf32fe387f"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -496,9 +502,9 @@ dependencies = [
[[package]]
name = "alloy-serde"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600"
+checksum = "35c2661ca6785add8fc37aff8005439c806ffad58254c19939c6f59ac0d6596e"
dependencies = [
"alloy-primitives",
"serde",
@@ -507,9 +513,9 @@ dependencies = [
[[package]]
name = "alloy-signer"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504"
+checksum = "67eca011160d18a7dc6d8cdc1e8dc13e2e86c908f8e41b02aa76e429d6fe7085"
dependencies = [
"alloy-primitives",
"async-trait",
@@ -521,9 +527,9 @@ dependencies = [
[[package]]
name = "alloy-signer-local"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f"
+checksum = "1c54b195a6ee5a83f32e7c697b4e6b565966737ed5a2ef9176bbbb39f720d023"
dependencies = [
"alloy-consensus",
"alloy-network",
@@ -610,9 +616,9 @@ dependencies = [
[[package]]
name = "alloy-transport"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904"
+checksum = "3e4a136e733f55fef0870b81e1f8f1db28e78973d1b1ae5a5df642ba39538a07"
dependencies = [
"alloy-json-rpc",
"base64 0.22.1",
@@ -626,13 +632,14 @@ dependencies = [
"tracing",
"url",
"wasm-bindgen-futures",
+ "wasmtimer",
]
[[package]]
name = "alloy-transport-http"
-version = "0.4.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212"
+checksum = "1a6b358a89b6d107b92d09b61a61fbc04243942182709752c796f4b29402cead"
dependencies = [
"alloy-json-rpc",
"alloy-transport",
@@ -1069,9 +1076,12 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "autonomi"
-version = "0.2.0"
+version = "0.2.2"
dependencies = [
+ "alloy",
"bip39",
+ "blst",
+ "blstrs 0.7.1",
"blsttc",
"bytes",
"console_error_panic_hook",
@@ -1113,23 +1123,32 @@ dependencies = [
[[package]]
name = "autonomi-cli"
-version = "0.1.2"
+version = "0.1.3"
dependencies = [
"autonomi",
"clap",
"color-eyre",
+ "const-hex",
"criterion",
"dirs-next",
"eyre",
+ "hex 0.4.3",
"indicatif",
+ "prettytable",
"rand 0.8.5",
"rayon",
+ "ring 0.17.8",
+ "rpassword",
+ "serde",
+ "serde_json",
"sn_build_info",
"sn_logging",
"sn_peers_acquisition",
"tempfile",
+ "thiserror",
"tokio",
"tracing",
+ "walkdir",
]
[[package]]
@@ -1412,7 +1431,23 @@ dependencies = [
"byte-slice-cast",
"ff 0.12.1",
"group 0.12.1",
- "pairing",
+ "pairing 0.22.0",
+ "rand_core 0.6.4",
+ "serde",
+ "subtle",
+]
+
+[[package]]
+name = "blstrs"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29"
+dependencies = [
+ "blst",
+ "byte-slice-cast",
+ "ff 0.13.0",
+ "group 0.13.0",
+ "pairing 0.23.0",
"rand_core 0.6.4",
"serde",
"subtle",
@@ -1425,12 +1460,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1186a39763321a0b73d1a10aa4fc067c5d042308509e8f6cc31d2c2a7ac61ac2"
dependencies = [
"blst",
- "blstrs",
+ "blstrs 0.6.2",
"ff 0.12.1",
"group 0.12.1",
"hex 0.4.3",
"hex_fmt",
- "pairing",
+ "pairing 0.22.0",
"rand 0.8.5",
"rand_chacha 0.3.1",
"serde",
@@ -1887,7 +1922,7 @@ version = "0.15.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb"
dependencies = [
- "encode_unicode",
+ "encode_unicode 0.3.6",
"lazy_static",
"libc",
"unicode-width",
@@ -1906,9 +1941,9 @@ dependencies = [
[[package]]
name = "const-hex"
-version = "1.12.0"
+version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6"
+checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -2184,6 +2219,27 @@ dependencies = [
"subtle",
]
+[[package]]
+name = "csv"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
+dependencies = [
+ "csv-core",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
+dependencies = [
+ "memchr",
+]
+
[[package]]
name = "ctr"
version = "0.9.2"
@@ -2677,6 +2733,12 @@ version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
+[[package]]
+name = "encode_unicode"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
+
[[package]]
name = "encoding_rs"
version = "0.8.34"
@@ -2747,7 +2809,7 @@ dependencies = [
[[package]]
name = "evm_testnet"
-version = "0.1.1"
+version = "0.1.2"
dependencies = [
"clap",
"dirs-next",
@@ -2758,7 +2820,7 @@ dependencies = [
[[package]]
name = "evmlib"
-version = "0.1.1"
+version = "0.1.2"
dependencies = [
"alloy",
"dirs-next",
@@ -2838,6 +2900,7 @@ version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449"
dependencies = [
+ "bitvec",
"rand_core 0.6.4",
"subtle",
]
@@ -3771,7 +3834,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
dependencies = [
"ff 0.13.0",
+ "rand 0.8.5",
"rand_core 0.6.4",
+ "rand_xorshift 0.3.0",
"subtle",
]
@@ -5565,7 +5630,7 @@ dependencies = [
[[package]]
name = "nat-detection"
-version = "0.2.8"
+version = "0.2.9"
dependencies = [
"clap",
"clap-verbosity-flag",
@@ -5682,7 +5747,7 @@ dependencies = [
[[package]]
name = "node-launchpad"
-version = "0.4.1"
+version = "0.4.2"
dependencies = [
"atty",
"better-panic",
@@ -6074,6 +6139,15 @@ dependencies = [
"group 0.12.1",
]
+[[package]]
+name = "pairing"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f"
+dependencies = [
+ "group 0.13.0",
+]
+
[[package]]
name = "pairing-plus"
version = "0.19.0"
@@ -6483,6 +6557,20 @@ dependencies = [
"yansi",
]
+[[package]]
+name = "prettytable"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46480520d1b77c9a3482d39939fcf96831537a250ec62d4fd8fbdf8e0302e781"
+dependencies = [
+ "csv",
+ "encode_unicode 1.0.0",
+ "is-terminal",
+ "lazy_static",
+ "term",
+ "unicode-width",
+]
+
[[package]]
name = "primitive-types"
version = "0.12.2"
@@ -7325,6 +7413,17 @@ dependencies = [
"serde_derive",
]
+[[package]]
+name = "rpassword"
+version = "7.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f"
+dependencies = [
+ "libc",
+ "rtoolbox",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "rtnetlink"
version = "0.10.1"
@@ -7340,6 +7439,16 @@ dependencies = [
"tokio",
]
+[[package]]
+name = "rtoolbox"
+version = "0.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e"
+dependencies = [
+ "libc",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "ruint"
version = "1.12.3"
@@ -7567,6 +7676,17 @@ dependencies = [
"winapi-util",
]
+[[package]]
+name = "schnellru"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367"
+dependencies = [
+ "ahash",
+ "cfg-if",
+ "hashbrown 0.13.2",
+]
+
[[package]]
name = "scoped-tls"
version = "1.0.1"
@@ -7763,9 +7883,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.128"
+version = "1.0.132"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
+checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
dependencies = [
"itoa",
"memchr",
@@ -8027,7 +8147,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "sn-node-manager"
-version = "0.11.0"
+version = "0.11.1"
dependencies = [
"assert_cmd",
"assert_fs",
@@ -8103,7 +8223,7 @@ dependencies = [
[[package]]
name = "sn_build_info"
-version = "0.1.16"
+version = "0.1.17"
dependencies = [
"chrono",
"tracing",
@@ -8145,7 +8265,7 @@ dependencies = [
[[package]]
name = "sn_evm"
-version = "0.1.1"
+version = "0.1.2"
dependencies = [
"custom_debug",
"evmlib",
@@ -8168,7 +8288,7 @@ dependencies = [
[[package]]
name = "sn_logging"
-version = "0.2.37"
+version = "0.2.38"
dependencies = [
"chrono",
"color-eyre",
@@ -8193,7 +8313,7 @@ dependencies = [
[[package]]
name = "sn_metrics"
-version = "0.1.17"
+version = "0.1.18"
dependencies = [
"clap",
"color-eyre",
@@ -8207,7 +8327,7 @@ dependencies = [
[[package]]
name = "sn_networking"
-version = "0.19.0"
+version = "0.19.1"
dependencies = [
"aes-gcm-siv",
"async-trait",
@@ -8252,7 +8372,7 @@ dependencies = [
[[package]]
name = "sn_node"
-version = "0.112.1"
+version = "0.112.2"
dependencies = [
"assert_fs",
"async-trait",
@@ -8309,7 +8429,7 @@ dependencies = [
[[package]]
name = "sn_node_rpc_client"
-version = "0.6.32"
+version = "0.6.33"
dependencies = [
"assert_fs",
"async-trait",
@@ -8336,7 +8456,7 @@ dependencies = [
[[package]]
name = "sn_peers_acquisition"
-version = "0.5.4"
+version = "0.5.5"
dependencies = [
"clap",
"lazy_static",
@@ -8352,7 +8472,7 @@ dependencies = [
[[package]]
name = "sn_protocol"
-version = "0.17.12"
+version = "0.17.13"
dependencies = [
"blsttc",
"bytes",
@@ -8382,7 +8502,7 @@ dependencies = [
[[package]]
name = "sn_registers"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"blsttc",
"crdts",
@@ -8399,7 +8519,7 @@ dependencies = [
[[package]]
name = "sn_service_management"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-trait",
"dirs-next",
@@ -8425,7 +8545,7 @@ dependencies = [
[[package]]
name = "sn_transfers"
-version = "0.20.0"
+version = "0.20.1"
dependencies = [
"assert_fs",
"blsttc",
@@ -8740,6 +8860,17 @@ dependencies = [
"windows-sys 0.59.0",
]
+[[package]]
+name = "term"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f"
+dependencies = [
+ "dirs-next",
+ "rustversion",
+ "winapi",
+]
+
[[package]]
name = "terminal_size"
version = "0.3.0"
@@ -8758,7 +8889,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "test_utils"
-version = "0.4.8"
+version = "0.4.9"
dependencies = [
"bytes",
"color-eyre",
@@ -8902,7 +9033,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "token_supplies"
-version = "0.1.55"
+version = "0.1.56"
dependencies = [
"dirs-next",
"reqwest 0.11.27",
diff --git a/README.md b/README.md
index 48751adf0e..67ea01d426 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ Libp2p.
### For Users
-- [CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi_cli/README.md) The Command Line
+- [CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi-cli/README.md) The Command Line
Interface, allowing users to interact with the network from their terminal.
- [Node](https://github.com/maidsafe/safe_network/blob/main/sn_node/README.md) The backbone of the
safe network. Nodes can be run on commodity hardware and provide storage space and validation of
@@ -32,10 +32,10 @@ You should build from the `stable` branch, as follows:
```
git checkout stable
-export FOUNDATION_PK=88a82d718d16dccc839188eddc9a46cb216667c940cd46285199458c919a170a55490db09763ae216ed25e9db78c3576
-export GENESIS_PK=aa3526db2dbc43998e0b541b8455e2ce9dd4f1cad80090e671da16e3cd11cd5e3550f74c3cefd09ad253d93cacae2320
-export NETWORK_ROYALTIES_PK=8b5463a2c8142959a7b7cfd9295587812eb07ccbe13a85865503c8004eeeb6889ccace3588dcf9f7396784d9ee48f4d5
-export PAYMENT_FORWARD_PK=87d5b511a497183c945df63ab8790a4b94cfe452d00bfbdb39e41ee861384fe0de716a224da1c6fd11356de49877dfc2
+export FOUNDATION_PK=b20c916c7a28707018292f06dfdb66ab88ebcbad9c78d18135e843a91b1d66b875b24d2c27d8d1ad4637c2d5811896fe
+export GENESIS_PK=93f7355906fa8c1a639bac80f4619dbb4cf5f71c47827d1ff2c30f0d133f6b841859662cbf7e0bbceca0eb0f521f6ebc
+export NETWORK_ROYALTIES_PK=af451aa34a0d16c50eb217b91ab6b2ca75ef43b9c20449384ff1e90dbf8477351499cca985828e33b208805dadc80c63
+export PAYMENT_FORWARD_PK=adc6401588af49c60af6717a60546207abddb4e150014b4ab6c407ef6d7b3d3899b8892a91ab23042378b7b285e655fc
cargo build --release --features=network-contacts --bin safenode
```
@@ -69,7 +69,7 @@ cargo build --release --features=network-contacts --bin safenode
- [Autonomi API](https://github.com/maidsafe/safe_network/blob/main/autonomi/README.md) The client APIs
allowing use of the Autonomi Network to users and developers.
-- [Autonomi CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi_cli/README.md) The Command Line
+- [Autonomi CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi-cli/README.md) The Command Line
Interface, allowing users to interact with the network from their terminal.
- [Node](https://github.com/maidsafe/safe_network/blob/main/sn_node/README.md) The backbone of the
autonomi network. Nodes can be run on commodity hardware and run the Network.
diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml
index 05208d3325..fb49e41f33 100644
--- a/autonomi-cli/Cargo.toml
+++ b/autonomi-cli/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["MaidSafe Developers <dev@maidsafe.net>"]
name = "autonomi-cli"
description = "Autonomi CLI"
license = "GPL-3.0"
-version = "0.1.2"
+version = "0.1.3"
edition = "2021"
homepage = "https://maidsafe.net"
readme = "README.md"
@@ -24,16 +24,22 @@ name = "files"
harness = false
[dependencies]
-autonomi = { path = "../autonomi", version = "0.2.0", features = [
+autonomi = { path = "../autonomi", version = "0.2.2", features = [
"data",
"fs",
+ "vault",
"registers",
"loud",
] }
clap = { version = "4.2.1", features = ["derive"] }
color-eyre = "~0.6"
+const-hex = "1.13.1"
dirs-next = "~2.0.0"
+prettytable = "0.10.0"
+thiserror = "1.0"
indicatif = { version = "0.17.5", features = ["tokio"] }
+rand = { version = "~0.8.5", features = ["small_rng"] }
+rpassword = "7.0"
tokio = { version = "1.32.0", features = [
"io-util",
"macros",
@@ -44,12 +50,17 @@ tokio = { version = "1.32.0", features = [
"fs",
] }
tracing = { version = "~0.1.26" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
-sn_build_info = { path = "../sn_build_info", version = "0.1.16" }
-sn_logging = { path = "../sn_logging", version = "0.2.37" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.17" }
+sn_logging = { path = "../sn_logging", version = "0.2.38" }
+walkdir = "2.5.0"
+serde_json = "1.0.132"
+serde = "1.0.210"
+hex = "0.4.3"
+ring = "0.17.8"
[dev-dependencies]
-autonomi = { path = "../autonomi", version = "0.2.0", features = [
+autonomi = { path = "../autonomi", version = "0.2.2", features = [
"data",
"fs",
] }
diff --git a/autonomi-cli/README.md b/autonomi-cli/README.md
index 8bc2277655..6da4930612 100644
--- a/autonomi-cli/README.md
+++ b/autonomi-cli/README.md
@@ -1,7 +1,7 @@
# A CLI for the Autonomi Network
```
-Usage: autonomi_cli [OPTIONS] <COMMAND>
+Usage: autonomi [OPTIONS] <COMMAND>
Commands:
file Operations related to file handling
diff --git a/autonomi-cli/benches/files.rs b/autonomi-cli/benches/files.rs
index f545936334..4b4794c16e 100644
--- a/autonomi-cli/benches/files.rs
+++ b/autonomi-cli/benches/files.rs
@@ -99,7 +99,7 @@ fn get_cli_path() -> PathBuf {
path.push("target");
}
path.push("release");
- path.push("autonomi_cli");
+ path.push("autonomi");
path
}
diff --git a/autonomi-cli/src/access/data_dir.rs b/autonomi-cli/src/access/data_dir.rs
index af0db16c2c..9233507264 100644
--- a/autonomi-cli/src/access/data_dir.rs
+++ b/autonomi-cli/src/access/data_dir.rs
@@ -6,14 +6,23 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
-use color_eyre::eyre::{eyre, Context, Result};
+use color_eyre::{
+ eyre::{eyre, Context, Result},
+ Section,
+};
use std::path::PathBuf;
pub fn get_client_data_dir_path() -> Result<PathBuf> {
let mut home_dirs = dirs_next::data_dir()
.ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?;
home_dirs.push("safe");
- home_dirs.push("client");
- std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?;
+ home_dirs.push("autonomi");
+ std::fs::create_dir_all(home_dirs.as_path())
+ .wrap_err("Failed to create data dir")
+ .with_suggestion(|| {
+ format!(
+ "make sure you have the correct permissions to access the data dir: {home_dirs:?}"
+ )
+ })?;
Ok(home_dirs)
}
diff --git a/autonomi-cli/src/access/keys.rs b/autonomi-cli/src/access/keys.rs
index 18310f4831..cfaa5284b7 100644
--- a/autonomi-cli/src/access/keys.rs
+++ b/autonomi-cli/src/access/keys.rs
@@ -6,9 +6,11 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
+use crate::wallet::load_wallet_private_key;
use autonomi::client::registers::RegisterSecretKey;
+use autonomi::client::vault::VaultSecretKey;
use autonomi::{get_evm_network_from_env, Wallet};
-use color_eyre::eyre::{Context, Result};
+use color_eyre::eyre::{eyre, Context, Result};
use color_eyre::Section;
use std::env;
use std::fs;
@@ -17,13 +19,12 @@ use std::path::PathBuf;
const SECRET_KEY_ENV: &str = "SECRET_KEY";
const REGISTER_SIGNING_KEY_ENV: &str = "REGISTER_SIGNING_KEY";
-const SECRET_KEY_FILE: &str = "secret_key";
const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key";
/// EVM wallet
-pub fn load_evm_wallet() -> Result<Wallet> {
+pub fn load_evm_wallet_from_env() -> Result<Wallet> {
let secret_key =
- get_secret_key().wrap_err("The secret key is required to perform this action")?;
+ get_secret_key_from_env().wrap_err("The secret key is required to perform this action")?;
let network = get_evm_network_from_env()?;
let wallet = Wallet::new_from_private_key(network, &secret_key)
.wrap_err("Failed to load EVM wallet from key")?;
@@ -31,24 +32,16 @@ pub async fn load_evm_wallet() -> Result<Wallet> {
}
/// EVM wallet private key
-pub fn get_secret_key() -> Result<String> {
- // try env var first
- let why_env_failed = match env::var(SECRET_KEY_ENV) {
- Ok(key) => return Ok(key),
- Err(e) => e,
- };
-
- // try from data dir
- let dir = super::data_dir::get_client_data_dir_path()
- .wrap_err(format!("Failed to obtain secret key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir"))
- .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var"))?;
+pub fn get_secret_key_from_env() -> Result<String> {
+ env::var(SECRET_KEY_ENV).wrap_err(eyre!(
+ "make sure you've provided the {SECRET_KEY_ENV} env var"
+ ))
+}
- // load the key from file
- let key_path = dir.join(SECRET_KEY_FILE);
- fs::read_to_string(&key_path)
- .wrap_err("Failed to read secret key from file")
- .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var or have the key in a file at {key_path:?}"))
- .with_suggestion(|| "the secret key should be a hex encoded string of your evm wallet private key")
+pub fn get_vault_secret_key() -> Result<VaultSecretKey> {
+ let secret_key = load_wallet_private_key()?;
+ autonomi::client::vault::derive_vault_key(&secret_key)
+ .wrap_err("Failed to derive vault secret key from EVM secret key")
}
pub fn create_register_signing_key_file(key: RegisterSecretKey) -> Result<PathBuf> {
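Taken together, the changes above drop the old on-disk `secret_key` file in favour of a single EVM private key that both funds payments and derives the vault key. A minimal sketch of that flow, using only APIs visible in this diff (`get_evm_network_from_env`, `Wallet::new_from_private_key`, `derive_vault_key`); the `main` wrapper and the direct env-var read are illustrative:

```rust
use autonomi::client::vault::VaultSecretKey;
use autonomi::{get_evm_network_from_env, Wallet};
use color_eyre::eyre::{Context, Result};

fn main() -> Result<()> {
    // One secret now serves two purposes. Read it as the CLI would
    // (env var first; the encrypted wallet file is the other source).
    let secret_key = std::env::var("SECRET_KEY").wrap_err("SECRET_KEY env var not set")?;

    // 1. It unlocks the EVM wallet used for payments...
    let network = get_evm_network_from_env()?;
    let wallet = Wallet::new_from_private_key(network, &secret_key)
        .wrap_err("Failed to load EVM wallet from key")?;
    println!("wallet address: {}", wallet.address());

    // 2. ...and deterministically derives the vault key, so the same
    // secret recovers user data on any device.
    let _vault_sk: VaultSecretKey = autonomi::client::vault::derive_vault_key(&secret_key)
        .wrap_err("Failed to derive vault secret key from EVM secret key")?;
    Ok(())
}
```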
diff --git a/autonomi-cli/src/access/mod.rs b/autonomi-cli/src/access/mod.rs
index ac80eeca88..327dc6db51 100644
--- a/autonomi-cli/src/access/mod.rs
+++ b/autonomi-cli/src/access/mod.rs
@@ -9,3 +9,4 @@
pub mod data_dir;
pub mod keys;
pub mod network;
+pub mod user_data;
diff --git a/autonomi-cli/src/access/user_data.rs b/autonomi-cli/src/access/user_data.rs
new file mode 100644
index 0000000000..57deb85785
--- /dev/null
+++ b/autonomi-cli/src/access/user_data.rs
@@ -0,0 +1,177 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::collections::HashMap;
+
+use autonomi::client::{
+ address::{addr_to_str, str_to_addr},
+ archive::ArchiveAddr,
+ archive_private::PrivateArchiveAccess,
+ registers::{RegisterAddress, RegisterSecretKey},
+ vault::UserData,
+};
+use color_eyre::eyre::Result;
+
+use super::{
+ data_dir::get_client_data_dir_path,
+ keys::{create_register_signing_key_file, get_register_signing_key},
+};
+
+use serde::{Deserialize, Serialize};
+
+#[derive(Serialize, Deserialize)]
+struct PrivateFileArchive {
+ name: String,
+ secret_access: String,
+}
+
+pub fn get_local_user_data() -> Result<UserData> {
+ let register_sk = get_register_signing_key().map(|k| k.to_hex()).ok();
+ let registers = get_local_registers()?;
+ let file_archives = get_local_public_file_archives()?;
+ let private_file_archives = get_local_private_file_archives()?;
+
+ let user_data = UserData {
+ register_sk,
+ registers,
+ file_archives,
+ private_file_archives,
+ };
+ Ok(user_data)
+}
+
+pub fn get_local_private_file_archives() -> Result<HashMap<PrivateArchiveAccess, String>> {
+ let data_dir = get_client_data_dir_path()?;
+ let user_data_path = data_dir.join("user_data");
+ let private_file_archives_path = user_data_path.join("private_file_archives");
+ std::fs::create_dir_all(&private_file_archives_path)?;
+
+ let mut private_file_archives = HashMap::new();
+ for entry in walkdir::WalkDir::new(private_file_archives_path)
+ .min_depth(1)
+ .max_depth(1)
+ {
+ let entry = entry?;
+ let file_content = std::fs::read_to_string(entry.path())?;
+ let private_file_archive: PrivateFileArchive = serde_json::from_str(&file_content)?;
+ let private_file_archive_access =
+ PrivateArchiveAccess::from_hex(&private_file_archive.secret_access)?;
+ private_file_archives.insert(private_file_archive_access, private_file_archive.name);
+ }
+ Ok(private_file_archives)
+}
+
+pub fn get_local_private_archive_access(local_addr: &str) -> Result<PrivateArchiveAccess> {
+ let data_dir = get_client_data_dir_path()?;
+ let user_data_path = data_dir.join("user_data");
+ let private_file_archives_path = user_data_path.join("private_file_archives");
+ let file_path = private_file_archives_path.join(local_addr);
+ let file_content = std::fs::read_to_string(file_path)?;
+ let private_file_archive: PrivateFileArchive = serde_json::from_str(&file_content)?;
+ let private_file_archive_access =
+ PrivateArchiveAccess::from_hex(&private_file_archive.secret_access)?;
+ Ok(private_file_archive_access)
+}
+
+pub fn get_local_registers() -> Result<HashMap<RegisterAddress, String>> {
+ let data_dir = get_client_data_dir_path()?;
+ let user_data_path = data_dir.join("user_data");
+ let registers_path = user_data_path.join("registers");
+ std::fs::create_dir_all(&registers_path)?;
+
+ let mut registers = HashMap::new();
+ for entry in walkdir::WalkDir::new(registers_path)
+ .min_depth(1)
+ .max_depth(1)
+ {
+ let entry = entry?;
+ let file_name = entry.file_name().to_string_lossy();
+ let register_address = RegisterAddress::from_hex(&file_name)?;
+ let file_content = std::fs::read_to_string(entry.path())?;
+ let register_name = file_content;
+ registers.insert(register_address, register_name);
+ }
+ Ok(registers)
+}
+
+pub fn get_local_public_file_archives() -> Result<HashMap<ArchiveAddr, String>> {
+ let data_dir = get_client_data_dir_path()?;
+ let user_data_path = data_dir.join("user_data");
+ let file_archives_path = user_data_path.join("file_archives");
+ std::fs::create_dir_all(&file_archives_path)?;
+
+ let mut file_archives = HashMap::new();
+ for entry in walkdir::WalkDir::new(file_archives_path)
+ .min_depth(1)
+ .max_depth(1)
+ {
+ let entry = entry?;
+ let file_name = entry.file_name().to_string_lossy();
+ let file_archive_address = str_to_addr(&file_name)?;
+ let file_archive_name = std::fs::read_to_string(entry.path())?;
+ file_archives.insert(file_archive_address, file_archive_name);
+ }
+ Ok(file_archives)
+}
+
+pub fn write_local_user_data(user_data: &UserData) -> Result<()> {
+ if let Some(register_key) = &user_data.register_sk {
+ let sk = RegisterSecretKey::from_hex(register_key)?;
+ create_register_signing_key_file(sk)?;
+ }
+
+ for (register, name) in user_data.registers.iter() {
+ write_local_register(register, name)?;
+ }
+
+ for (archive, name) in user_data.file_archives.iter() {
+ write_local_public_file_archive(addr_to_str(*archive), name)?;
+ }
+
+ for (archive, name) in user_data.private_file_archives.iter() {
+ write_local_private_file_archive(archive.to_hex(), archive.address(), name)?;
+ }
+
+ Ok(())
+}
+
+pub fn write_local_register(register: &RegisterAddress, name: &str) -> Result<()> {
+ let data_dir = get_client_data_dir_path()?;
+ let user_data_path = data_dir.join("user_data");
+ let registers_path = user_data_path.join("registers");
+ std::fs::create_dir_all(&registers_path)?;
+ std::fs::write(registers_path.join(register.to_hex()), name)?;
+ Ok(())
+}
+
+pub fn write_local_public_file_archive(archive: String, name: &str) -> Result<()> {
+ let data_dir = get_client_data_dir_path()?;
+ let user_data_path = data_dir.join("user_data");
+ let file_archives_path = user_data_path.join("file_archives");
+ std::fs::create_dir_all(&file_archives_path)?;
+ std::fs::write(file_archives_path.join(archive), name)?;
+ Ok(())
+}
+
+pub fn write_local_private_file_archive(
+ archive: String,
+ local_addr: String,
+ name: &str,
+) -> Result<()> {
+ let data_dir = get_client_data_dir_path()?;
+ let user_data_path = data_dir.join("user_data");
+ let private_file_archives_path = user_data_path.join("private_file_archives");
+ std::fs::create_dir_all(&private_file_archives_path)?;
+ let file_name = local_addr;
+ let content = serde_json::to_string(&PrivateFileArchive {
+ name: name.to_string(),
+ secret_access: archive,
+ })?;
+ std::fs::write(private_file_archives_path.join(file_name), content)?;
+ Ok(())
+}
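In short, this new module gives the CLI a plain-files database under `<data_dir>/user_data`: one file per register and public archive, and one JSON `{name, secret_access}` blob per private archive. A hedged usage sketch of the round trip that the vault commands rely on (assumed to live inside this same module so its functions are in scope):

```rust
use color_eyre::eyre::Result;

fn round_trip() -> Result<()> {
    // Gather everything on disk into the single UserData value that
    // the vault commands push to the network.
    let user_data = get_local_user_data()?;
    println!(
        "{} public archive(s), {} private archive(s), {} register(s)",
        user_data.file_archives.len(),
        user_data.private_file_archives.len(),
        user_data.registers.len(),
    );

    // Writing it back is idempotent: each entry lands in its own file,
    // keyed by network address (public) or local address (private).
    write_local_user_data(&user_data)?;
    Ok(())
}
```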
diff --git a/autonomi-cli/src/actions/connect.rs b/autonomi-cli/src/actions/connect.rs
index 9eccb3bbfb..cfe971d14e 100644
--- a/autonomi-cli/src/actions/connect.rs
+++ b/autonomi-cli/src/actions/connect.rs
@@ -24,10 +24,12 @@ pub async fn connect_to_network(peers: Vec<Multiaddr>) -> Result<Client> {
match Client::connect(&peers).await {
Ok(client) => {
+ info!("Connected to the Network");
progress_bar.finish_with_message("Connected to the Network");
Ok(client)
}
Err(e) => {
+ error!("Failed to connect to the network: {e}");
progress_bar.finish_with_message("Failed to connect to the network");
bail!("Failed to connect to the network: {e}")
}
diff --git a/autonomi-cli/src/actions/download.rs b/autonomi-cli/src/actions/download.rs
index ba004930e3..ff737ac2c1 100644
--- a/autonomi-cli/src/actions/download.rs
+++ b/autonomi-cli/src/actions/download.rs
@@ -7,22 +7,94 @@
// permissions and limitations relating to use of the SAFE Network Software.
use super::get_progress_bar;
-use autonomi::{client::address::str_to_addr, Client};
-use color_eyre::eyre::{eyre, Context, Result};
+use autonomi::{
+ client::{address::str_to_addr, archive::ArchiveAddr, archive_private::PrivateArchiveAccess},
+ Client,
+};
+use color_eyre::{
+ eyre::{eyre, Context, Result},
+ Section,
+};
use std::path::PathBuf;
pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> {
- let address = str_to_addr(addr).wrap_err("Failed to parse data address")?;
+ let public_address = str_to_addr(addr).ok();
+ let private_address = crate::user_data::get_local_private_archive_access(addr)
+ .inspect_err(|e| error!("Failed to get private archive access: {e}"))
+ .ok();
+
+ match (public_address, private_address) {
+ (Some(public_address), _) => download_public(addr, public_address, dest_path, client).await,
+ (_, Some(private_address)) => download_private(addr, private_address, dest_path, client).await,
+ _ => Err(eyre!("Failed to parse data address {addr}"))
+ .with_suggestion(|| "Public addresses look like this: 0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7")
+ .with_suggestion(|| "Private addresses look like this: 1358645341480028172")
+ .with_suggestion(|| "Try the `file list` command to get addresses you have access to"),
+ }
+}
+
+async fn download_private(
+ addr: &str,
+ private_address: PrivateArchiveAccess,
+ dest_path: &str,
+ client: &mut Client,
+) -> Result<()> {
+ let archive = client
+ .private_archive_get(private_address)
+ .await
+ .wrap_err("Failed to fetch data from address")?;
+
+ let progress_bar = get_progress_bar(archive.iter().count() as u64)?;
+ let mut all_errs = vec![];
+ for (path, access, _meta) in archive.iter() {
+ progress_bar.println(format!("Fetching file: {path:?}..."));
+ let bytes = match client.private_data_get(access.clone()).await {
+ Ok(bytes) => bytes,
+ Err(e) => {
+ let err = format!("Failed to fetch file {path:?}: {e}");
+ all_errs.push(err);
+ continue;
+ }
+ };
+
+ let path = PathBuf::from(dest_path).join(path);
+ let here = PathBuf::from(".");
+ let parent = path.parent().unwrap_or_else(|| &here);
+ std::fs::create_dir_all(parent)?;
+ std::fs::write(path, bytes)?;
+ progress_bar.clone().inc(1);
+ }
+ progress_bar.finish_and_clear();
+
+ if all_errs.is_empty() {
+ info!("Successfully downloaded private data with local address: {addr}");
+ println!("Successfully downloaded private data with local address: {addr}");
+ Ok(())
+ } else {
+ let err_no = all_errs.len();
+ eprintln!("{err_no} errors while downloading private data with local address: {addr}");
+ eprintln!("{all_errs:#?}");
+ error!("Errors while downloading private data with local address {addr}: {all_errs:#?}");
+ Err(eyre!("Errors while downloading private data"))
+ }
+}
+
+async fn download_public(
+ addr: &str,
+ address: ArchiveAddr,
+ dest_path: &str,
+ client: &mut Client,
+) -> Result<()> {
let archive = client
.archive_get(address)
.await
.wrap_err("Failed to fetch data from address")?;
- let progress_bar = get_progress_bar(archive.map.len() as u64)?;
+ let progress_bar = get_progress_bar(archive.iter().count() as u64)?;
let mut all_errs = vec![];
- for (path, addr) in archive.map {
+ for (path, addr, _meta) in archive.iter() {
progress_bar.println(format!("Fetching file: {path:?}..."));
- let bytes = match client.data_get(addr).await {
+ let bytes = match client.data_get(*addr).await {
Ok(bytes) => bytes,
Err(e) => {
let err = format!("Failed to fetch file {path:?}: {e}");
@@ -41,12 +113,14 @@ pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Resul
progress_bar.finish_and_clear();
if all_errs.is_empty() {
+ info!("Successfully downloaded data at: {addr}");
println!("Successfully downloaded data at: {addr}");
Ok(())
} else {
let err_no = all_errs.len();
eprintln!("{err_no} errors while downloading data at: {addr}");
eprintln!("{all_errs:#?}");
+ error!("Errors while downloading data at {addr}: {all_errs:#?}");
Err(eyre!("Errors while downloading data"))
}
}
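Note the resolution order in `download` above: a string is first parsed as a public archive address, and only on failure looked up in the local private-archive index. A sketch of just that dispatch (the `Target` enum and `resolve` helper are hypothetical, introduced here for illustration):

```rust
use autonomi::client::{
    address::str_to_addr, archive::ArchiveAddr, archive_private::PrivateArchiveAccess,
};

// Hypothetical helper mirroring the match in `download`.
enum Target {
    Public(ArchiveAddr),
    Private(PrivateArchiveAccess),
}

fn resolve(addr: &str) -> Option<Target> {
    // Public addresses are network addresses and parse directly.
    if let Ok(public) = str_to_addr(addr) {
        return Some(Target::Public(public));
    }
    // Private addresses only index the local user_data store; the
    // secret access blob they point at never leaves the machine.
    crate::user_data::get_local_private_archive_access(addr)
        .ok()
        .map(Target::Private)
}
```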
diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs
index bb718df43a..663898b6ea 100644
--- a/autonomi-cli/src/commands.rs
+++ b/autonomi-cli/src/commands.rs
@@ -9,6 +9,7 @@
mod file;
mod register;
mod vault;
+mod wallet;
use clap::Subcommand;
use color_eyre::Result;
@@ -34,6 +35,12 @@ pub enum SubCmd {
#[command(subcommand)]
command: VaultCmd,
},
+
+ /// Operations related to wallet management.
+ Wallet {
+ #[command(subcommand)]
+ command: WalletCmd,
+ },
}
#[derive(Subcommand, Debug)]
@@ -44,10 +51,13 @@ pub enum FileCmd {
file: String,
},
- /// Upload a file and pay for it.
+ /// Upload a file and pay for it. Data on the Network is private by default.
Upload {
/// The file to upload.
file: String,
+ /// Upload the file as public. Everyone can see public data on the Network.
+ #[arg(short, long)]
+ public: bool,
},
/// Download a file from the given address.
@@ -123,10 +133,42 @@ pub enum VaultCmd {
Cost,
/// Create a vault at a deterministic address based on your `SECRET_KEY`.
+ /// Pushes an encrypted backup of your local user data to the network.
Create,
+ /// Load an existing vault from the network.
+ /// Use this when loading your user data to a new device.
+ /// You need to have your original `SECRET_KEY` to load the vault.
+ Load,
+
/// Sync vault with the network, including registers and files.
- Sync,
+ /// Loads existing user data from the network and merges it with your local user data.
+ /// Pushes your local user data to the network.
+ Sync {
+ /// Force push your local user data to the network.
+ /// This will overwrite any existing data in your vault.
+ #[arg(short, long)]
+ force: bool,
+ },
+}
+
+#[derive(Subcommand, Debug)]
+pub enum WalletCmd {
+ /// Create a wallet.
+ Create {
+ /// Optional flag to not add a password.
+ #[clap(long, action)]
+ no_password: bool,
+ /// Optional hex-encoded private key.
+ #[clap(long)]
+ private_key: Option<String>,
+ /// Optional password to encrypt the wallet with.
+ #[clap(long, short)]
+ password: Option<String>,
+ },
+
+ /// Check the balance of the wallet.
+ Balance,
}
pub async fn handle_subcommand(opt: Opt) -> Result<()> {
@@ -136,11 +178,11 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> {
match cmd {
SubCmd::File { command } => match command {
FileCmd::Cost { file } => file::cost(&file, peers.await?).await,
- FileCmd::Upload { file } => file::upload(&file, peers.await?).await,
+ FileCmd::Upload { file, public } => file::upload(&file, public, peers.await?).await,
FileCmd::Download { addr, dest_file } => {
file::download(&addr, &dest_file, peers.await?).await
}
- FileCmd::List => file::list(peers.await?),
+ FileCmd::List => file::list(),
},
SubCmd::Register { command } => match command {
RegisterCmd::GenerateKey { overwrite } => register::generate_key(overwrite),
@@ -156,12 +198,21 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> {
value,
} => register::edit(address, name, &value, peers.await?).await,
RegisterCmd::Get { address, name } => register::get(address, name, peers.await?).await,
- RegisterCmd::List => register::list(peers.await?),
+ RegisterCmd::List => register::list(),
},
SubCmd::Vault { command } => match command {
- VaultCmd::Cost => vault::cost(peers.await?),
- VaultCmd::Create => vault::create(peers.await?),
- VaultCmd::Sync => vault::sync(peers.await?),
+ VaultCmd::Cost => vault::cost(peers.await?).await,
+ VaultCmd::Create => vault::create(peers.await?).await,
+ VaultCmd::Load => vault::load(peers.await?).await,
+ VaultCmd::Sync { force } => vault::sync(peers.await?, force).await,
+ },
+ SubCmd::Wallet { command } => match command {
+ WalletCmd::Create {
+ no_password,
+ private_key,
+ password,
+ } => wallet::create(no_password, private_key, password),
+ WalletCmd::Balance => Ok(wallet::balance().await?),
},
}
}
diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs
index d99a848214..6d3f051015 100644
--- a/autonomi-cli/src/commands/file.rs
+++ b/autonomi-cli/src/commands/file.rs
@@ -7,16 +7,19 @@
// permissions and limitations relating to use of the SAFE Network Software.
use crate::utils::collect_upload_summary;
+use crate::wallet::load_wallet;
use autonomi::client::address::addr_to_str;
use autonomi::Multiaddr;
use color_eyre::eyre::Context;
use color_eyre::eyre::Result;
+use color_eyre::Section;
use std::path::PathBuf;
pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
let client = crate::actions::connect_to_network(peers).await?;
println!("Getting upload cost...");
+ info!("Calculating cost for file: {file}");
let cost = client
.file_cost(&PathBuf::from(file))
.await
@@ -24,42 +27,112 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
println!("Estimate cost to upload file: {file}");
println!("Total cost: {cost}");
+ info!("Total cost: {cost} for file: {file}");
Ok(())
}
-pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
- let wallet = crate::keys::load_evm_wallet()?;
+
+pub async fn upload(file: &str, public: bool, peers: Vec<Multiaddr>) -> Result<()> {
+ let wallet = load_wallet()?;
let mut client = crate::actions::connect_to_network(peers).await?;
let event_receiver = client.enable_client_events();
let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver);
println!("Uploading data to network...");
+ info!(
+ "Uploading {} file: {file}",
+ if public { "public" } else { "private" }
+ );
- let xor_name = client
- .dir_upload(PathBuf::from(file), &wallet)
- .await
- .wrap_err("Failed to upload file")?;
- let addr = addr_to_str(xor_name);
-
- println!("Successfully uploaded: {file}");
- println!("At address: {addr}");
- if let Ok(()) = upload_completed_tx.send(()) {
- let summary = upload_summary_thread.await?;
- if summary.record_count == 0 {
- println!("All chunks already exist on the network");
- } else {
- println!("Number of chunks uploaded: {}", summary.record_count);
- println!("Total cost: {} AttoTokens", summary.tokens_spent);
- }
+ let dir_path = PathBuf::from(file);
+ let name = dir_path
+ .file_name()
+ .map(|n| n.to_string_lossy().to_string())
+ .unwrap_or(file.to_string());
+
+ // upload dir
+ let local_addr;
+ let archive = if public {
+ let xor_name = client
+ .dir_upload(dir_path, &wallet)
+ .await
+ .wrap_err("Failed to upload file")?;
+ local_addr = addr_to_str(xor_name);
+ local_addr.clone()
+ } else {
+ let private_data_access = client
+ .private_dir_upload(dir_path, &wallet)
+ .await
+ .wrap_err("Failed to upload file")?;
+ local_addr = private_data_access.address();
+ private_data_access.to_hex()
+ };
+
+ // wait for upload to complete
+ if let Err(e) = upload_completed_tx.send(()) {
+ error!("Failed to send upload completed event: {e:?}");
+ eprintln!("Failed to send upload completed event: {e:?}");
}
+ // get summary
+ let summary = upload_summary_thread.await?;
+ if summary.record_count == 0 {
+ println!("All chunks already exist on the network.");
+ } else {
+ println!("Successfully uploaded: {file}");
+ println!("At address: {local_addr}");
+ info!("Successfully uploaded: {file} at address: {local_addr}");
+ println!("Number of chunks uploaded: {}", summary.record_count);
+ println!("Total cost: {} AttoTokens", summary.tokens_spent);
+ }
+ info!("Summary for upload of file {file} at {local_addr:?}: {summary:?}");
+
+ // save to local user data
+ let writer = if public {
+ crate::user_data::write_local_public_file_archive(archive, &name)
+ } else {
+ crate::user_data::write_local_private_file_archive(archive, local_addr, &name)
+ };
+ writer
+ .wrap_err("Failed to save file to local user data")
+ .with_suggestion(|| "Local user data saves the file address above to disk, without it you need to keep track of the address yourself")?;
+ info!("Saved file to local user data");
+
Ok(())
}
+
pub async fn download(addr: &str, dest_path: &str, peers: Vec<Multiaddr>) -> Result<()> {
let mut client = crate::actions::connect_to_network(peers).await?;
crate::actions::download(addr, dest_path, &mut client).await
}
-pub fn list(_peers: Vec<Multiaddr>) -> Result<()> {
- println!("The file list feature is coming soon!");
+pub fn list() -> Result<()> {
+ // get public file archives
+ println!("Retrieving local user data...");
+ let file_archives = crate::user_data::get_local_public_file_archives()
+ .wrap_err("Failed to get local public file archives")?;
+
+ println!(
+ "โ
You have {} public file archive(s):",
+ file_archives.len()
+ );
+ for (addr, name) in file_archives {
+ println!("{}: {}", name, addr_to_str(addr));
+ }
+
+ // get private file archives
+ println!();
+ let private_file_archives = crate::user_data::get_local_private_file_archives()
+ .wrap_err("Failed to get local private file archives")?;
+
+ println!(
+ "โ
You have {} private file archive(s):",
+ private_file_archives.len()
+ );
+ for (addr, name) in private_file_archives {
+ println!("{}: {}", name, addr.address());
+ }
+
+ println!();
+ println!("> Note that private data addresses are not network addresses, they are only used for referring to private data client side.");
Ok(())
}
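One detail worth calling out from `upload`: what gets persisted locally differs by visibility. A sketch of the two bookkeeping paths, with `xor_name`, `private_data_access` and `name` standing for the same locals as in the diff:

```rust
// Public upload: the network address is the only thing to remember,
// and it doubles as the local key.
let addr = autonomi::client::address::addr_to_str(xor_name);
crate::user_data::write_local_public_file_archive(addr, &name)?;

// Private upload: the full access blob (secret) is written to disk,
// filed under a short local-only address; only the local address is
// ever printed by `file list`.
let secret_access = private_data_access.to_hex();
let local_addr = private_data_access.address();
crate::user_data::write_local_private_file_archive(secret_access, local_addr, &name)?;
```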
diff --git a/autonomi-cli/src/commands/register.rs b/autonomi-cli/src/commands/register.rs
index d559e6cc55..0aad3ab844 100644
--- a/autonomi-cli/src/commands/register.rs
+++ b/autonomi-cli/src/commands/register.rs
@@ -7,6 +7,7 @@
// permissions and limitations relating to use of the SAFE Network Software.
use crate::utils::collect_upload_summary;
+use crate::wallet::load_wallet;
use autonomi::client::registers::RegisterAddress;
use autonomi::client::registers::RegisterPermissions;
use autonomi::client::registers::RegisterSecretKey;
@@ -21,6 +22,7 @@ pub fn generate_key(overwrite: bool) -> Result<()> {
// check if the key already exists
let key_path = crate::keys::get_register_signing_key_path()?;
if key_path.exists() && !overwrite {
+ error!("Register key already exists at: {key_path:?}");
return Err(eyre!("Register key already exists at: {}", key_path.display()))
.with_suggestion(|| "if you want to overwrite the existing key, run the command with the --overwrite flag")
.with_warning(|| "overwriting the existing key might result in loss of access to any existing registers created using that key");
@@ -30,6 +32,7 @@ pub fn generate_key(overwrite: bool) -> Result<()> {
let key = RegisterSecretKey::random();
let path = crate::keys::create_register_signing_key_file(key)
.wrap_err("Failed to create new register key")?;
+ info!("Created new register key at: {path:?}");
println!("โ
Created new register key at: {}", path.display());
Ok(())
}
@@ -43,12 +46,13 @@ pub async fn cost(name: &str, peers: Vec<Multiaddr>) -> Result<()> {
.register_cost(name.to_string(), register_key)
.await
.wrap_err("Failed to get cost for register")?;
+ info!("Estimated cost to create a register with name {name}: {cost}");
println!("โ
The estimated cost to create a register with name {name} is: {cost}");
Ok(())
}
pub async fn create(name: &str, value: &str, public: bool, peers: Vec<Multiaddr>) -> Result<()> {
- let wallet = crate::keys::load_evm_wallet()?;
+ let wallet = load_wallet()?;
let register_key = crate::keys::get_register_signing_key()
.wrap_err("The register key is required to perform this action")?;
let mut client = crate::actions::connect_to_network(peers).await?;
@@ -56,8 +60,10 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec<Multiaddr>
let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver);
println!("Creating register with name: {name}");
+ info!("Creating register with name: {name}");
let register = if public {
println!("With public write access");
+ info!("With public write access");
let permissions = RegisterPermissions::new_anyone_can_write();
client
.register_create_with_permissions(
@@ -71,6 +77,7 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec<Multiaddr>
.wrap_err("Failed to create register")?
} else {
println!("With private write access");
+ info!("With private write access");
client
.register_create(
value.as_bytes().to_vec().into(),
@@ -84,18 +91,28 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec<Multiaddr>
let address = register.address();
- println!("โ
Register created at address: {address}");
- println!("With name: {name}");
- println!("And initial value: [{value}]");
+ if let Err(e) = upload_completed_tx.send(()) {
+ error!("Failed to send upload completed event: {e:?}");
+ eprintln!("Failed to send upload completed event: {e:?}");
+ }
- if let Ok(()) = upload_completed_tx.send(()) {
- let summary = upload_summary_thread.await?;
- if summary.record_count == 0 {
- println!("The register was already created on the network. No tokens were spent.");
- } else {
- println!("Total cost: {} AttoTokens", summary.tokens_spent);
- }
+ let summary = upload_summary_thread.await?;
+ if summary.record_count == 0 {
+ println!("โ
The register already exists on the network at address: {address}.");
+ println!("No tokens were spent.");
+ } else {
+ println!("โ
Register created at address: {address}");
+ println!("With name: {name}");
+ println!("And initial value: [{value}]");
+ info!("Register created at address: {address} with name: {name}");
+ println!("Total cost: {} AttoTokens", summary.tokens_spent);
}
+ info!("Summary of register creation: {summary:?}");
+
+ crate::user_data::write_local_register(address, name)
+ .wrap_err("Failed to save register to local user data")
+ .with_suggestion(|| "Local user data saves the register address above to disk, without it you need to keep track of the address yourself")?;
+ info!("Saved register to local user data");
Ok(())
}
@@ -116,13 +133,16 @@ pub async fn edit(address: String, name: bool, value: &str, peers: Vec<Multiaddr>) -> Result<(
};
println!("Getting register at address: {address}");
+ info!("Getting register at address: {address}");
let register = client
.register_get(address)
.await
@@ -157,6 +179,7 @@ pub async fn get(address: String, name: bool, peers: Vec<Multiaddr>) -> Result<(
let values = register.values();
println!("โ
Register found at address: {address}");
+ info!("Register found at address: {address}");
match values.as_slice() {
[one] => println!("With value: [{:?}]", String::from_utf8_lossy(one)),
_ => {
@@ -169,7 +192,12 @@ pub async fn get(address: String, name: bool, peers: Vec<Multiaddr>) -> Result<(
Ok(())
}
-pub fn list(_peers: Vec<Multiaddr>) -> Result<()> {
- println!("The register feature is coming soon!");
+pub fn list() -> Result<()> {
+ println!("Retrieving local user data...");
+ let registers = crate::user_data::get_local_registers()?;
+ println!("โ
You have {} register(s):", registers.len());
+ for (addr, name) in registers {
+ println!("{}: {}", name, addr.to_hex());
+ }
Ok(())
}
diff --git a/autonomi-cli/src/commands/vault.rs b/autonomi-cli/src/commands/vault.rs
index 9a8d708824..e7ce3f95c8 100644
--- a/autonomi-cli/src/commands/vault.rs
+++ b/autonomi-cli/src/commands/vault.rs
@@ -6,20 +6,108 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
+use crate::wallet::load_wallet;
use autonomi::Multiaddr;
+use color_eyre::eyre::Context;
use color_eyre::eyre::Result;
+use color_eyre::Section;
-pub fn cost(_peers: Vec<Multiaddr>) -> Result<()> {
- println!("The vault feature is coming soon!");
+pub async fn cost(peers: Vec<Multiaddr>) -> Result<()> {
+ let client = crate::actions::connect_to_network(peers).await?;
+ let vault_sk = crate::keys::get_vault_secret_key()?;
+
+ println!("Getting cost to create a new vault...");
+ let total_cost = client.vault_cost(&vault_sk).await?;
+
+ if total_cost.is_zero() {
+ println!("Vault already exists, modifying an existing vault is free");
+ } else {
+ println!("Cost to create a new vault: {total_cost} AttoTokens");
+ }
Ok(())
}
-pub fn create(_peers: Vec<Multiaddr>) -> Result<()> {
- println!("The vault feature is coming soon!");
+pub async fn create(peers: Vec<Multiaddr>) -> Result<()> {
+ let client = crate::actions::connect_to_network(peers).await?;
+ let wallet = load_wallet()?;
+ let vault_sk = crate::keys::get_vault_secret_key()?;
+
+ println!("Retrieving local user data...");
+ let local_user_data = crate::user_data::get_local_user_data()?;
+ let file_archives_len = local_user_data.file_archives.len();
+ let private_file_archives_len = local_user_data.private_file_archives.len();
+ let registers_len = local_user_data.registers.len();
+
+ println!("Pushing to network vault...");
+ let total_cost = client
+ .put_user_data_to_vault(&vault_sk, &wallet, local_user_data)
+ .await?;
+
+ if total_cost.is_zero() {
+ println!("โ
Successfully pushed user data to existing vault");
+ } else {
+ println!("โ
Successfully created new vault containing local user data");
+ }
+
+ println!("Total cost: {total_cost} AttoTokens");
+ println!("Vault contains:");
+ println!("{file_archives_len} public file archive(s)");
+ println!("{private_file_archives_len} private file archive(s)");
+ println!("{registers_len} register(s)");
Ok(())
}
-pub fn sync(_peers: Vec<Multiaddr>) -> Result<()> {
- println!("The vault feature is coming soon!");
+pub async fn sync(peers: Vec<Multiaddr>, force: bool) -> Result<()> {
+ let client = crate::actions::connect_to_network(peers).await?;
+ let vault_sk = crate::keys::get_vault_secret_key()?;
+ let wallet = load_wallet()?;
+
+ println!("Fetching vault from network...");
+ let net_user_data = client
+ .get_user_data_from_vault(&vault_sk)
+ .await
+ .wrap_err("Failed to fetch vault from network")
+ .with_suggestion(|| "Make sure you have already created a vault on the network")?;
+
+ if force {
+ println!("The force flag was provided, overwriting user data in the vault with local user data...");
+ } else {
+ println!("Syncing vault with local user data...");
+ crate::user_data::write_local_user_data(&net_user_data)?;
+ }
+
+ println!("Pushing local user data to network vault...");
+ let local_user_data = crate::user_data::get_local_user_data()?;
+ let file_archives_len = local_user_data.file_archives.len();
+ let private_file_archives_len = local_user_data.private_file_archives.len();
+ let registers_len = local_user_data.registers.len();
+ client
+ .put_user_data_to_vault(&vault_sk, &wallet, local_user_data)
+ .await?;
+
+ println!("โ
Successfully synced vault");
+ println!("Vault contains:");
+ println!("{file_archives_len} public file archive(s)");
+ println!("{private_file_archives_len} private file archive(s)");
+ println!("{registers_len} register(s)");
+ Ok(())
+}
+
+pub async fn load(peers: Vec<Multiaddr>) -> Result<()> {
+ let client = crate::actions::connect_to_network(peers).await?;
+ let vault_sk = crate::keys::get_vault_secret_key()?;
+
+ println!("Retrieving vault from network...");
+ let user_data = client.get_user_data_from_vault(&vault_sk).await?;
+ println!("Writing user data to disk...");
+ crate::user_data::write_local_user_data(&user_data)?;
+
+ println!("โ
Successfully loaded vault with:");
+ println!("{} public file archive(s)", user_data.file_archives.len());
+ println!(
+ "{} private file archive(s)",
+ user_data.private_file_archives.len()
+ );
+ println!("{} register(s)", user_data.registers.len());
Ok(())
}
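Seen together, `create`, `sync` and `load` implement a simple merge protocol over one network value. A condensed sketch of the `sync` core, with the client, wallet and vault key assumed already obtained as in the functions above:

```rust
use autonomi::client::vault::VaultSecretKey;
use autonomi::{Client, Wallet};
use color_eyre::eyre::Result;

async fn sync_core(
    client: &Client,
    vault_sk: &VaultSecretKey,
    wallet: &Wallet,
    force: bool,
) -> Result<()> {
    // 1. Pull the network copy of the user data.
    let net_user_data = client.get_user_data_from_vault(vault_sk).await?;

    // 2. Unless --force was given, fold it into the local files first,
    //    so the later push is a merge rather than an overwrite.
    if !force {
        crate::user_data::write_local_user_data(&net_user_data)?;
    }

    // 3. Push the (possibly merged) local state back to the vault.
    let local_user_data = crate::user_data::get_local_user_data()?;
    client
        .put_user_data_to_vault(vault_sk, wallet, local_user_data)
        .await?;
    Ok(())
}
```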
diff --git a/autonomi-cli/src/commands/wallet.rs b/autonomi-cli/src/commands/wallet.rs
new file mode 100644
index 0000000000..3b31a873b2
--- /dev/null
+++ b/autonomi-cli/src/commands/wallet.rs
@@ -0,0 +1,85 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::wallet::fs::{select_wallet, store_private_key};
+use crate::wallet::input::request_password;
+use crate::wallet::DUMMY_NETWORK;
+use autonomi::Wallet;
+use color_eyre::eyre::eyre;
+use color_eyre::Result;
+use prettytable::{Cell, Row, Table};
+
+const WALLET_PASSWORD_REQUIRED: bool = false;
+
+pub fn create(
+ no_password: bool,
+ private_key: Option<String>,
+ password: Option<String>,
+) -> Result<()> {
+ if no_password && password.is_some() {
+ return Err(eyre!(
+ "Only one of `--no-password` or `--password` may be specified"
+ ));
+ }
+
+ // Set a password for encryption or not
+ let encryption_password: Option<String> = match (no_password, password) {
+ (true, _) => None,
+ (false, Some(pass)) => Some(pass.to_owned()),
+ (false, None) => request_password(WALLET_PASSWORD_REQUIRED),
+ };
+
+ let wallet_private_key = if let Some(private_key) = private_key {
+ // Validate imported key
+ Wallet::new_from_private_key(DUMMY_NETWORK, &private_key)
+ .map_err(|_| eyre!("Please provide a valid secret key in hex format"))?;
+
+ private_key
+ } else {
+ // Create a new key
+ Wallet::random_private_key()
+ };
+
+ let wallet_address = Wallet::new_from_private_key(DUMMY_NETWORK, &wallet_private_key)
+ .expect("Infallible")
+ .address()
+ .to_string();
+
+ // Save the private key file
+ let file_path = store_private_key(&wallet_private_key, encryption_password)?;
+
+ println!("Wallet address: {wallet_address}");
+ println!("Stored wallet in: {file_path:?}");
+
+ Ok(())
+}
+
+pub async fn balance() -> Result<()> {
+ let wallet = select_wallet()?;
+
+ let token_balance = wallet.balance_of_tokens().await?;
+ let gas_balance = wallet.balance_of_gas_tokens().await?;
+
+ println!("Wallet balances: {}", wallet.address());
+
+ let mut table = Table::new();
+
+ table.add_row(Row::new(vec![
+ Cell::new("Token Balance"),
+ Cell::new(&token_balance.to_string()),
+ ]));
+
+ table.add_row(Row::new(vec![
+ Cell::new("Gas Balance"),
+ Cell::new(&gas_balance.to_string()),
+ ]));
+
+ table.printstd();
+
+ Ok(())
+}
diff --git a/autonomi-cli/src/main.rs b/autonomi-cli/src/main.rs
index de4cdcf4c4..f86d74f484 100644
--- a/autonomi-cli/src/main.rs
+++ b/autonomi-cli/src/main.rs
@@ -14,10 +14,12 @@ mod actions;
mod commands;
mod opt;
mod utils;
+mod wallet;
pub use access::data_dir;
pub use access::keys;
pub use access::network;
+pub use access::user_data;
use clap::Parser;
use color_eyre::Result;
diff --git a/autonomi-cli/src/opt.rs b/autonomi-cli/src/opt.rs
index 8f3fb20967..a49f6029b1 100644
--- a/autonomi-cli/src/opt.rs
+++ b/autonomi-cli/src/opt.rs
@@ -27,9 +27,9 @@ pub(crate) struct Opt {
/// `data-dir` is the default value.
///
/// The data directory location is platform specific:
- /// - Linux: $HOME/.local/share/safe/client/logs
- /// - macOS: $HOME/Library/Application Support/safe/client/logs
- /// - Windows: C:\Users\\AppData\Roaming\safe\client\logs
+ /// - Linux: $HOME/.local/share/safe/autonomi/logs
+ /// - macOS: $HOME/Library/Application Support/safe/autonomi/logs
+ /// - Windows: C:\Users\\AppData\Roaming\safe\autonomi\logs
#[allow(rustdoc::invalid_html_tags)]
#[clap(long, value_parser = LogOutputDest::parse_from_str, verbatim_doc_comment, default_value = "data-dir")]
pub log_output_dest: LogOutputDest,
diff --git a/autonomi-cli/src/wallet/encryption.rs b/autonomi-cli/src/wallet/encryption.rs
new file mode 100644
index 0000000000..bc673574ce
--- /dev/null
+++ b/autonomi-cli/src/wallet/encryption.rs
@@ -0,0 +1,171 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::wallet::error::Error;
+use rand::Rng;
+use ring::aead::{BoundKey, Nonce, NonceSequence};
+use ring::error::Unspecified;
+use std::num::NonZeroU32;
+use std::sync::LazyLock;
+
+const SALT_LENGTH: usize = 8;
+const NONCE_LENGTH: usize = 12;
+
+/// Number of iterations for pbkdf2.
+static ITERATIONS: LazyLock<NonZeroU32> =
+ LazyLock::new(|| NonZeroU32::new(100_000).expect("Infallible"));
+
+struct NonceSeq([u8; 12]);
+
+impl NonceSequence for NonceSeq {
+ fn advance(&mut self) -> Result<Nonce, Unspecified> {
+ Nonce::try_assume_unique_for_key(&self.0)
+ }
+}
+
+pub fn encrypt_private_key(private_key: &str, password: &str) -> Result<String, Error> {
+ // Generate a random salt
+ // Salt is used to ensure unique derived keys even for identical passwords
+ let mut salt = [0u8; SALT_LENGTH];
+ rand::thread_rng().fill(&mut salt);
+
+ // Generate a random nonce
+ // Nonce is used to ensure unique encryption outputs even for identical inputs
+ let mut nonce = [0u8; NONCE_LENGTH];
+ rand::thread_rng().fill(&mut nonce);
+
+ let mut key = [0; 32];
+
+ // Derive a key from the password using PBKDF2 with HMAC
+ // PBKDF2 is used for key derivation to mitigate brute-force attacks by making key derivation computationally expensive
+ // HMAC is used as the pseudorandom function for its security properties
+ ring::pbkdf2::derive(
+ ring::pbkdf2::PBKDF2_HMAC_SHA512,
+ *ITERATIONS,
+ &salt,
+ password.as_bytes(),
+ &mut key,
+ );
+
+ // Create an unbound key using CHACHA20_POLY1305 algorithm
+ // CHACHA20_POLY1305 is a fast and secure AEAD (Authenticated Encryption with Associated Data) algorithm
+ let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key)
+ .map_err(|_| Error::FailedToEncryptKey(String::from("Could not create unbound key")))?;
+
+ // Create a sealing key with the unbound key and nonce
+ let mut sealing_key = ring::aead::SealingKey::new(unbound_key, NonceSeq(nonce));
+ let aad = ring::aead::Aad::from(&[]);
+
+ // Convert the secret key to bytes
+ let private_key_bytes = String::from(private_key).into_bytes();
+ let mut encrypted_private_key = private_key_bytes;
+
+ // seal_in_place_append_tag encrypts the data and appends an authentication tag to ensure data integrity
+ sealing_key
+ .seal_in_place_append_tag(aad, &mut encrypted_private_key)
+ .map_err(|_| Error::FailedToEncryptKey(String::from("Could not seal sealing key")))?;
+
+ let mut encrypted_data = Vec::new();
+ encrypted_data.extend_from_slice(&salt);
+ encrypted_data.extend_from_slice(&nonce);
+ encrypted_data.extend_from_slice(&encrypted_private_key);
+
+ // Return the encrypted secret key along with salt and nonce encoded as hex strings
+ Ok(hex::encode(encrypted_data))
+}
+
+pub fn decrypt_private_key(encrypted_data: &str, password: &str) -> Result<String, Error> {
+ let encrypted_data = hex::decode(encrypted_data)
+ .map_err(|_| Error::FailedToDecryptKey(String::from("Encrypted data is invalid")))?;
+
+ let salt: [u8; SALT_LENGTH] = encrypted_data[..SALT_LENGTH]
+ .try_into()
+ .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find salt")))?;
+
+ let nonce: [u8; NONCE_LENGTH] = encrypted_data[SALT_LENGTH..SALT_LENGTH + NONCE_LENGTH]
+ .try_into()
+ .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find nonce")))?;
+
+ let encrypted_private_key = &encrypted_data[SALT_LENGTH + NONCE_LENGTH..];
+
+ let mut key = [0; 32];
+
+ // Reconstruct the key from salt and password
+ ring::pbkdf2::derive(
+ ring::pbkdf2::PBKDF2_HMAC_SHA512,
+ *ITERATIONS,
+ &salt,
+ password.as_bytes(),
+ &mut key,
+ );
+
+ // Create an unbound key from the previously reconstructed key
+ let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key)
+ .map_err(|_| Error::FailedToDecryptKey(String::from("Could not create unbound key")))?;
+
+ // Create an opening key using the unbound key and original nonce
+ let mut opening_key = ring::aead::OpeningKey::new(unbound_key, NonceSeq(nonce));
+ let aad = ring::aead::Aad::from(&[]);
+
+ let mut encrypted_private_key = encrypted_private_key.to_vec();
+
+ // Decrypt the encrypted secret key bytes
+ let decrypted_data = opening_key
+ .open_in_place(aad, &mut encrypted_private_key)
+ .map_err(|_| {
+ Error::FailedToDecryptKey(String::from(
+ "Could not open encrypted key, please check the password",
+ ))
+ })?;
+
+ let mut private_key_bytes = [0u8; 66];
+ private_key_bytes.copy_from_slice(&decrypted_data[0..66]);
+
+ // Create secret key from decrypted bytes
+ Ok(String::from_utf8(private_key_bytes.to_vec()).expect("not able to convert private key"))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use autonomi::Wallet;
+
+ #[test]
+ fn test_encrypt_decrypt_private_key() {
+ let key = Wallet::random_private_key();
+ let password = "password123".to_string();
+
+ let encrypted_key =
+ encrypt_private_key(&key, &password).expect("Failed to encrypt the private key");
+
+ let decrypted_key = decrypt_private_key(&encrypted_key, &password)
+ .expect("Failed to decrypt the private key");
+
+ assert_eq!(
+ decrypted_key, key,
+ "Decrypted key does not match the original private key"
+ );
+ }
+
+ #[test]
+ fn test_wrong_password() {
+ let key = Wallet::random_private_key();
+ let password = "password123".to_string();
+
+ let encrypted_key =
+ encrypt_private_key(&key, &password).expect("Failed to encrypt the private key");
+
+ let wrong_password = "password456".to_string();
+ let result = decrypt_private_key(&encrypted_key, &wrong_password);
+
+ assert!(
+ result.is_err(),
+ "Decryption should not succeed with a wrong password"
+ );
+ }
+}
diff --git a/autonomi-cli/src/wallet/error.rs b/autonomi-cli/src/wallet/error.rs
new file mode 100644
index 0000000000..b32455566d
--- /dev/null
+++ b/autonomi-cli/src/wallet/error.rs
@@ -0,0 +1,31 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+ #[error("Private key is invalid")]
+ InvalidPrivateKey,
+ #[error("Private key file is invalid")]
+ InvalidPrivateKeyFile,
+ #[error("Failed to encrypt private key: {0}")]
+ FailedToEncryptKey(String),
+ #[error("Failed to decrypt private key: {0}")]
+ FailedToDecryptKey(String),
+ #[error("Failed to write private key to disk: {0}")]
+ FailedToStorePrivateKey(String),
+ #[error("Failed to find wallets folder")]
+ WalletsFolderNotFound,
+ #[error("Failed to create wallets folder")]
+ FailedToCreateWalletsFolder,
+ #[error("Could not find private key file")]
+ PrivateKeyFileNotFound,
+ #[error("No wallets found. Create one using `wallet create`")]
+ NoWalletsFound,
+ #[error("Invalid wallet selection input")]
+ InvalidSelection,
+}
diff --git a/autonomi-cli/src/wallet/fs.rs b/autonomi-cli/src/wallet/fs.rs
new file mode 100644
index 0000000000..a467961016
--- /dev/null
+++ b/autonomi-cli/src/wallet/fs.rs
@@ -0,0 +1,202 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::wallet::encryption::{decrypt_private_key, encrypt_private_key};
+use crate::wallet::error::Error;
+use crate::wallet::input::{get_password_input, get_wallet_selection_input};
+use crate::wallet::DUMMY_NETWORK;
+use autonomi::{get_evm_network_from_env, RewardsAddress, Wallet};
+use const_hex::traits::FromHex;
+use prettytable::{Cell, Row, Table};
+use std::ffi::OsString;
+use std::io::Read;
+use std::path::PathBuf;
+use std::sync::OnceLock;
+
+const ENCRYPTED_PRIVATE_KEY_EXT: &str = ".encrypted";
+
+pub static SELECTED_WALLET_ADDRESS: OnceLock<String> = OnceLock::new();
+
+/// Creates the wallets folder if it is missing and returns the folder path.
+pub(crate) fn get_client_wallet_dir_path() -> Result<PathBuf, Error> {
+ let mut home_dirs = dirs_next::data_dir().ok_or(Error::WalletsFolderNotFound)?;
+ home_dirs.push("safe");
+ home_dirs.push("autonomi");
+ home_dirs.push("wallets");
+
+ std::fs::create_dir_all(home_dirs.as_path()).map_err(|_| Error::FailedToCreateWalletsFolder)?;
+
+ Ok(home_dirs)
+}
+
+/// Writes the private key (hex-encoded) to disk.
+///
+/// When a password is set, the private key file will be encrypted.
+pub(crate) fn store_private_key(
+ private_key: &str,
+ encryption_password: Option<String>,
+) -> Result<OsString, Error> {
+ let wallet = Wallet::new_from_private_key(DUMMY_NETWORK, private_key)
+ .map_err(|_| Error::InvalidPrivateKey)?;
+
+ // Wallet address
+ let wallet_address = wallet.address().to_string();
+ let wallets_folder = get_client_wallet_dir_path()?;
+
+ // If `encryption_password` is provided, the private key will be encrypted with the password.
+ // Else it will be saved as plain text.
+ if let Some(password) = encryption_password.as_ref() {
+ let encrypted_key = encrypt_private_key(private_key, password)?;
+ let file_name = format!("{wallet_address}{ENCRYPTED_PRIVATE_KEY_EXT}");
+ let file_path = wallets_folder.join(file_name);
+
+ std::fs::write(file_path.clone(), encrypted_key)
+ .map_err(|err| Error::FailedToStorePrivateKey(err.to_string()))?;
+
+ Ok(file_path.into_os_string())
+ } else {
+ let file_path = wallets_folder.join(wallet_address);
+
+ std::fs::write(file_path.clone(), private_key)
+ .map_err(|err| Error::FailedToStorePrivateKey(err.to_string()))?;
+
+ Ok(file_path.into_os_string())
+ }
+}
+
+/// Loads the private key (hex-encoded) from disk.
+///
+/// If the private key file is encrypted, the function will prompt for the decryption password in the CLI.
+pub(crate) fn load_private_key(wallet_address: &str) -> Result<String, Error> {
+ let wallets_folder = get_client_wallet_dir_path()?;
+
+ let mut file_name = wallet_address.to_string();
+
+ // Check if a file with the encrypted extension exists
+ let encrypted_file_path =
+ wallets_folder.join(format!("{wallet_address}{ENCRYPTED_PRIVATE_KEY_EXT}"));
+
+ let is_encrypted = encrypted_file_path.exists();
+
+ if is_encrypted {
+ file_name.push_str(ENCRYPTED_PRIVATE_KEY_EXT);
+ }
+
+ let file_path = wallets_folder.join(file_name);
+
+ let mut file = std::fs::File::open(&file_path).map_err(|_| Error::PrivateKeyFileNotFound)?;
+
+ let mut buffer = String::new();
+ file.read_to_string(&mut buffer)
+ .map_err(|_| Error::InvalidPrivateKeyFile)?;
+
+ // If the file is encrypted, prompt for the password and decrypt the key.
+ if is_encrypted {
+ let password = get_password_input("Enter password to decrypt wallet:");
+
+ decrypt_private_key(&buffer, &password)
+ } else {
+ Ok(buffer)
+ }
+}
+
+pub(crate) fn load_wallet_from_address(wallet_address: &str) -> Result<Wallet, Error> {
+ let network = get_evm_network_from_env().expect("Could not load EVM network from environment");
+ let private_key = load_private_key(wallet_address)?;
+ let wallet =
+ Wallet::new_from_private_key(network, &private_key).expect("Could not initialize wallet");
+ Ok(wallet)
+}
+
+pub(crate) fn select_wallet() -> Result<Wallet, Error> {
+ let wallet_address = select_wallet_address()?;
+ load_wallet_from_address(&wallet_address)
+}
+
+pub(crate) fn select_wallet_private_key() -> Result<String, Error> {
+ let wallet_address = select_wallet_address()?;
+ load_private_key(&wallet_address)
+}
+
+pub(crate) fn select_wallet_address() -> Result<String, Error> {
+ // Try if a wallet address was already selected this session
+ if let Some(wallet_address) = SELECTED_WALLET_ADDRESS.get() {
+ return Ok(wallet_address.clone());
+ }
+
+ let wallets_folder = get_client_wallet_dir_path()?;
+ let wallet_files = get_wallet_files(&wallets_folder)?;
+
+ let wallet_address = match wallet_files.len() {
+ 0 => Err(Error::NoWalletsFound),
+ 1 => Ok(filter_wallet_file_extension(&wallet_files[0])),
+ _ => get_wallet_selection(wallet_files),
+ }?;
+
+ Ok(SELECTED_WALLET_ADDRESS
+ .get_or_init(|| wallet_address)
+ .to_string())
+}
+
+fn get_wallet_selection(wallet_files: Vec<String>) -> Result<String, Error> {
+ list_wallets(&wallet_files);
+
+ let selected_index = get_wallet_selection_input("Select by index:")
+ .parse::<usize>()
+ .map_err(|_| Error::InvalidSelection)?;
+
+ if selected_index < 1 || selected_index > wallet_files.len() {
+ return Err(Error::InvalidSelection);
+ }
+
+ Ok(filter_wallet_file_extension(
+ &wallet_files[selected_index - 1],
+ ))
+}
+
+fn list_wallets(wallet_files: &[String]) {
+ println!("Wallets:");
+
+ let mut table = Table::new();
+
+ table.add_row(Row::new(vec![
+ Cell::new("Index"),
+ Cell::new("Address"),
+ Cell::new("Encrypted"),
+ ]));
+
+ for (index, wallet_file) in wallet_files.iter().enumerate() {
+ let encrypted = wallet_file.contains(ENCRYPTED_PRIVATE_KEY_EXT);
+
+ table.add_row(Row::new(vec![
+ Cell::new(&(index + 1).to_string()),
+ Cell::new(&filter_wallet_file_extension(wallet_file)),
+ Cell::new(&encrypted.to_string()),
+ ]));
+ }
+
+ table.printstd();
+}
+
+fn get_wallet_files(wallets_folder: &PathBuf) -> Result<Vec<String>, Error> {
+ let wallet_files = std::fs::read_dir(wallets_folder)
+ .map_err(|_| Error::WalletsFolderNotFound)?
+ .filter_map(Result::ok)
+ .filter_map(|dir_entry| dir_entry.file_name().into_string().ok())
+ .filter(|file_name| {
+ let cleaned_file_name = filter_wallet_file_extension(file_name);
+ RewardsAddress::from_hex(cleaned_file_name).is_ok()
+ })
+ .collect::<Vec<_>>();
+
+ Ok(wallet_files)
+}
+
+fn filter_wallet_file_extension(wallet_file: &str) -> String {
+ wallet_file.replace(ENCRYPTED_PRIVATE_KEY_EXT, "")
+}
diff --git a/autonomi-cli/src/wallet/input.rs b/autonomi-cli/src/wallet/input.rs
new file mode 100644
index 0000000000..94e3223cd8
--- /dev/null
+++ b/autonomi-cli/src/wallet/input.rs
@@ -0,0 +1,68 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+pub(crate) fn get_wallet_selection_input(prompt: &str) -> String {
+ println!("{prompt}");
+
+ let mut buffer = String::new();
+ let stdin = std::io::stdin();
+
+ if stdin.read_line(&mut buffer).is_err() {
+ // consider if error should process::exit(1) here
+ return "".to_string();
+ };
+
+ // Remove leading and trailing whitespace
+ buffer.trim().to_owned()
+}
+
+pub(crate) fn get_password_input(prompt: &str) -> String {
+ rpassword::prompt_password(prompt)
+ .map(|str| str.trim().into())
+ .unwrap_or_default()
+}
+
+pub(crate) fn confirm_password(password: &str) -> bool {
+ const MAX_RETRIES: u8 = 2;
+
+ for _ in 0..MAX_RETRIES {
+ if get_password_input("Repeat password: ") == password {
+ return true;
+ }
+ println!("Passwords do not match.");
+ }
+
+ false
+}
+
+pub(crate) fn request_password(required: bool) -> Option<String> {
+ let prompt = if required {
+ "Enter password: "
+ } else {
+ "Enter password (leave empty for none): "
+ };
+
+ loop {
+ let password = get_password_input(prompt);
+
+ if password.is_empty() {
+ if required {
+ println!("Password is required.");
+ continue;
+ }
+
+ return None;
+ }
+
+ if confirm_password(&password) {
+ return Some(password);
+ }
+
+ println!("Please set a new password.");
+ }
+}
diff --git a/autonomi-cli/src/wallet/mod.rs b/autonomi-cli/src/wallet/mod.rs
new file mode 100644
index 0000000000..b0dddfb889
--- /dev/null
+++ b/autonomi-cli/src/wallet/mod.rs
@@ -0,0 +1,42 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::keys::{get_secret_key_from_env, load_evm_wallet_from_env};
+use crate::wallet::fs::{select_wallet, select_wallet_private_key};
+use autonomi::{EvmNetwork, Wallet};
+
+pub(crate) mod encryption;
+pub(crate) mod error;
+pub(crate) mod fs;
+pub(crate) mod input;
+
+pub const DUMMY_NETWORK: EvmNetwork = EvmNetwork::ArbitrumSepolia;
+
+/// Load wallet from ENV or disk
+pub(crate) fn load_wallet() -> color_eyre::Result<Wallet> {
+ // First try wallet from ENV
+ if let Ok(wallet) = load_evm_wallet_from_env() {
+ return Ok(wallet);
+ }
+
+ let wallet = select_wallet()?;
+
+ Ok(wallet)
+}
+
+/// Load wallet private key from ENV or disk
+pub(crate) fn load_wallet_private_key() -> color_eyre::Result<String> {
+ // First try wallet private key from ENV
+ if let Ok(private_key) = get_secret_key_from_env() {
+ return Ok(private_key);
+ }
+
+ let private_key = select_wallet_private_key()?;
+
+ Ok(private_key)
+}
diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml
index b44ca2233c..6f5491a4f3 100644
--- a/autonomi/Cargo.toml
+++ b/autonomi/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["MaidSafe Developers <dev@maidsafe.net>"]
description = "Autonomi client API"
name = "autonomi"
license = "GPL-3.0"
-version = "0.2.0"
+version = "0.2.2"
edition = "2021"
homepage = "https://maidsafe.net"
readme = "README.md"
@@ -13,14 +13,15 @@ repository = "https://github.com/maidsafe/safe_network"
crate-type = ["cdylib", "rlib"]
[features]
-default = ["data"]
+default = ["data", "vault"]
full = ["data", "registers", "vault"]
data = []
-vault = ["data"]
+vault = ["data", "registers"]
fs = ["tokio/fs", "data"]
local = ["sn_networking/local", "sn_evm/local"]
registers = ["data"]
loud = []
+external-signer = ["sn_evm/external-signer", "data"]
[dependencies]
bip39 = "2.0.0"
@@ -37,11 +38,11 @@ rand = "0.8.5"
rmp-serde = "1.1.1"
self_encryption = "~0.30.0"
serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_networking = { path = "../sn_networking", version = "0.19.0" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
-sn_protocol = { version = "0.17.12", path = "../sn_protocol" }
-sn_registers = { path = "../sn_registers", version = "0.4.0" }
-sn_evm = { path = "../sn_evm", version = "0.1.1" }
+sn_networking = { path = "../sn_networking", version = "0.19.1" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" }
+sn_protocol = { version = "0.17.13", path = "../sn_protocol" }
+sn_registers = { path = "../sn_registers", version = "0.4.1" }
+sn_evm = { path = "../sn_evm", version = "0.1.2" }
thiserror = "1.0.23"
tokio = { version = "1.35.0", features = ["sync"] }
tracing = { version = "~0.1.26" }
@@ -51,12 +52,16 @@ futures = "0.3.30"
wasm-bindgen = "0.2.93"
wasm-bindgen-futures = "0.4.43"
serde-wasm-bindgen = "0.6.5"
+sha2 = "0.10.6"
+blst = "0.3.13"
+blstrs = "0.7.1"
[dev-dependencies]
+alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] }
eyre = "0.6.5"
sha2 = "0.10.6"
-sn_logging = { path = "../sn_logging", version = "0.2.37" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
+sn_logging = { path = "../sn_logging", version = "0.2.38" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" }
# Do not specify the version field. Release process expects even the local dev deps to be published.
# Removing the version field is a workaround.
test_utils = { path = "../test_utils" }
@@ -66,12 +71,13 @@ wasm-bindgen-test = "0.3.43"
[target.'cfg(target_arch = "wasm32")'.dependencies]
console_error_panic_hook = "0.1.7"
-evmlib = { path = "../evmlib", version = "0.1.1", features = ["wasm-bindgen"] }
+evmlib = { path = "../evmlib", version = "0.1.2", features = ["wasm-bindgen"] }
# See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available
instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] }
js-sys = "0.3.70"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-web = "0.1.3"
+xor_name = { version = "5.0.0", features = ["serialize-hex"] }
[lints]
workspace = true
diff --git a/autonomi/README.md b/autonomi/README.md
index 3b27c6b0f0..5b95af38e4 100644
--- a/autonomi/README.md
+++ b/autonomi/README.md
@@ -26,7 +26,7 @@ autonomi = { path = "../autonomi", version = "0.1.0" }
cargo run --bin evm_testnet
```
-3. Run a local network with the `local` feature and use the local evm node.
+3. Run a local network with the `local` feature and use the local evm node.
```sh
cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-local
@@ -35,9 +35,9 @@ cargo run --bin=safenode-manager --features=local -- local run --build --clean -
4. Then run the tests with the `local` feature and pass the EVM params again:
```sh
-$ EVM_NETWORK=local cargo test --package=autonomi --features=local
+EVM_NETWORK=local cargo test --package=autonomi --features=local
# Or with logs
-$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture
+RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture
```
### Using a live testnet or mainnet
@@ -55,25 +55,64 @@ cargo run --bin=safenode-manager --features=local -- local run --build --clean -
payment tokens on the network (in this case Arbitrum One):
```sh
-$ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY=<PRIVATE_KEY> cargo test --package=autonomi --features=local
+EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY=<PRIVATE_KEY> cargo test --package=autonomi --features=local
# Or with logs
-$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY=<PRIVATE_KEY> cargo test --package=autonomi --features=local -- --nocapture
+RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY=<PRIVATE_KEY> cargo test --package=autonomi --features=local -- --nocapture
```
### WebAssembly
To run a WASM test
+
- Install `wasm-pack`
-- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you have `rustup`: `rustup target add wasm32-unknown-unknown`.)
-- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address, e.g. `/ip4/<ip>/tcp/<port>/ws/p2p/<peer ID>`.
+- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you
+ have `rustup`: `rustup target add wasm32-unknown-unknown`.)
+- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address,
+ e.g. `/ip4/<ip>/tcp/<port>/ws/p2p/<peer ID>`.
- As well as the other environment variables needed for EVM payments (e.g. `RPC_URL`).
- Optionally specify the specific test, e.g. `-- put` to run `put()` in `wasm.rs` only.
Example:
-````sh
+
+```sh
SAFE_PEERS=/ip4/<ip>/tcp/<port>/ws/p2p/<peer ID> wasm-pack test --release --firefox autonomi --features=data,files --test wasm -- put
```
+#### Test from JS in the browser
+
+`wasm-pack test` does not execute JavaScript, but runs mostly WebAssembly. Again, make sure the environment variables are
+set and build the JS package:
+
+```sh
+wasm-pack build --dev --target=web autonomi --features=vault
+```
+
+Then cd into `autonomi/tests-js`, and use `npm` to install and serve the test html file.
+
+```sh
+cd autonomi/tests-js
+npm install
+npm run serve
+```
+
+Then go to `http://127.0.0.1:8080/tests-js` in the browser. Here, enter a `ws` multiaddr of a local node and press 'run'.
+
+#### MetaMask example
+
+There is a MetaMask example for doing a simple put operation.
+
+Build the package with the `external-signer` feature (and again with the env variables) and run a webserver, e.g. with
+Python:
+
+```sh
+wasm-pack build --dev --target=web autonomi --features=external-signer
+python -m http.server --directory=autonomi 8000
+```
+
+Then visit `http://127.0.0.1:8000/examples/metamask` in your (modern) browser.
+
+Here, enter a `ws` multiaddr of a local node and press 'run'.
## Faucet (local)
diff --git a/autonomi/WASM_docs.md b/autonomi/WASM_docs.md
index 995809b8bd..6cf080113f 100644
--- a/autonomi/WASM_docs.md
+++ b/autonomi/WASM_docs.md
@@ -1,160 +1,24 @@
-## JavaScript Autonomi API Documentation
+# JavaScript Autonomi API Documentation
Note that this is a first version and will be subject to change.
-### **Client**
+The entry point for connecting to the network is {@link Client.connect}.
-The `Client` object allows interaction with the network to store and retrieve data. Below are the available methods for the `Client` class.
+This API is a wrapper around the Rust API, found here: https://docs.rs/autonomi/latest/autonomi. The Rust API contains more detailed documentation on concepts and some types.
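+
+As a minimal sketch of connecting (assuming a local node's websocket multiaddr):
+
+```javascript
+import init, { Client } from 'autonomi';
+
+await init();
+const client = await Client.connect(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]);
+```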
-#### **Constructor**
+## Addresses
-```javascript
-let client = await new Client([multiaddress]);
-```
-
-- **multiaddress** (Array of Strings): A list of network addresses for the client to connect to.
-
-Example:
-```javascript
-let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]);
-```
-
-#### **Methods**
-
-##### **put(data, wallet)**
-
-Uploads a piece of encrypted data to the network.
-
-```javascript
-let result = await client.put(data, wallet);
-```
-
-- **data** (Uint8Array): The data to be stored.
-- **wallet** (Wallet): The wallet used to pay for the storage.
-
-Returns:
-- **result** (XorName): The XOR address of the stored data.
-
-Example:
-```javascript
-let wallet = getFundedWallet();
-let data = new Uint8Array([1, 2, 3]);
-let result = await client.put(data, wallet);
-```
-
-##### **get(data_map_addr)**
-
-Fetches encrypted data from the network using its XOR address.
-
-```javascript
-let data = await client.get(data_map_addr);
-```
-
-- **data_map_addr** (XorName): The XOR address of the data to fetch.
-
-Returns:
-- **data** (Uint8Array): The fetched data.
-
-Example:
-```javascript
-let data = await client.get(result);
-```
-
-##### **cost(data)**
-
-Gets the cost of storing the provided data on the network.
-
-```javascript
-let cost = await client.cost(data);
-```
-
-- **data** (Uint8Array): The data whose storage cost you want to calculate.
-
-Returns:
-- **cost** (AttoTokens): The calculated cost for storing the data.
-
-Example:
-```javascript
-let cost = await client.cost(new Uint8Array([1, 2, 3]));
-```
-
----
-
-### **Wallet**
-
-The `Wallet` object represents an Ethereum wallet used for data payments.
-
-#### **Methods**
-
-##### **new_from_private_key(network, private_key)**
-
-Creates a new wallet using the given private key.
-
-```javascript
-let wallet = Wallet.new_from_private_key(network, private_key);
-```
+For addresses (chunk, data, archives, etc.) we're using hex-encoded strings containing a 256-bit XOR address. For example: `abcdef0123456789000000000000000000000000000000000000000000000000`.
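+
+For example, fetching data by its address (a sketch; assumes a connected `client` as in the example below):
+
+```javascript
+const addr = "abcdef0123456789000000000000000000000000000000000000000000000000"; // 64 hex chars = 256 bits
+const data = await client.get(addr);
+```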
-- **network** (EvmNetwork): The network to which the wallet connects.
-- **private_key** (String): The private key of the wallet.
-
-Returns:
-- **wallet** (Wallet): The created wallet.
-
-Example:
-```javascript
-let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here");
-```
-
-##### **address()**
-
-Gets the wallet's address.
+## Example
```javascript
-let address = wallet.address();
-```
-
-Returns:
-- **address** (Address): The wallet's address.
+import init, { Client, Wallet, getEvmNetwork } from 'autonomi';
-Example:
-```javascript
-let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here");
-let address = wallet.address();
-```
-
----
-
-### **EvmNetwork**
-
-The `EvmNetwork` object represents the blockchain network.
-
-#### **Methods**
-
-##### **default()**
-
-Connects to the default network.
-
-```javascript
-let network = EvmNetwork.default();
-```
-
-Returns:
-- **network** (EvmNetwork): The default network.
-
-Example:
-```javascript
-let network = EvmNetwork.default();
-```
-
----
-
-### Example Usage:
-
-```javascript
let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]);
console.log("connected");
-let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here");
+let wallet = Wallet.new_from_private_key(getEvmNetwork(), "your_private_key_here");
console.log("wallet retrieved");
let data = new Uint8Array([1, 2, 3]);
@@ -164,7 +28,3 @@ console.log("Data stored at:", result);
let fetchedData = await client.get(result);
console.log("Data retrieved:", fetchedData);
```
-
----
-
-This documentation covers the basic usage of `Client`, `Wallet`, and `EvmNetwork` types in the JavaScript API.
\ No newline at end of file
diff --git a/autonomi/examples/metamask/index.html b/autonomi/examples/metamask/index.html
new file mode 100644
index 0000000000..50844bd7f9
--- /dev/null
+++ b/autonomi/examples/metamask/index.html
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/autonomi/examples/metamask/index.js b/autonomi/examples/metamask/index.js
new file mode 100644
index 0000000000..633eb36317
--- /dev/null
+++ b/autonomi/examples/metamask/index.js
@@ -0,0 +1,149 @@
+import init, * as autonomi from '../../pkg/autonomi.js';
+
+export async function externalSignerPut(peerAddr) {
+ try {
+ // Check if MetaMask (window.ethereum) is available
+ if (typeof window.ethereum === 'undefined') {
+ throw new Error('MetaMask is not installed');
+ }
+
+ // Request account access from MetaMask
+ const accounts = await window.ethereum.request({method: 'eth_requestAccounts'});
+ const sender = accounts[0]; // Get the first account
+
+ // Setup API client
+ await init();
+
+ autonomi.logInit("autonomi=trace");
+
+ const client = await autonomi.Client.connect([peerAddr]);
+
+ // Generate 1MB of random bytes in a Uint8Array
+ const data = new Uint8Array(1024 * 1024).map(() => Math.floor(Math.random() * 256));
+
+ // Get quotes and payment information (this would need actual implementation)
+ const [quotes, quotePayments, free_chunks] = await client.getQuotes(data);
+
+ // Get the EVM network
+ let evmNetwork = autonomi.getEvmNetwork();
+
+ // Form quotes payment calldata
+ const payForQuotesCalldata = autonomi.getPayForQuotesCalldata(
+ evmNetwork,
+ quotePayments
+ );
+
+ // Form approve to spend tokens calldata
+ const approveCalldata = autonomi.getApproveToSpendTokensCalldata(
+ evmNetwork,
+ payForQuotesCalldata.approve_spender,
+ payForQuotesCalldata.approve_amount
+ );
+
+ console.log("Sending approve transaction..");
+
+ // Approve to spend tokens
+ let txHash = await sendTransaction({
+ from: sender,
+ to: approveCalldata[1],
+ data: approveCalldata[0]
+ });
+
+ await waitForTransactionConfirmation(txHash);
+
+ let payments = {};
+
+ // Execute batched quote payment transactions
+ for (const [calldata, quoteHashes] of payForQuotesCalldata.batched_calldata_map) {
+ console.log("Sending batched data payment transaction..");
+
+ let txHash = await sendTransaction({
+ from: sender,
+ to: payForQuotesCalldata.to,
+ data: calldata
+ });
+
+ await waitForTransactionConfirmation(txHash);
+
+ // Record the transaction hashes for each quote
+ quoteHashes.forEach(quoteHash => {
+ payments[quoteHash] = txHash;
+ });
+ }
+
+ // Generate payment proof
+ const proof = autonomi.getPaymentProofFromQuotesAndPayments(quotes, payments);
+
+ // Submit the data with proof of payment
+ const addr = await client.dataPutWithProof(data, proof);
+
+ // Wait for a few seconds to allow data to propagate
+ await new Promise(resolve => setTimeout(resolve, 10000));
+
+ // Fetch the data back
+ const fetchedData = await client.dataGet(addr);
+
+ if (fetchedData.toString() === data.toString()) {
+ console.log("Fetched data matches the original data!");
+ } else {
+ throw new Error("Fetched data does not match original data!")
+ }
+
+ console.log("Data successfully put and verified!");
+
+ } catch (error) {
+ console.error("An error occurred:", error);
+ }
+}
+
+// Helper function to send a transaction through MetaMask using Ethereum JSON-RPC
+async function sendTransaction({from, to, data}) {
+ const transactionParams = {
+ from: from, // Sender address
+ to: to, // Destination address
+ data: data, // Calldata (transaction input)
+ };
+
+ try {
+ // Send the transaction via MetaMask and get the transaction hash
+ const txHash = await window.ethereum.request({
+ method: 'eth_sendTransaction',
+ params: [transactionParams]
+ });
+
+ console.log(`Transaction sent with hash: ${txHash}`);
+ return txHash; // Return the transaction hash
+
+ } catch (error) {
+ console.error("Failed to send transaction:", error);
+ throw error;
+ }
+}
+
+async function waitForTransactionConfirmation(txHash) {
+ const delay = (ms) => new Promise(resolve => setTimeout(resolve, ms));
+
+ // Poll for the transaction receipt
+ while (true) {
+ // Query the transaction receipt
+ const receipt = await window.ethereum.request({
+ method: 'eth_getTransactionReceipt',
+ params: [txHash],
+ });
+
+ // If the receipt is found, the transaction has been mined
+ if (receipt !== null) {
+ // Check if the transaction was successful (status is '0x1')
+ if (receipt.status === '0x1') {
+ console.log('Transaction successful!', receipt);
+ return receipt; // Return the transaction receipt
+ } else {
+ console.log('Transaction failed!', receipt);
+ throw new Error('Transaction failed');
+ }
+ }
+
+ // Wait for 1 second before checking again
+ await delay(1000);
+ }
+}
\ No newline at end of file
diff --git a/autonomi/index.html b/autonomi/index.html
deleted file mode 100644
index bd806016ca..0000000000
--- a/autonomi/index.html
+++ /dev/null
@@ -1,48 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs
index d3cf9714ec..04ad120b19 100644
--- a/autonomi/src/client/archive.rs
+++ b/autonomi/src/client/archive.rs
@@ -6,29 +6,136 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
-use std::{collections::HashMap, path::PathBuf};
+use std::{
+ collections::HashMap,
+ path::{Path, PathBuf},
+};
+
+use sn_networking::target_arch::{Duration, SystemTime, UNIX_EPOCH};
use super::{
- data::DataAddr,
- data::{GetError, PutError},
+ data::{CostError, DataAddr, GetError, PutError},
Client,
};
use bytes::Bytes;
use serde::{Deserialize, Serialize};
-use sn_evm::EvmWallet;
+use sn_evm::{AttoTokens, EvmWallet};
use xor_name::XorName;
/// The address of an archive on the network. Points to an [`Archive`].
pub type ArchiveAddr = XorName;
+use thiserror::Error;
+
+#[derive(Error, Debug, PartialEq, Eq)]
+pub enum RenameError {
+ #[error("File not found in archive: {0}")]
+ FileNotFound(PathBuf),
+}
+
/// An archive of files that contains file paths, their metadata and the files' data addresses
/// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+/// Archives are public meaning anyone can read the data in the archive. For private archives use [`crate::client::archive_private::PrivateArchive`].
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct Archive {
- pub map: HashMap<PathBuf, DataAddr>,
+ map: HashMap<PathBuf, (DataAddr, Metadata)>,
+}
+
+/// Metadata for a file in an archive. Time values are UNIX timestamps.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct Metadata {
+ /// When the file was (last) uploaded to the network.
+ pub uploaded: u64,
+ /// File creation time on local file system. See [`std::fs::Metadata::created`] for details per OS.
+ pub created: u64,
+ /// Last file modification time taken from local file system. See [`std::fs::Metadata::modified`] for details per OS.
+ pub modified: u64,
+}
+
+impl Metadata {
+ /// Create a new metadata struct
+ pub fn new() -> Self {
+ let now = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .unwrap_or(Duration::from_secs(0))
+ .as_secs();
+ Self {
+ uploaded: now,
+ created: now,
+ modified: now,
+ }
+ }
+}
+
+impl Default for Metadata {
+ fn default() -> Self {
+ Self::new()
+ }
}
impl Archive {
+ /// Create a new empty local archive
+ /// Note that this does not upload the archive to the network
+ pub fn new() -> Self {
+ Self {
+ map: HashMap::new(),
+ }
+ }
+
+ /// Rename a file in an archive
+ /// Note that this does not upload the archive to the network
+ pub fn rename_file(&mut self, old_path: &Path, new_path: &Path) -> Result<(), RenameError> {
+ let (data_addr, mut meta) = self
+ .map
+ .remove(old_path)
+ .ok_or(RenameError::FileNotFound(old_path.to_path_buf()))?;
+ let now = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .unwrap_or(Duration::from_secs(0))
+ .as_secs();
+ meta.modified = now;
+ self.map.insert(new_path.to_path_buf(), (data_addr, meta));
+ Ok(())
+ }
+
+ /// Add a file to a local archive
+ /// Note that this does not upload the archive to the network
+ pub fn add_file(&mut self, path: PathBuf, data_addr: DataAddr, meta: Metadata) {
+ self.map.insert(path, (data_addr, meta));
+ }
+
+ /// Add a file to a local archive, with default metadata
+ /// Note that this does not upload the archive to the network
+ pub fn add_new_file(&mut self, path: PathBuf, data_addr: DataAddr) {
+ self.map.insert(path, (data_addr, Metadata::new()));
+ }
+
+ /// List all files in the archive
+ pub fn files(&self) -> Vec<(PathBuf, Metadata)> {
+ self.map
+ .iter()
+ .map(|(path, (_, meta))| (path.clone(), meta.clone()))
+ .collect()
+ }
+
+ /// List all data addresses of the files in the archive
+ pub fn addresses(&self) -> Vec<DataAddr> {
+ self.map.values().map(|(addr, _)| *addr).collect()
+ }
+
+ /// Iterate over the archive items
+ /// Returns an iterator over (PathBuf, DataAddr, Metadata)
+ pub fn iter(&self) -> impl Iterator<Item = (&PathBuf, &DataAddr, &Metadata)> {
+ self.map
+ .iter()
+ .map(|(path, (addr, meta))| (path, addr, meta))
+ }
+
+ /// Get the underlying map
+ pub fn map(&self) -> &HashMap<PathBuf, (DataAddr, Metadata)> {
+ &self.map
+ }
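+
+ // Example of a purely local flow (hypothetical `data_addr`); nothing here touches the network:
+ //
+ // let mut archive = Archive::new();
+ // archive.add_new_file(PathBuf::from("foo.txt"), data_addr);
+ // archive.rename_file(Path::new("foo.txt"), Path::new("bar.txt"))?;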
+
/// Deserialize from bytes.
pub fn from_bytes(data: Bytes) -> Result<Archive, rmp_serde::decode::Error> {
let root: Archive = rmp_serde::from_slice(&data[..])?;
@@ -63,4 +170,12 @@ impl Client {
.map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?;
self.data_put(bytes, wallet).await
}
+
+ /// Get the cost to upload an archive
+ pub async fn archive_cost(&self, archive: Archive) -> Result<AttoTokens, CostError> {
+ let bytes = archive
+ .into_bytes()
+ .map_err(|e| CostError::Serialization(format!("Failed to serialize archive: {e:?}")))?;
+ self.data_cost(bytes).await
+ }
}
diff --git a/autonomi/src/client/archive_private.rs b/autonomi/src/client/archive_private.rs
new file mode 100644
index 0000000000..a7ba854380
--- /dev/null
+++ b/autonomi/src/client/archive_private.rs
@@ -0,0 +1,140 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::{
+ collections::HashMap,
+ path::{Path, PathBuf},
+};
+
+use sn_networking::target_arch::{Duration, SystemTime, UNIX_EPOCH};
+
+use super::{
+ archive::{Metadata, RenameError},
+ data::{GetError, PutError},
+ data_private::PrivateDataAccess,
+ Client,
+};
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+use sn_evm::EvmWallet;
+
+/// The address of a private archive
+/// Contains the [`PrivateDataAccess`] leading to the [`PrivateArchive`] data
+pub type PrivateArchiveAccess = PrivateDataAccess;
+
+/// A private archive of files that contains file paths, their metadata and the files' data maps
+/// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
+pub struct PrivateArchive {
+ map: HashMap<PathBuf, (PrivateDataAccess, Metadata)>,
+}
+
+impl PrivateArchive {
+ /// Create a new empty local archive
+ /// Note that this does not upload the archive to the network
+ pub fn new() -> Self {
+ Self {
+ map: HashMap::new(),
+ }
+ }
+
+ /// Rename a file in an archive
+ /// Note that this does not upload the archive to the network
+ pub fn rename_file(&mut self, old_path: &Path, new_path: &Path) -> Result<(), RenameError> {
+ let (data_addr, mut meta) = self
+ .map
+ .remove(old_path)
+ .ok_or(RenameError::FileNotFound(old_path.to_path_buf()))?;
+ let now = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .unwrap_or(Duration::from_secs(0))
+ .as_secs();
+ meta.modified = now;
+ self.map.insert(new_path.to_path_buf(), (data_addr, meta));
+ Ok(())
+ }
+
+ /// Add a file to a local archive
+ /// Note that this does not upload the archive to the network
+ pub fn add_file(&mut self, path: PathBuf, data_map: PrivateDataAccess, meta: Metadata) {
+ self.map.insert(path, (data_map, meta));
+ }
+
+ /// Add a file to a local archive, with default metadata
+ /// Note that this does not upload the archive to the network
+ pub fn add_new_file(&mut self, path: PathBuf, data_map: PrivateDataAccess) {
+ self.map.insert(path, (data_map, Metadata::new()));
+ }
+
+ /// List all files in the archive
+ pub fn files(&self) -> Vec<(PathBuf, Metadata)> {
+ self.map
+ .iter()
+ .map(|(path, (_, meta))| (path.clone(), meta.clone()))
+ .collect()
+ }
+
+ /// List all data addresses of the files in the archive
+ pub fn addresses(&self) -> Vec<PrivateDataAccess> {
+ self.map
+ .values()
+ .map(|(data_map, _)| data_map.clone())
+ .collect()
+ }
+
+ /// Iterate over the archive items
+ /// Returns an iterator over (PathBuf, PrivateDataAccess, Metadata)
+ pub fn iter(&self) -> impl Iterator<Item = (&PathBuf, &PrivateDataAccess, &Metadata)> {
+ self.map
+ .iter()
+ .map(|(path, (data_map, meta))| (path, data_map, meta))
+ }
+
+ /// Get the underlying map
+ pub fn map(&self) -> &HashMap<PathBuf, (PrivateDataAccess, Metadata)> {
+ &self.map
+ }
+
+ /// Deserialize from bytes.
+ pub fn from_bytes(data: Bytes) -> Result<PrivateArchive, rmp_serde::decode::Error> {
+ let root: PrivateArchive = rmp_serde::from_slice(&data[..])?;
+
+ Ok(root)
+ }
+
+ /// Serialize to bytes.
+ pub fn into_bytes(&self) -> Result {
+ let root_serialized = rmp_serde::to_vec(&self)?;
+ let root_serialized = Bytes::from(root_serialized);
+
+ Ok(root_serialized)
+ }
+}
+
+impl Client {
+ /// Fetch a private archive from the network
+ pub async fn private_archive_get(
+ &self,
+ addr: PrivateArchiveAccess,
+ ) -> Result {
+ let data = self.private_data_get(addr).await?;
+ Ok(PrivateArchive::from_bytes(data)?)
+ }
+
+ /// Upload a private archive to the network
+ pub async fn private_archive_put(
+ &self,
+ archive: PrivateArchive,
+ wallet: &EvmWallet,
+ ) -> Result {
+ let bytes = archive
+ .into_bytes()
+ .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?;
+ self.private_data_put(bytes, wallet).await
+ }
+}
diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index 055016f291..0a6be8598a 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -8,11 +8,12 @@
use bytes::Bytes;
use libp2p::kad::Quorum;
-use tokio::task::JoinError;
use std::collections::HashSet;
+use std::sync::LazyLock;
use xor_name::XorName;
+use crate::client::utils::process_tasks_with_max_concurrency;
use crate::client::{ClientEvent, UploadSummary};
use crate::{self_encryption::encrypt, Client};
use sn_evm::{Amount, AttoTokens};
@@ -23,6 +24,22 @@ use sn_protocol::{
NetworkAddress,
};
+/// Number of chunks to upload in parallel.
+/// Can be overridden by the `CHUNK_UPLOAD_BATCH_SIZE` environment variable.
+pub static CHUNK_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
+ let batch_size = std::env::var("CHUNK_UPLOAD_BATCH_SIZE")
+ .ok()
+ .and_then(|s| s.parse().ok())
+ .unwrap_or(
+ std::thread::available_parallelism()
+ .map(|n| n.get())
+ .unwrap_or(1)
+ * 8,
+ );
+ info!("Chunk upload batch size: {}", batch_size);
+ batch_size
+});
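+// e.g. running the client with `CHUNK_UPLOAD_BATCH_SIZE=16` caps concurrent chunk uploads
+// at 16; any value that parses as a usize is accepted.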
+
/// Raw Data Address (points to a DataMap)
pub type DataAddr = XorName;
/// Raw Chunk Address (points to a [`Chunk`])
@@ -33,31 +50,31 @@ pub type ChunkAddr = XorName;
pub enum PutError {
#[error("Failed to self-encrypt data.")]
SelfEncryption(#[from] crate::self_encryption::Error),
- #[error("Error getting Vault XorName data.")]
- VaultXorName,
#[error("A network error occurred.")]
Network(#[from] NetworkError),
+ #[error("Error occurred during cost estimation.")]
+ CostError(#[from] CostError),
#[error("Error occurred during payment.")]
PayError(#[from] PayError),
- #[error("Failed to serialize {0}")]
+ #[error("Serialization error: {0}")]
Serialization(String),
#[error("A wallet error occurred.")]
Wallet(#[from] sn_evm::EvmError),
+ #[error("The vault owner key does not match the client's public key")]
+ VaultBadOwner,
+ #[error("Payment unexpectedly invalid for {0:?}")]
+ PaymentUnexpectedlyInvalid(NetworkAddress),
}
/// Errors that can occur during the pay operation.
#[derive(Debug, thiserror::Error)]
pub enum PayError {
- #[error("Could not get store quote for: {0:?} after several retries")]
- CouldNotGetStoreQuote(XorName),
- #[error("Could not get store costs: {0:?}")]
- CouldNotGetStoreCosts(NetworkError),
- #[error("Could not simultaneously fetch store costs: {0:?}")]
- JoinError(JoinError),
#[error("Wallet error: {0:?}")]
EvmWalletError(#[from] EvmWalletError),
#[error("Failed to self-encrypt data.")]
SelfEncryption(#[from] crate::self_encryption::Error),
+ #[error("Cost error: {0:?}")]
+ Cost(#[from] CostError),
}
/// Errors that can occur during the get operation.
@@ -75,6 +92,19 @@ pub enum GetError {
Protocol(#[from] sn_protocol::Error),
}
+/// Errors that can occur during the cost calculation.
+#[derive(Debug, thiserror::Error)]
+pub enum CostError {
+ #[error("Failed to self-encrypt data.")]
+ SelfEncryption(#[from] crate::self_encryption::Error),
+ #[error("Could not get store quote for: {0:?} after several retries")]
+ CouldNotGetStoreQuote(XorName),
+ #[error("Could not get store costs: {0:?}")]
+ CouldNotGetStoreCosts(NetworkError),
+ #[error("Failed to serialize {0}")]
+ Serialization(String),
+}
+
impl Client {
/// Fetch a blob of data from the network
pub async fn data_get(&self, addr: DataAddr) -> Result<Bytes, GetError> {
@@ -87,17 +117,15 @@ impl Client {
Ok(data)
}
- /// Upload a piece of data to the network. This data will be self-encrypted.
+ /// Upload a piece of data to the network.
/// Returns the Data Address at which the data was stored.
+ /// This data is publicly accessible.
+ pub async fn data_put(&self, data: Bytes, wallet: &EvmWallet) -> Result<DataAddr, PutError> {
let now = sn_networking::target_arch::Instant::now();
let (data_map_chunk, chunks) = encrypt(data)?;
- info!(
- "Uploading datamap chunk to the network at: {:?}",
- data_map_chunk.address()
- );
-
+ let data_map_addr = data_map_chunk.address();
debug!("Encryption took: {:.2?}", now.elapsed());
+ info!("Uploading datamap chunk to the network at: {data_map_addr:?}");
let map_xor_name = *data_map_chunk.address().xorname();
let mut xor_names = vec![map_xor_name];
@@ -113,29 +141,39 @@ impl Client {
.await
.inspect_err(|err| error!("Error paying for data: {err:?}"))?;
- let mut record_count = 0;
-
- // Upload data map
- if let Some(proof) = payment_proofs.get(&map_xor_name) {
- debug!("Uploading data map chunk: {map_xor_name:?}");
- self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone())
- .await
- .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?;
- record_count += 1;
- }
-
- // Upload the rest of the chunks
+ // Upload all the chunks in parallel including the data map chunk
debug!("Uploading {} chunks", chunks.len());
- for chunk in chunks {
+ let mut upload_tasks = vec![];
+ for chunk in chunks.into_iter().chain(std::iter::once(data_map_chunk)) {
+ let self_clone = self.clone();
+ let address = *chunk.address();
if let Some(proof) = payment_proofs.get(chunk.name()) {
- let address = *chunk.address();
- self.chunk_upload_with_payment(chunk, proof.clone())
- .await
- .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?;
- record_count += 1;
+ let proof_clone = proof.clone();
+ upload_tasks.push(async move {
+ self_clone
+ .chunk_upload_with_payment(chunk, proof_clone)
+ .await
+ .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))
+ });
+ } else {
+ debug!("Chunk at {address:?} was already paid for so skipping");
}
}
-
+ let uploads =
+ process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE).await;
+
+ // Check for errors
+ let total_uploads = uploads.len();
+ let ok_uploads = uploads
+ .iter()
+ .filter_map(|up| up.is_ok().then_some(()))
+ .count();
+ info!("Uploaded {} chunks out of {}", ok_uploads, total_uploads);
+ let uploads: Result, _> = uploads.into_iter().collect();
+ uploads.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?;
+ let record_count = ok_uploads;
+
+ // Reporting
if let Some(channel) = self.client_event_sender.as_ref() {
let tokens_spent = payment_proofs
.values()
@@ -184,7 +222,7 @@ impl Client {
}
/// Get the estimated cost of storing a piece of data.
- pub async fn data_cost(&self, data: Bytes) -> Result<AttoTokens, PayError> {
+ pub async fn data_cost(&self, data: Bytes) -> Result<AttoTokens, CostError> {
let now = sn_networking::target_arch::Instant::now();
let (data_map_chunk, chunks) = encrypt(data)?;
diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs
new file mode 100644
index 0000000000..d2ecaf0a2b
--- /dev/null
+++ b/autonomi/src/client/data_private.rs
@@ -0,0 +1,144 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::hash::{DefaultHasher, Hash, Hasher};
+
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+use sn_evm::{Amount, EvmWallet};
+use sn_protocol::storage::Chunk;
+
+use super::data::CHUNK_UPLOAD_BATCH_SIZE;
+use super::data::{GetError, PutError};
+use crate::client::utils::process_tasks_with_max_concurrency;
+use crate::client::{ClientEvent, UploadSummary};
+use crate::{self_encryption::encrypt, Client};
+
+/// Private data on the network can be accessed with this
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct PrivateDataAccess(Chunk);
+
+impl PrivateDataAccess {
+ pub fn to_hex(&self) -> String {
+ hex::encode(self.0.value())
+ }
+
+ pub fn from_hex(hex: &str) -> Result<Self, hex::FromHexError> {
+ let data = hex::decode(hex)?;
+ Ok(Self(Chunk::new(Bytes::from(data))))
+ }
+
+ /// Get a private address for [`PrivateDataAccess`]. Note that this is not a network address, it is only used for referring to private data client side.
+ pub fn address(&self) -> String {
+ hash_to_short_string(&self.to_hex())
+ }
+}
+
+fn hash_to_short_string(input: &str) -> String {
+ let mut hasher = DefaultHasher::new();
+ input.hash(&mut hasher);
+ let hash_value = hasher.finish();
+ hash_value.to_string()
+}
+
+impl Client {
+ /// Fetch a blob of private data from the network
+ pub async fn private_data_get(&self, data_map: PrivateDataAccess) -> Result<Bytes, GetError> {
+ info!(
+ "Fetching private data from Data Map {:?}",
+ data_map.0.address()
+ );
+ let data = self.fetch_from_data_map_chunk(data_map.0.value()).await?;
+
+ Ok(data)
+ }
+
+ /// Upload a piece of private data to the network. This data will be self-encrypted.
+ /// Returns the [`PrivateDataAccess`] containing the map to the encrypted chunks.
+ /// This data is private and only accessible with the [`PrivateDataAccess`].
+ pub async fn private_data_put(
+ &self,
+ data: Bytes,
+ wallet: &EvmWallet,
+ ) -> Result {
+ let now = sn_networking::target_arch::Instant::now();
+ let (data_map_chunk, chunks) = encrypt(data)?;
+ debug!("Encryption took: {:.2?}", now.elapsed());
+
+ // Pay for all chunks
+ let xor_names: Vec<_> = chunks.iter().map(|chunk| *chunk.name()).collect();
+ info!("Paying for {} addresses", xor_names.len());
+ let (payment_proofs, _free_chunks) = self
+ .pay(xor_names.into_iter(), wallet)
+ .await
+ .inspect_err(|err| error!("Error paying for data: {err:?}"))?;
+
+ // Upload the chunks with the payments
+ debug!("Uploading {} chunks", chunks.len());
+ let mut upload_tasks = vec![];
+ for chunk in chunks {
+ let self_clone = self.clone();
+ let address = *chunk.address();
+ if let Some(proof) = payment_proofs.get(chunk.name()) {
+ let proof_clone = proof.clone();
+ upload_tasks.push(async move {
+ self_clone
+ .chunk_upload_with_payment(chunk, proof_clone)
+ .await
+ .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))
+ });
+ } else {
+ debug!("Chunk at {address:?} was already paid for so skipping");
+ }
+ }
+ let uploads =
+ process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE).await;
+
+ // Check for errors
+ let total_uploads = uploads.len();
+ let ok_uploads = uploads
+ .iter()
+ .filter_map(|up| up.is_ok().then_some(()))
+ .count();
+ info!("Uploaded {} chunks out of {}", ok_uploads, total_uploads);
+ let uploads: Result<Vec<_>, _> = uploads.into_iter().collect();
+ uploads.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?;
+ let record_count = ok_uploads;
+
+ // Reporting
+ if let Some(channel) = self.client_event_sender.as_ref() {
+ let tokens_spent = payment_proofs
+ .values()
+ .map(|proof| proof.quote.cost.as_atto())
+ .sum::<Amount>();
+
+ let summary = UploadSummary {
+ record_count,
+ tokens_spent,
+ };
+ if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await {
+ error!("Failed to send client event: {err:?}");
+ }
+ }
+
+ Ok(PrivateDataAccess(data_map_chunk))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_hex() {
+ let data_map = PrivateDataAccess(Chunk::new(Bytes::from_static(b"hello")));
+ let hex = data_map.to_hex();
+ let data_map2 = PrivateDataAccess::from_hex(&hex).expect("Failed to decode hex");
+ assert_eq!(data_map, data_map2);
+ }
+}
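+
+// Usage sketch (illustrative only; assumes a connected `Client` and a funded `EvmWallet`):
+//
+//     let access = client.private_data_put(Bytes::from("secret"), &wallet).await?;
+//     println!("stored with local reference {}", access.address());
+//     let data = client.private_data_get(access).await?;
+//     assert_eq!(data, Bytes::from("secret"));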
diff --git a/autonomi/src/client/external_signer.rs b/autonomi/src/client/external_signer.rs
new file mode 100644
index 0000000000..b17002bd9c
--- /dev/null
+++ b/autonomi/src/client/external_signer.rs
@@ -0,0 +1,108 @@
+use crate::client::data::{DataAddr, PutError};
+use crate::client::utils::extract_quote_payments;
+use crate::self_encryption::encrypt;
+use crate::Client;
+use bytes::Bytes;
+use sn_evm::{PaymentQuote, ProofOfPayment, QuotePayment};
+use sn_protocol::storage::Chunk;
+use std::collections::HashMap;
+use xor_name::XorName;
+
+#[allow(unused_imports)]
+pub use sn_evm::external_signer::*;
+
+impl Client {
+ /// Upload a piece of data to the network. This data will be self-encrypted.
+ /// Unlike the regular `data_put`, payment is not handled automatically; a proof of payment has to be provided.
+ /// Returns the Data Address at which the data was stored.
+ pub async fn data_put_with_proof_of_payment(
+ &self,
+ data: Bytes,
+ proof: HashMap<XorName, ProofOfPayment>,
+ ) -> Result<DataAddr, PutError> {
+ let (data_map_chunk, chunks, _) = encrypt_data(data)?;
+ self.upload_data_map(&proof, &data_map_chunk).await?;
+ self.upload_chunks(&chunks, &proof).await?;
+ Ok(*data_map_chunk.address().xorname())
+ }
+
+ /// Get quotes for data.
+ /// Returns a cost map, data payments to be executed and a list of free (already paid for) chunks.
+ pub async fn get_quotes_for_data(
+ &self,
+ data: Bytes,
+ ) -> Result<
+ (
+ HashMap<XorName, PaymentQuote>,
+ Vec<QuotePayment>,
+ Vec<XorName>,
+ ),
+ PutError,
+ > {
+ // Encrypt the data as chunks
+ let (_data_map_chunk, _chunks, xor_names) = encrypt_data(data)?;
+
+ let cost_map: HashMap<XorName, PaymentQuote> = self
+ .get_store_quotes(xor_names.into_iter())
+ .await?
+ .into_iter()
+ .map(|(name, (_, _, q))| (name, q))
+ .collect();
+
+ let (quote_payments, free_chunks) = extract_quote_payments(&cost_map);
+ Ok((cost_map, quote_payments, free_chunks))
+ }
+
+ async fn upload_data_map(
+ &self,
+ payment_proofs: &HashMap<XorName, ProofOfPayment>,
+ data_map_chunk: &Chunk,
+ ) -> Result<(), PutError> {
+ let map_xor_name = data_map_chunk.name();
+
+ if let Some(proof) = payment_proofs.get(map_xor_name) {
+ debug!("Uploading data map chunk: {map_xor_name:?}");
+ self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone())
+ .await
+ .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))
+ } else {
+ Ok(())
+ }
+ }
+
+ async fn upload_chunks(
+ &self,
+ chunks: &[Chunk],
+ payment_proofs: &HashMap<XorName, ProofOfPayment>,
+ ) -> Result<(), PutError> {
+ debug!("Uploading {} chunks", chunks.len());
+ for chunk in chunks {
+ if let Some(proof) = payment_proofs.get(chunk.name()) {
+ let address = *chunk.address();
+ self.chunk_upload_with_payment(chunk.clone(), proof.clone())
+ .await
+ .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?;
+ }
+ }
+ Ok(())
+ }
+}
+
+/// Encrypts data as chunks.
+///
+/// Returns the data map chunk, file chunks and a list of all content addresses including the data map.
+fn encrypt_data(data: Bytes) -> Result<(Chunk, Vec<Chunk>, Vec<XorName>), PutError> {
+ let now = sn_networking::target_arch::Instant::now();
+ let result = encrypt(data)?;
+
+ debug!("Encryption took: {:.2?}", now.elapsed());
+
+ let map_xor_name = *result.0.address().xorname();
+ let mut xor_names = vec![map_xor_name];
+
+ for chunk in &result.1 {
+ xor_names.push(*chunk.name());
+ }
+
+ Ok((result.0, result.1, xor_names))
+}
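+
+// Rough sketch of the external-signer flow (illustrative only; `execute_payments`
+// stands in for the caller's own signing/execution step and is not an API of this crate):
+//
+//     let (quotes, payments, _free) = client.get_quotes_for_data(data.clone()).await?;
+//     let tx_hashes = execute_payments(payments); // hypothetical external step
+//     let proof = payment_proof_from_quotes_and_payments(&quotes, &tx_hashes);
+//     let addr = client.data_put_with_proof_of_payment(data, proof).await?;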
diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index 8fff06324c..c1505224bc 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -6,15 +6,35 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
+use crate::client::archive::Metadata;
+use crate::client::data::CostError;
+use crate::client::utils::process_tasks_with_max_concurrency;
use crate::client::Client;
use bytes::Bytes;
use sn_evm::EvmWallet;
-use std::collections::HashMap;
+use sn_networking::target_arch::{Duration, SystemTime};
use std::path::PathBuf;
+use std::sync::LazyLock;
use super::archive::{Archive, ArchiveAddr};
use super::data::{DataAddr, GetError, PutError};
+/// Number of files to upload in parallel.
+/// Can be overridden by the `FILE_UPLOAD_BATCH_SIZE` environment variable.
+pub static FILE_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
+ let batch_size = std::env::var("FILE_UPLOAD_BATCH_SIZE")
+ .ok()
+ .and_then(|s| s.parse().ok())
+ .unwrap_or(
+ std::thread::available_parallelism()
+ .map(|n| n.get())
+ .unwrap_or(1)
+ * 8,
+ );
+ info!("File upload batch size: {}", batch_size);
+ batch_size
+});
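+// For example, running with `FILE_UPLOAD_BATCH_SIZE=4` caps concurrent file uploads at 4;
+// without an override, a machine reporting 8 available cores defaults to a batch size of 64.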
+
/// Errors that can occur during the file upload operation.
#[cfg(feature = "fs")]
#[derive(Debug, thiserror::Error)]
@@ -43,6 +63,22 @@ pub enum DownloadError {
IoError(#[from] std::io::Error),
}
+#[cfg(feature = "fs")]
+/// Errors that can occur during the file cost calculation.
+#[derive(Debug, thiserror::Error)]
+pub enum FileCostError {
+ #[error("Cost error: {0}")]
+ Cost(#[from] CostError),
+ #[error("IO failure")]
+ IoError(#[from] std::io::Error),
+ #[error("Serialization error")]
+ Serialization(#[from] rmp_serde::encode::Error),
+ #[error("Self encryption error")]
+ SelfEncryption(#[from] crate::self_encryption::Error),
+ #[error("Walkdir error")]
+ WalkDir(#[from] walkdir::Error),
+}
+
impl Client {
/// Download file from network to local file system
pub async fn file_download(
@@ -65,8 +101,8 @@ impl Client {
to_dest: PathBuf,
) -> Result<(), DownloadError> {
let archive = self.archive_get(archive_addr).await?;
- for (path, addr) in archive.map {
- self.file_download(addr, to_dest.join(path)).await?;
+ for (path, addr, _meta) in archive.iter() {
+ self.file_download(*addr, to_dest.join(path)).await?;
}
Ok(())
}
@@ -78,29 +114,51 @@ impl Client {
dir_path: PathBuf,
wallet: &EvmWallet,
) -> Result<ArchiveAddr, UploadError> {
- let mut map = HashMap::new();
+ info!("Uploading directory: {dir_path:?}");
+ let start = tokio::time::Instant::now();
+ // start upload of files in parallel
+ let mut upload_tasks = Vec::new();
for entry in walkdir::WalkDir::new(dir_path) {
let entry = entry?;
-
if !entry.file_type().is_file() {
continue;
}
+ let metadata = metadata_from_entry(&entry);
let path = entry.path().to_path_buf();
- tracing::info!("Uploading file: {path:?}");
- #[cfg(feature = "loud")]
- println!("Uploading file: {path:?}");
- let file = self.file_upload(path.clone(), wallet).await?;
+ upload_tasks.push(async move {
+ let file = self.file_upload(path.clone(), wallet).await;
+ (path, metadata, file)
+ });
+ }
- map.insert(path, file);
+ // wait for all files to be uploaded
+ let uploads =
+ process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await;
+ info!(
+ "Upload of {} files completed in {:?}",
+ uploads.len(),
+ start.elapsed()
+ );
+ let mut archive = Archive::new();
+ for (path, metadata, maybe_file) in uploads.into_iter() {
+ match maybe_file {
+ Ok(file) => archive.add_file(path, file, metadata),
+ Err(err) => {
+ error!("Failed to upload file: {path:?}: {err:?}");
+ return Err(err);
+ }
+ }
}
- let archive = Archive { map };
+ // upload archive
let archive_serialized = archive.into_bytes()?;
-
let arch_addr = self.data_put(archive_serialized, wallet).await?;
+ info!("Complete archive upload completed in {:?}", start.elapsed());
+ #[cfg(feature = "loud")]
+ println!("Upload completed in {:?}", start.elapsed());
Ok(arch_addr)
}
@@ -111,6 +169,10 @@ impl Client {
path: PathBuf,
wallet: &EvmWallet,
) -> Result<DataAddr, UploadError> {
+ info!("Uploading file: {path:?}");
+ #[cfg(feature = "loud")]
+ println!("Uploading file: {path:?}");
+
let data = tokio::fs::read(path).await?;
let data = Bytes::from(data);
let addr = self.data_put(data, wallet).await?;
@@ -119,8 +181,8 @@ impl Client {
/// Get the cost to upload a file/dir to the network.
/// quick and dirty implementation, please refactor once files are cleanly implemented
- pub async fn file_cost(&self, path: &PathBuf) -> Result<AttoTokens, UploadError> {
- let mut map = HashMap::new();
+ pub async fn file_cost(&self, path: &PathBuf) -> Result<AttoTokens, FileCostError> {
+ let mut archive = Archive::new();
let mut total_cost = sn_evm::Amount::ZERO;
for entry in walkdir::WalkDir::new(path) {
@@ -135,29 +197,74 @@ impl Client {
let data = tokio::fs::read(&path).await?;
let file_bytes = Bytes::from(data);
- let file_cost = self.data_cost(file_bytes.clone()).await.expect("TODO");
+ let file_cost = self.data_cost(file_bytes.clone()).await?;
total_cost += file_cost.as_atto();
// re-do encryption to get the correct map xorname here
// this code needs refactor
let now = sn_networking::target_arch::Instant::now();
- let (data_map_chunk, _) = crate::self_encryption::encrypt(file_bytes).expect("TODO");
+ let (data_map_chunk, _) = crate::self_encryption::encrypt(file_bytes)?;
tracing::debug!("Encryption took: {:.2?}", now.elapsed());
let map_xor_name = *data_map_chunk.address().xorname();
- map.insert(path, map_xor_name);
+ archive.add_file(path, map_xor_name, Metadata::new());
}
- let root = Archive { map };
- let root_serialized = rmp_serde::to_vec(&root).expect("TODO");
+ let root_serialized = rmp_serde::to_vec(&archive)?;
- let archive_cost = self
- .data_cost(Bytes::from(root_serialized))
- .await
- .expect("TODO");
+ let archive_cost = self.data_cost(Bytes::from(root_serialized)).await?;
total_cost += archive_cost.as_atto();
Ok(total_cost.into())
}
}
+
+// Get metadata from a directory entry. Defaults to `0` for the creation and modification
+// times if any error is encountered; a warning is logged in that case.
+pub(crate) fn metadata_from_entry(entry: &walkdir::DirEntry) -> Metadata {
+ let fs_metadata = match entry.metadata() {
+ Ok(metadata) => metadata,
+ Err(err) => {
+ tracing::warn!(
+ "Failed to get metadata for `{}`: {err}",
+ entry.path().display()
+ );
+ return Metadata {
+ uploaded: 0,
+ created: 0,
+ modified: 0,
+ };
+ }
+ };
+
+ let unix_time = |property: &'static str, time: std::io::Result<SystemTime>| {
+ time.inspect_err(|err| {
+ tracing::warn!(
+ "Failed to get '{property}' metadata for `{}`: {err}",
+ entry.path().display()
+ );
+ })
+ .unwrap_or(SystemTime::UNIX_EPOCH)
+ .duration_since(SystemTime::UNIX_EPOCH)
+ .inspect_err(|err| {
+ tracing::warn!(
+ "'{property}' metadata of `{}` is before UNIX epoch: {err}",
+ entry.path().display()
+ );
+ })
+ .unwrap_or(Duration::from_secs(0))
+ .as_secs()
+ };
+ let created = unix_time("created", fs_metadata.created());
+ let modified = unix_time("modified", fs_metadata.modified());
+
+ Metadata {
+ uploaded: SystemTime::now()
+ .duration_since(SystemTime::UNIX_EPOCH)
+ .unwrap_or(Duration::from_secs(0))
+ .as_secs(),
+ created,
+ modified,
+ }
+}
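+
+// Illustration: for a file created at 2024-01-01T00:00:00Z, `created` is `1704067200`
+// (seconds since the UNIX epoch); timestamps that cannot be read fall back to `0`.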
diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs
new file mode 100644
index 0000000000..08d453ae37
--- /dev/null
+++ b/autonomi/src/client/fs_private.rs
@@ -0,0 +1,132 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::client::utils::process_tasks_with_max_concurrency;
+use crate::client::Client;
+use bytes::Bytes;
+use sn_evm::EvmWallet;
+use std::path::PathBuf;
+
+use super::archive_private::{PrivateArchive, PrivateArchiveAccess};
+use super::data_private::PrivateDataAccess;
+use super::fs::{DownloadError, UploadError};
+
+use super::fs::FILE_UPLOAD_BATCH_SIZE;
+
+impl Client {
+ /// Download a private file from network to local file system
+ pub async fn private_file_download(
+ &self,
+ data_access: PrivateDataAccess,
+ to_dest: PathBuf,
+ ) -> Result<(), DownloadError> {
+ let data = self.private_data_get(data_access).await?;
+ if let Some(parent) = to_dest.parent() {
+ tokio::fs::create_dir_all(parent).await?;
+ }
+ tokio::fs::write(to_dest, data).await?;
+ Ok(())
+ }
+
+ /// Download a private directory from network to local file system
+ pub async fn private_dir_download(
+ &self,
+ archive_access: PrivateArchiveAccess,
+ to_dest: PathBuf,
+ ) -> Result<(), DownloadError> {
+ let archive = self.private_archive_get(archive_access).await?;
+ for (path, addr, _meta) in archive.iter() {
+ self.private_file_download(addr.clone(), to_dest.join(path))
+ .await?;
+ }
+ Ok(())
+ }
+
+ /// Upload a private directory to the network. The directory is recursively walked.
+ /// Reads all files, splits into chunks, uploads chunks, uploads private archive, returns [`PrivateArchiveAccess`] (pointing to the private archive)
+ pub async fn private_dir_upload(
+ &self,
+ dir_path: PathBuf,
+ wallet: &EvmWallet,
+ ) -> Result<PrivateArchiveAccess, UploadError> {
+ info!("Uploading directory as private: {dir_path:?}");
+ let start = tokio::time::Instant::now();
+
+ // start upload of files in parallel
+ let mut upload_tasks = Vec::new();
+ for entry in walkdir::WalkDir::new(dir_path) {
+ let entry = entry?;
+ if !entry.file_type().is_file() {
+ continue;
+ }
+
+ let metadata = super::fs::metadata_from_entry(&entry);
+ let path = entry.path().to_path_buf();
+ upload_tasks.push(async move {
+ let file = self.private_file_upload(path.clone(), wallet).await;
+ (path, metadata, file)
+ });
+ }
+
+ // wait for all files to be uploaded
+ let uploads =
+ process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await;
+ info!(
+ "Upload of {} files completed in {:?}",
+ uploads.len(),
+ start.elapsed()
+ );
+ let mut archive = PrivateArchive::new();
+ for (path, metadata, maybe_file) in uploads.into_iter() {
+ match maybe_file {
+ Ok(file) => archive.add_file(path, file, metadata),
+ Err(err) => {
+ error!("Failed to upload file: {path:?}: {err:?}");
+ return Err(err);
+ }
+ }
+ }
+
+ // upload archive
+ let archive_serialized = archive.into_bytes()?;
+ let arch_addr = self.private_data_put(archive_serialized, wallet).await?;
+
+ info!(
+ "Private archive upload completed in {:?}",
+ start.elapsed()
+ );
+ #[cfg(feature = "loud")]
+ println!("Upload completed in {:?}", start.elapsed());
+ Ok(arch_addr)
+ }
+
+ /// Upload a private file to the network.
+ /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns [`PrivateDataAccess`] (pointing to the datamap)
+ async fn private_file_upload(
+ &self,
+ path: PathBuf,
+ wallet: &EvmWallet,
+ ) -> Result<PrivateDataAccess, UploadError> {
+ info!("Uploading file: {path:?}");
+ #[cfg(feature = "loud")]
+ println!("Uploading file: {path:?}");
+
+ let data = tokio::fs::read(path).await?;
+ let data = Bytes::from(data);
+ let addr = self.private_data_put(data, wallet).await?;
+ Ok(addr)
+ }
+}
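+
+// Usage sketch (illustrative only):
+//
+//     let access = client.private_dir_upload(dir_path, &wallet).await?;
+//     client.private_dir_download(access, dest_path).await?;
+//
+// Note that, unlike `dir_upload`, no network address is returned; the
+// `PrivateArchiveAccess` handle is the only way to retrieve the directory.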
diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index f19216fe84..0938dcbf9d 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -11,9 +11,17 @@ pub mod address;
#[cfg(feature = "data")]
pub mod archive;
#[cfg(feature = "data")]
+pub mod archive_private;
+#[cfg(feature = "data")]
pub mod data;
+#[cfg(feature = "data")]
+pub mod data_private;
+#[cfg(feature = "external-signer")]
+pub mod external_signer;
#[cfg(feature = "fs")]
pub mod fs;
+#[cfg(feature = "fs")]
+pub mod fs_private;
#[cfg(feature = "registers")]
pub mod registers;
#[cfg(feature = "vault")]
@@ -34,7 +42,7 @@ use std::{collections::HashSet, sync::Arc, time::Duration};
use tokio::sync::mpsc;
/// Time before considering the connection timed out.
-pub const CONNECT_TIMEOUT_SECS: u64 = 20;
+pub const CONNECT_TIMEOUT_SECS: u64 = 10;
const CLIENT_EVENT_CHANNEL_SIZE: usize = 100;
@@ -197,11 +205,13 @@ async fn handle_event_receiver(
}
/// Events that can be broadcasted by the client.
+#[derive(Debug, Clone)]
pub enum ClientEvent {
UploadComplete(UploadSummary),
}
/// Summary of an upload operation.
+#[derive(Debug, Clone)]
pub struct UploadSummary {
pub record_count: usize,
pub tokens_spent: Amount,
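+
+// Usage sketch (illustrative; assumes an event receiver such as a hypothetical
+// `enable_client_events()` exposing the `client_event_sender` channel):
+//
+//     let mut events = client.enable_client_events();
+//     while let Some(event) = events.recv().await {
+//         println!("{event:?}"); // now possible thanks to the `Debug` derive
+//     }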
diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index fb3c55fa6c..52f8944e1e 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -32,8 +32,12 @@ use sn_registers::{Permissions, RegisterCrdt, RegisterOp, SignedRegister};
use std::collections::BTreeSet;
use xor_name::XorName;
+use super::data::CostError;
+
#[derive(Debug, thiserror::Error)]
pub enum RegisterError {
+ #[error("Cost error: {0}")]
+ Cost(#[from] CostError),
#[error("Network error")]
Network(#[from] NetworkError),
#[error("Serialization error")]
@@ -143,23 +147,11 @@ impl Client {
try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?;
signed_reg
}
- // manage forked register case
Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => {
- debug!("Forked register detected for {address:?} merging forks");
- let mut registers: Vec = vec![];
- for (_, (record, _)) in result_map {
- registers.push(
- try_deserialize_record(&record)
- .map_err(|_| RegisterError::Serialization)?,
- );
- }
- let register = registers.iter().fold(registers[0].clone(), |mut acc, x| {
- if let Err(e) = acc.merge(x) {
- warn!("Ignoring forked register as we failed to merge conflicting registers at {}: {e}", x.address());
- }
- acc
- });
- register
+ error!("Got split record error for register at address: {address}. This should've been handled at the network layer");
+ Err(RegisterError::Network(NetworkError::GetRecordError(
+ GetRecordError::SplitRecord { result_map },
+ )))?
}
Err(e) => {
error!("Failed to get register {address:?} from network: {e}");
diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
index 0714f60d9d..95d70b6e4d 100644
--- a/autonomi/src/client/utils.rs
+++ b/autonomi/src/client/utils.rs
@@ -6,16 +6,12 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
-use std::{
- collections::{BTreeMap, HashMap},
- num::NonZero,
-};
-
use bytes::Bytes;
+use futures::stream::{FuturesUnordered, StreamExt};
use libp2p::kad::{Quorum, Record};
use rand::{thread_rng, Rng};
use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk};
-use sn_evm::{EvmWallet, ProofOfPayment, QuoteHash, QuotePayment, TxHash};
+use sn_evm::{EvmWallet, PaymentQuote, ProofOfPayment, QuotePayment};
use sn_networking::{
GetRecordCfg, Network, NetworkError, PayeeQuote, PutRecordCfg, VerificationKind,
};
@@ -24,14 +20,15 @@ use sn_protocol::{
storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy},
NetworkAddress,
};
+use std::{collections::HashMap, future::Future, num::NonZero};
use xor_name::XorName;
-use crate::self_encryption::DataMapLevel;
-
use super::{
- data::{GetError, PayError, PutError},
+ data::{CostError, GetError, PayError, PutError},
Client,
};
+use crate::self_encryption::DataMapLevel;
+use crate::utils::payment_proof_from_quotes_and_payments;
impl Client {
/// Fetch and decrypt all chunks in the data map.
@@ -152,9 +149,20 @@ impl Client {
content_addrs: impl Iterator<Item = XorName>,
wallet: &EvmWallet,
) -> Result<(HashMap<XorName, ProofOfPayment>, Vec<XorName>), PayError> {
- let cost_map = self.get_store_quotes(content_addrs).await?;
+ let cost_map = self
+ .get_store_quotes(content_addrs)
+ .await?
+ .into_iter()
+ .map(|(name, (_, _, q))| (name, q))
+ .collect();
+
let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map);
+ // Make sure nobody else can use the wallet while we are paying
+ debug!("Waiting for wallet lock");
+ let lock_guard = wallet.lock().await;
+ debug!("Locked wallet");
+
// TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying.
// TODO: retry when it fails?
// Execute chunk payments
@@ -163,7 +171,11 @@ impl Client {
.await
.map_err(|err| PayError::from(err.0))?;
- let proofs = construct_proofs(&cost_map, &payments);
+ // payment is done, unlock the wallet for other threads
+ drop(lock_guard);
+ debug!("Unlocked wallet");
+
+ let proofs = payment_proof_from_quotes_and_payments(&cost_map, &payments);
trace!(
"Chunk payments of {} chunks completed. {} chunks were free / already paid for",
@@ -177,7 +189,7 @@ impl Client {
pub(crate) async fn get_store_quotes(
&self,
content_addrs: impl Iterator<Item = XorName>,
- ) -> Result<HashMap<XorName, PayeeQuote>, PayError> {
+ ) -> Result<HashMap<XorName, PayeeQuote>, CostError> {
let futures: Vec<_> = content_addrs
.into_iter()
.map(|content_addr| fetch_store_quote_with_retries(&self.network, content_addr))
@@ -193,7 +205,7 @@ impl Client {
async fn fetch_store_quote_with_retries(
network: &Network,
content_addr: XorName,
-) -> Result<(XorName, PayeeQuote), PayError> {
+) -> Result<(XorName, PayeeQuote), CostError> {
let mut retries = 0;
loop {
@@ -209,7 +221,7 @@ async fn fetch_store_quote_with_retries(
error!(
"Error while fetching store quote: {err:?}, stopping after {retries} retries"
);
- break Err(PayError::CouldNotGetStoreQuote(content_addr));
+ break Err(CostError::CouldNotGetStoreQuote(content_addr));
}
}
}
@@ -229,44 +241,46 @@ async fn fetch_store_quote(
}
/// Form to be executed payments and already executed payments from a cost map.
-fn extract_quote_payments(
- cost_map: &HashMap<XorName, PayeeQuote>,
+pub(crate) fn extract_quote_payments(
+ cost_map: &HashMap<XorName, PaymentQuote>,
) -> (Vec<QuotePayment>, Vec<XorName>) {
let mut to_be_paid = vec![];
let mut already_paid = vec![];
for (chunk_address, quote) in cost_map.iter() {
- if quote.2.cost.is_zero() {
+ if quote.cost.is_zero() {
already_paid.push(*chunk_address);
} else {
- to_be_paid.push((
- quote.2.hash(),
- quote.2.rewards_address,
- quote.2.cost.as_atto(),
- ));
+ to_be_paid.push((quote.hash(), quote.rewards_address, quote.cost.as_atto()));
}
}
(to_be_paid, already_paid)
}
-/// Construct payment proofs from cost map and payments map.
-fn construct_proofs(
- cost_map: &HashMap,
- payments: &BTreeMap,
-) -> HashMap {
- cost_map
- .iter()
- .filter_map(|(xor_name, (_, _, quote))| {
- payments.get("e.hash()).map(|tx_hash| {
- (
- *xor_name,
- ProofOfPayment {
- quote: quote.clone(),
- tx_hash: *tx_hash,
- },
- )
- })
- })
- .collect()
+pub(crate) async fn process_tasks_with_max_concurrency<I, R>(tasks: I, batch_size: usize) -> Vec<R>
+where
+ I: IntoIterator,
+ I::Item: Future<Output = R>,