diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f33c274b5..b0e52e73f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Setup Build Env + - name: Install Build Tools run: sudo ./scripts/install-build-tools.sh - name: Setup Local Dependencies run: ./scripts/setup-dependencies.sh @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Setup Build Env + - name: Install Build Tools run: sudo ./scripts/install-build-tools.sh - name: Setup Local Dependencies run: ./scripts/setup-dependencies.sh @@ -50,7 +50,7 @@ jobs: name: Pylint runs-on: ubuntu-22.04 continue-on-error: true - timeout-minutes: 10 + timeout-minutes: 5 strategy: matrix: python-version: ["3.10"] @@ -62,10 +62,23 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Setup Build Env + - name: Install Build Tools run: sudo ./scripts/install-build-tools.sh - name: Lint with Pylint run: ./scripts/pylint.sh + shellcheck: + name: Shellcheck + runs-on: ubuntu-22.04 + continue-on-error: true + timeout-minutes: 5 + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: Install Build Tools + run: sudo ./scripts/install-build-tools.sh + - name: Lint with Shellcheck + run: ./scripts/shellcheck.sh view unit-and-integration-test: name: Unit and Integration Tests runs-on: ubuntu-22.04 @@ -74,7 +87,7 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Setup Build Env + - name: Install Build Tools run: sudo ./scripts/install-build-tools.sh - name: Setup Local Dependencies run: ./scripts/setup-dependencies.sh @@ -84,7 +97,7 @@ jobs: run: ./scripts/test.sh - name: Shorten SHA id: vars - run: echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" + run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV - uses: actions/upload-artifact@v4 if: ${{ !env.ACT }} name: Archive Test Results @@ -114,4 +127,3 @@ jobs: name: OpenCBDC Transaction Processor docs for ${{ steps.vars.outputs.sha_short }} path: ./doxygen_generated/html/* retention-days: 7 - diff --git a/.gitignore b/.gitignore index ebb163ae7..fddc84dfe 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,7 @@ CMakeFiles/ plots/ .deps/ .libs/ +.cache/ # Database blocks.dat diff --git a/benchmarks/transactions.cpp b/benchmarks/transactions.cpp index 83aa6c5e7..0350d6f2a 100644 --- a/benchmarks/transactions.cpp +++ b/benchmarks/transactions.cpp @@ -41,21 +41,21 @@ void reset_wallets(cbdc::transaction::wallet& w1, /// @brief Time an N-in, 1-out transaction. /// @brief Note: handles benchmark timing, do not time outside function. 
/// @param sender -/// @param reciever +/// @param receiver /// @param n_in /// @param state /// @return inline bool generate_Nto1_tx(cbdc::transaction::wallet& sender, - cbdc::transaction::wallet& reciever, + cbdc::transaction::wallet& receiver, uint32_t n_in, benchmark::State& state) { std::optional maybe_tx{}; state.ResumeTiming(); - maybe_tx = sender.send_to(n_in * 2, reciever.generate_key(), true).value(); + maybe_tx = sender.send_to(n_in * 2, receiver.generate_key(), true).value(); state.PauseTiming(); if(maybe_tx.has_value()) { sender.confirm_transaction(*maybe_tx); - reciever.confirm_transaction(*maybe_tx); + receiver.confirm_transaction(*maybe_tx); return true; } return false; @@ -64,22 +64,22 @@ inline bool generate_Nto1_tx(cbdc::transaction::wallet& sender, /// @brief Time an N-in, 2-out transaction. /// @brief Note: handles benchmark timing, do not time outside function. /// @param sender -/// @param reciever +/// @param receiver /// @param n_in /// @param state /// @return inline bool generate_Nto2_tx(cbdc::transaction::wallet& sender, - cbdc::transaction::wallet& reciever, + cbdc::transaction::wallet& receiver, uint32_t n_in, benchmark::State& state) { std::optional maybe_tx{}; state.ResumeTiming(); maybe_tx - = sender.send_to(n_in * 2 - 1, reciever.generate_key(), true).value(); + = sender.send_to(n_in * 2 - 1, receiver.generate_key(), true).value(); state.PauseTiming(); if(maybe_tx.has_value()) { sender.confirm_transaction(*maybe_tx); - reciever.confirm_transaction(*maybe_tx); + receiver.confirm_transaction(*maybe_tx); return true; } return false; diff --git a/scripts/benchmarks.sh b/scripts/benchmarks.sh index 030f4f901..2aa8813f2 100755 --- a/scripts/benchmarks.sh +++ b/scripts/benchmarks.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Exit script on failure. set -e @@ -42,13 +42,13 @@ do -d|--build-dir) shift ARG="$1" - if [[ $ARG == "" || ${ARG:0:1} == "-" ]] + if [[ "${ARG}" == "" || ${ARG:0:1} == "-" ]] then echo -n "ERROR: The -d flag was used, " echo "but a valid build folder was not given." exit 1 fi - BUILD_DIR=$ARG + BUILD_DIR="${ARG}" shift ;; *) @@ -69,7 +69,7 @@ then BUILD_DIR="${REPO_TOP_DIR}/build" fi -if [[ ! -d "$BUILD_DIR" ]] +if [[ ! -d "${BUILD_DIR}" ]] then echo "ERROR: The folder '${BUILD_DIR}' was not found." exit 1 @@ -78,14 +78,15 @@ fi # If the build folder is a relative path, convert it to an absolute path # to avoid potential relative path errors and to improve readability # if the path is written to stdout. -export BUILD_DIR=$(cd "$BUILD_DIR"; pwd) +BUILD_DIR=$(cd "${BUILD_DIR}"; pwd) +export BUILD_DIR echo "Build folder: '${BUILD_DIR}'" echo run_test_suite () { - cd "$BUILD_DIR" + cd "${BUILD_DIR}" find . -name '*.gcda' -exec rm {} \; - "$PWD"/"$1" "${GTEST_FLAGS[@]}" + "${PWD}"/"$1" "${GTEST_FLAGS[@]}" } run_test_suite "benchmarks/run_benchmarks" diff --git a/scripts/build-docker.sh b/scripts/build-docker.sh index 68ec4b0d0..a8ac5c12d 100755 --- a/scripts/build-docker.sh +++ b/scripts/build-docker.sh @@ -12,7 +12,7 @@ DOCKER_IMAGE_TAG_TWOPHASE=${DOCKER_IMAGE_TAG:-opencbdc-tx-twophase} git submodule init && git submodule update # Build docker image -docker build --target base -t $DOCKER_IMAGE_TAG_BASE -f $SCRIPT_DIR/../Dockerfile $SCRIPT_DIR/.. -docker build --target builder --build-arg BASE_IMAGE=base -t $DOCKER_IMAGE_TAG_BUILDER -f $SCRIPT_DIR/../Dockerfile $SCRIPT_DIR/.. -docker build --target twophase --build-arg BASE_IMAGE=base -t $DOCKER_IMAGE_TAG_TWOPHASE -f $SCRIPT_DIR/../Dockerfile $SCRIPT_DIR/.. 
-docker build --target atomizer --build-arg BASE_IMAGE=base -t $DOCKER_IMAGE_TAG_ATOMIZER -f $SCRIPT_DIR/../Dockerfile $SCRIPT_DIR/.. +docker build --target base -t "${DOCKER_IMAGE_TAG_BASE}" -f "${SCRIPT_DIR}/../Dockerfile" "${SCRIPT_DIR}/.." +docker build --target builder --build-arg BASE_IMAGE=base -t "${DOCKER_IMAGE_TAG_BUILDER}" -f "${SCRIPT_DIR}/../Dockerfile" "${SCRIPT_DIR}/.." +docker build --target twophase --build-arg BASE_IMAGE=base -t "${DOCKER_IMAGE_TAG_TWOPHASE}" -f "${SCRIPT_DIR}/../Dockerfile" "${SCRIPT_DIR}/.." +docker build --target atomizer --build-arg BASE_IMAGE=base -t "${DOCKER_IMAGE_TAG_ATOMIZER}" -f "${SCRIPT_DIR}/../Dockerfile" "${SCRIPT_DIR}/.." diff --git a/scripts/build.sh b/scripts/build.sh index 90d07991d..029a6ef9b 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,9 +1,9 @@ -#!/bin/bash +#!/usr/bin/env bash set -e help() { - if [ $# -gt 0 ]; then + if [[ $# -gt 0 ]]; then printf 'Unexpected Argument (%s)\n' "$1" fi printf 'HELP: Usage: %s [Debug|Release|Profiling]\n' "$0" @@ -13,21 +13,21 @@ help() { # Note: # CMAKE_BUILD_TYPE="Debug" adds "-O0 -g" flags by default # CMAKE_BUILD_TYPE="Release" adds "-O3 -DNDEBUG" by default -if [[ "$BUILD_DEBUG" == "1" ]]; then +if [[ "${BUILD_DEBUG}" == "1" ]]; then CMAKE_BUILD_TYPE="Debug" -elif [[ "$BUILD_RELEASE" == "1" ]]; then +elif [[ "${BUILD_RELEASE}" == "1" ]]; then CMAKE_BUILD_TYPE="Release" -elif [[ "$BUILD_PROFILING" == "1" ]]; then +elif [[ "${BUILD_PROFILING}" == "1" ]]; then CMAKE_BUILD_TYPE="Profiling" fi -if [ $# -gt 0 ]; then +if [[ $# -gt 0 ]]; then case "$1" in Release|--release|-r) CMAKE_BUILD_TYPE="Release";; Profiling|--profiling|-p) CMAKE_BUILD_TYPE="Profiling";; Debug|--debug|-d) CMAKE_BUILD_TYPE="Debug";; --help|-h) help;; - *) help $1;; + *) help "$1";; esac fi @@ -36,29 +36,33 @@ echo "Building..." # see PREFIX in ./scripts/setup-dependencies.sh PREFIX="$(cd "$(dirname "$0")"/.. && pwd)/prefix" -if [ -z ${BUILD_DIR+x} ]; then +if [[ -z ${BUILD_DIR+x} ]]; then export BUILD_DIR=build fi -mkdir -p $BUILD_DIR -cd $BUILD_DIR +mkdir -p "${BUILD_DIR}" +cd "${BUILD_DIR}" CMAKE_FLAGS=-DCMAKE_PREFIX_PATH="${PREFIX}" CPUS=1 -if [[ "$OSTYPE" == "linux-gnu"* ]]; then +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then CPUS=$(grep -c ^processor /proc/cpuinfo) -elif [[ "$OSTYPE" == "darwin"* ]]; then +elif [[ "${OSTYPE}" == "darwin"* ]]; then CPUS=$(sysctl -n hw.ncpu) XCODE_CMDLINE_DIR=$(xcode-select -p) - CMAKE_FLAGS+=" -DCMAKE_C_COMPILER=${XCODE_CMDLINE_DIR}/usr/bin/clang -DCMAKE_CXX_COMPILER=${XCODE_CMDLINE_DIR}/usr/bin/clang++ -DCMAKE_CXX_FLAGS=-isystem\ /usr/local/include -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" + CMAKE_FLAGS+=" -DCMAKE_C_COMPILER=${XCODE_CMDLINE_DIR}/usr/bin/clang" + CMAKE_FLAGS+=" -DCMAKE_CXX_COMPILER=${XCODE_CMDLINE_DIR}/usr/bin/clang++" + CMAKE_FLAGS+=" -DCMAKE_CXX_FLAGS=-isystem\ /usr/local/include" + CMAKE_FLAGS+=" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" fi -if [[ -z $CMAKE_BUILD_TYPE ]]; then +if [[ -z "${CMAKE_BUILD_TYPE}" ]]; then echo "CMAKE_BUILD_TYPE not set, defaulting to debug" CMAKE_BUILD_TYPE="Debug" fi -echo "Building $CMAKE_BUILD_TYPE" +echo "Building ${CMAKE_BUILD_TYPE}" eval "cmake -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} ${CMAKE_FLAGS} .."
-make -j$CPUS +make "-j${CPUS}" +echo; echo "Build complete"; echo diff --git a/scripts/create-e2e-report.sh b/scripts/create-e2e-report.sh index 51214a4d0..5087107d5 100755 --- a/scripts/create-e2e-report.sh +++ b/scripts/create-e2e-report.sh @@ -1,25 +1,23 @@ #!/usr/bin/env bash set -e -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) TESTRUN_PATH=$1 function readAndFormatLogs() { logdir="$1" message="" - if [[ ! -d $logdir ]]; then - echo "$logdir does not exist" + if [[ ! -d "${logdir}" ]]; then + echo "${logdir} does not exist" return fi - for logfile in $(ls $logdir); do - logfile_path="$logdir/$logfile" - logfile_content=$(cat $logfile_path) - message+="\n
<details>\n<summary>$logfile</summary>\n\n\`\`\`\n$logfile_content\n\`\`\`\n</details>
\n" + for logfile in "${logdir}"/*; do + logfile_content=$(cat "${logfile}") + message+="\n
<details>\n<summary>${logfile}</summary>\n\n\`\`\`\n${logfile_content}\n\`\`\`\n</details>
\n" done - echo "$message" + echo "${message}" } -testrun_logs="\n
<details>\n<summary>View Testrun</summary>\n\n\`\`\`\n$(cat $TESTRUN_PATH/testrun.log)\n\`\`\`\n</details>
\n\n" -container_logs=$(readAndFormatLogs $TESTRUN_PATH/logs) +testrun_logs="\n
<details>\n<summary>View Testrun</summary>\n\n\`\`\`\n$(cat "${TESTRUN_PATH}/testrun.log")\n\`\`\`\n</details>
\n\n" +container_logs=$(readAndFormatLogs "${TESTRUN_PATH}/logs") -printf "# E2E Results\n# TestRun Logs\n%b\n\n# Container Logs\n%b\n" "$testrun_logs" "$container_logs" +printf "# E2E Results\n# TestRun Logs\n%b\n\n# Container Logs\n%b\n" "${testrun_logs}" "${container_logs}" diff --git a/scripts/install-build-tools.sh b/scripts/install-build-tools.sh index 523789d99..516975581 100755 --- a/scripts/install-build-tools.sh +++ b/scripts/install-build-tools.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash echo "Setting up build environment..." @@ -10,14 +10,14 @@ end="\033[0m" set -e SUDO='' -if (( $EUID != 0 )); then +if (( EUID != 0 )); then echo -e "non-root user, sudo required" SUDO='sudo' fi # Supporting these versions for buildflow PYTHON_VERSIONS=("3.10" "3.11" "3.12") -echo "Python3 versions supported: ${PYTHON_VERSIONS[@]}" +echo "Python3 versions supported: ${PYTHON_VERSIONS[*]}" # check if supported version of python3 is already installed, and save the version PY_INSTALLED='' @@ -36,12 +36,12 @@ ENV_NAME=".py_venv" # make a virtual environement to install python packages create_venv_install_python() { PY_LOC=$1 - if [[ -z "$PY_LOC" ]]; then + if [[ -z "${PY_LOC}" ]]; then echo "Python path not provided" exit 1 fi PY_VERSION=$2 - if [[ -z "$PY_VERSION" ]]; then + if [[ -z "${PY_VERSION}" ]]; then echo "python version not provided" exit 1 fi @@ -56,23 +56,23 @@ create_venv_install_python() { fi fi # install pip for linux - if [[ "$OSTYPE" == "linux-gnu"* ]]; then - if ! $SUDO apt install -y python3-pip; then + if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + if ! ${SUDO} apt install -y python3-pip; then echo "Failed to install python3-pip" wget https://bootstrap.pypa.io/get-pip.py - $SUDO python${PY_VERSION} get-pip.py + ${SUDO} "python${PY_VERSION}" get-pip.py rm get-pip.py fi # add deadsnakes to download the python venv module - $SUDO add-apt-repository -y ppa:deadsnakes/ppa + ${SUDO} add-apt-repository -y ppa:deadsnakes/ppa # make sure deadsnakes is available DEADSNAKES_AVAIL=$(wget -q --spider http://ppa.launchpad.net/deadsnakes/ppa/ubuntu/dists/focal/Release; echo $?) - if [[ $DEADSNAKES_AVAIL -ne 0 ]]; then + if [[ ${DEADSNAKES_AVAIL} -ne 0 ]]; then echo "Failed to add deadsnakes which is needed to install python3" exit 1 fi # install python3 venv module for linux - if ! $SUDO apt install -y "python${PY_VERSION}-venv"; then + if ! ${SUDO} apt install -y "python${PY_VERSION}-venv"; then echo "Failed to install python${PY_VERSION}-venv" exit 1 else @@ -87,7 +87,7 @@ create_venv_install_python() { exit 1 fi # activate virtual environment - if ! . "${ROOT}/scripts/activate-venv.sh"; then + if ! source "${ROOT}/scripts/activate-venv.sh"; then echo "Failed to activate virtual environment" exit 1 fi @@ -102,46 +102,45 @@ create_venv_install_python() { deactivate } -echo "OS Type: $OSTYPE" +echo "OS Type: ${OSTYPE}" # macOS install with homebrew -if [[ "$OSTYPE" == "darwin"* ]]; then +if [[ "${OSTYPE}" == "darwin"* ]]; then # macOS does not support running shell scripts as root with homebrew - if [[ $EUID -eq 0 ]]; then + if [[ ${EUID} -eq 0 ]]; then echo -e "Mac users should run this script without 'sudo'. Exiting..." exit 1 fi - CPUS=$(sysctl -n hw.ncpu) # ensure development environment is set correctly for clang - $SUDO xcode-select -switch /Library/Developer/CommandLineTools + ${SUDO} xcode-select -switch /Library/Developer/CommandLineTools if ! 
brew --version &>/dev/null; then echo -e "${cyan}Homebrew is required to install dependencies.${end}" exit 1 fi - brew install llvm@14 googletest google-benchmark lcov make wget cmake bash bc + brew install llvm@14 googletest google-benchmark lcov make wget cmake bash bc shellcheck brew upgrade bash BREW_ROOT=$(brew --prefix) CLANG_TIDY=/usr/local/bin/clang-tidy - if [[ ! -L "$CLANG_TIDY" ]]; then - $SUDO ln -s "${BREW_ROOT}/opt/llvm@14/bin/clang-tidy" /usr/local/bin/clang-tidy + if [[ ! -L "${CLANG_TIDY}" ]]; then + ${SUDO} ln -s "${BREW_ROOT}/opt/llvm@14/bin/clang-tidy" /usr/local/bin/clang-tidy fi GMAKE=/usr/local/bin/gmake - if [[ ! -L "$GMAKE" ]]; then - $SUDO ln -s $(xcode-select -p)/usr/bin/gnumake /usr/local/bin/gmake + if [[ ! -L "${GMAKE}" ]]; then + ${SUDO} ln -s "$(xcode-select -p)/usr/bin/gnumake" /usr/local/bin/gmake fi # install valid python version if not installed yet - if [[ -z "$PY_INSTALLED" ]]; then + if [[ -z "${PY_INSTALLED}" ]]; then PY_VERS=${PYTHON_VERSIONS[0]} FULL_PY="python${PY_VERS}" MAX_RETRIES=2 - while [[ $MAX_RETRIES -gt 0 ]]; do + while [[ ${MAX_RETRIES} -gt 0 ]]; do # try to install python version from homebrew and verify installation if brew install "${FULL_PY}"; then echo "${FULL_PY} installed successfully" @@ -151,50 +150,50 @@ if [[ "$OSTYPE" == "darwin"* ]]; then MAX_RETRIES=$((MAX_RETRIES - 1)) sleep 1 done - if [[ $MAX_RETRIES -eq 0 ]]; then + if [[ ${MAX_RETRIES} -eq 0 ]]; then echo "Python3 install with homebrew failed, attempted on ${FULL_PY}" exit 1 fi fi # Linux install with apt -elif [[ "$OSTYPE" == "linux-gnu"* ]]; then +elif [[ "${OSTYPE}" == "linux-gnu"* ]]; then # avoids getting stuck on interactive prompts which is essential for CI/CD export DEBIAN_FRONTEND=noninteractive - $SUDO apt update -y - $SUDO apt install -y build-essential wget cmake libgtest-dev libbenchmark-dev \ - lcov git software-properties-common rsync unzip bc + ${SUDO} apt update -y + ${SUDO} apt install -y build-essential wget cmake libgtest-dev libbenchmark-dev \ + lcov git software-properties-common rsync unzip bc shellcheck # Add LLVM GPG key (apt-key is deprecated in Ubuntu 21.04+ so using gpg) wget -qO - https://apt.llvm.org/llvm-snapshot.gpg.key | \ gpg --dearmor -o /usr/share/keyrings/llvm-archive-keyring.gpg echo "deb [signed-by=/usr/share/keyrings/llvm-archive-keyring.gpg] http://apt.llvm.org/focal/ llvm-toolchain-focal-14 main" | \ - $SUDO tee /etc/apt/sources.list.d/llvm.list + ${SUDO} tee /etc/apt/sources.list.d/llvm.list - $SUDO apt update -y - $SUDO apt install -y clang-format-14 clang-tidy-14 - $SUDO ln -sf $(which clang-format-14) /usr/local/bin/clang-format - $SUDO ln -sf $(which clang-tidy-14) /usr/local/bin/clang-tidy + ${SUDO} apt update -y + ${SUDO} apt install -y clang-format-14 clang-tidy-14 + ${SUDO} ln -sf "$(which clang-format-14)" /usr/local/bin/clang-format + ${SUDO} ln -sf "$(which clang-tidy-14)" /usr/local/bin/clang-tidy # install valid python version if not installed yet - if [[ -z "$PY_INSTALLED" ]]; then + if [[ -z "${PY_INSTALLED}" ]]; then PY_VERS=${PYTHON_VERSIONS[0]} FULL_PY="python${PY_VERS}" # try to install python version from apt and verify installation - $SUDO apt install -y software-properties-common - $SUDO add-apt-repository -y ppa:deadsnakes/ppa - $SUDO apt update -y + ${SUDO} apt install -y software-properties-common + ${SUDO} add-apt-repository -y ppa:deadsnakes/ppa + ${SUDO} apt update -y DEADSNAKES_AVAIL=$(wget -q --spider http://ppa.launchpad.net/deadsnakes/ppa/ubuntu/dists/focal/Release; echo $?) 
- if [[ $DEADSNAKES_AVAIL -ne 0 ]]; then + if [[ ${DEADSNAKES_AVAIL} -ne 0 ]]; then echo "Failed to add deadsnakes which is needed to install python3" exit 1 fi MAX_RETRIES=2 - while [[ $MAX_RETRIES -gt 0 ]]; do + while [[ ${MAX_RETRIES} -gt 0 ]]; do # install python3 valid version and venv module - if $SUDO apt install -y ${FULL_PY}; then + if ${SUDO} apt install -y "${FULL_PY}"; then echo "${FULL_PY} installed successfully" PY_INSTALLED=${PY_VERS} break @@ -202,7 +201,7 @@ elif [[ "$OSTYPE" == "linux-gnu"* ]]; then MAX_RETRIES=$((MAX_RETRIES - 1)) sleep 1 done - if [[ $MAX_RETRIES -eq 0 ]]; then + if [[ ${MAX_RETRIES} -eq 0 ]]; then echo "Python3 install with apt and deadsnakes failed, attempted on ${FULL_PY}" exit 1 fi @@ -216,7 +215,7 @@ if ! which "python${PY_INSTALLED}" &> /dev/null; then else # create virtual environment and install python packages for the valid python version PYTHON_PATH=$(which "python${PY_INSTALLED}") - create_venv_install_python "${PYTHON_PATH}" ${PY_INSTALLED} + create_venv_install_python "${PYTHON_PATH}" "${PY_INSTALLED}" fi echo "To activate the virtual env to run python, run 'source ./scripts/activate-venv.sh'" @@ -224,8 +223,8 @@ PYTHON_TIDY=/usr/local/bin/run-clang-tidy.py if [[ ! -f "${PYTHON_TIDY}" ]]; then echo -e "${green}Copying run-clang-tidy to /usr/local/bin${end}" wget https://raw.githubusercontent.com/llvm/llvm-project/e837ce2a32369b2e9e8e5d60270c072c7dd63827/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py - $SUDO mv run-clang-tidy.py /usr/local/bin + ${SUDO} mv run-clang-tidy.py /usr/local/bin fi -echo "Build environment setup complete." -echo "Next run './scripts/setup-dependencies.sh'." +echo; echo "Build environment setup complete." +echo "Next run './scripts/setup-dependencies.sh'"; echo diff --git a/scripts/lint.sh b/scripts/lint.sh index c6e4b0936..a84ae06e4 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -1,33 +1,43 @@ -#!/bin/bash +#!/usr/bin/env bash set -e # Usage: ./scripts/lint.sh echo "Linting..." 
-check_files=$(git ls-files \ - | grep -v -E ".jpg|.svg|3rdparty" | cat) +CHECK_FILES=$(git ls-files | grep -v -E ".jpg|.svg|3rdparty" | cat) -whitespace_files=$(printf '%s' "${check_files[@]}" | xargs egrep -l " +$" | grep -v -E ".md" | cat) +WHITESPACE_FILES=$(printf '%s' "${CHECK_FILES[@]}" | xargs egrep -l " +$" | grep -v -E ".md" | cat) -if [ -n "$whitespace_files" ]; then +if [[ -n "${WHITESPACE_FILES}" ]]; then echo "The following files have trailing whitespace:" - printf '%s\n' "${whitespace_files[@]}" + printf '%s\n' "${WHITESPACE_FILES[@]}" fi -newline_files=$(printf '%s' "${check_files[@]}" | xargs -r -I {} bash -c 'test "$(tail -c 1 "{}" | wc -l)" -eq 0 && echo {}' | cat) +NEWLINE_FILES=$(printf '%s' "${CHECK_FILES[@]}" | \ + xargs -r -I {} bash -c 'test "$(tail -c 1 "{}" | wc -l)" -eq 0 && echo {}' | cat) -if [ -n "$newline_files" ] ; then +if [[ -n "${NEWLINE_FILES}" ]] ; then echo "The following files need an EOF newline:" - printf '%s\n' "${newline_files[@]}" + printf '%s\n' "${NEWLINE_FILES[@]}" fi -if [ -n "$whitespace_files" ] || [ -n "$newline_files" ] ; then +if [[ -n "${WHITESPACE_FILES}" ]] || [[ -n "${NEWLINE_FILES}" ]] ; then exit 1 fi -check_format_files=$(git ls-files | grep -E "tools|tests|src|cmake-tests" \ - | grep -E "\..*pp") -clang-format --style=file --Werror --dry-run ${check_format_files[@]} +# enable parallelization for clang-format and clang-tidy +NUM_CORES=1 +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + NUM_CORES=$(grep -c ^processor /proc/cpuinfo) +elif [[ "${OSTYPE}" == "darwin"* ]]; then + NUM_CORES=$(sysctl -n hw.ncpu) +fi + +CHECK_FORMAT_FILES=$(git ls-files \ + | grep -E "tools|tests|src|cmake-tests" \ + | grep -E "\..*pp") +echo "${CHECK_FORMAT_FILES}" | \ + xargs -n1 -P"${NUM_CORES}" -I{} clang-format --style=file --Werror --dry-run {} if ! command -v clang-tidy &>/dev/null; then echo "clang-tidy does not appear to be installed" @@ -35,10 +45,10 @@ if ! command -v clang-tidy &>/dev/null; then exit 1 fi -if [ -z ${BUILD_DIR+x} ]; then +if [[ -z "${BUILD_DIR+x}" ]]; then echo "BUILD_DIR environment variable not found. Assuming default: build" export BUILD_DIR=build - if [ ! -d "${BUILD_DIR}" ]; then + if [[ ! -d "${BUILD_DIR}" ]]; then echo "${BUILD_DIR} directory not found. Please set BUILD_DIR or run \`export BUILD_DIR=${BUILD_DIR}; build.sh\` before linting." 
exit 1 fi @@ -46,6 +56,6 @@ fi # use python from the virtual environment for clang-tidy if source "./scripts/activate-venv.sh"; then - python /usr/local/bin/run-clang-tidy.py -p ${BUILD_DIR} "tests/.*/.*\.cpp|src/.*/.*\.cpp|tools/.*/.*\.cpp" + python /usr/local/bin/run-clang-tidy.py -j "${NUM_CORES}" -p "${BUILD_DIR}" "tests/.*/.*\.cpp|src/.*/.*\.cpp|tools/.*/.*\.cpp" deactivate fi diff --git a/scripts/lua_bench.sh b/scripts/lua_bench.sh index d2ab0ced0..9a67f6152 100755 --- a/scripts/lua_bench.sh +++ b/scripts/lua_bench.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash IP="localhost" PORT="8889" @@ -17,20 +17,20 @@ function print_help() { } for arg in "$@"; do - if [[ "$arg" == "-h" || "$arg" == "--help" ]]; then + if [[ "${arg}" == "-h" || "${arg}" == "--help" ]]; then print_help exit 0 - elif [[ "$arg" == "--ip"* ]]; then + elif [[ "${arg}" == "--ip"* ]]; then IP="${arg#--ip=}" - elif [[ "$arg" == "--port"* ]]; then + elif [[ "${arg}" == "--port"* ]]; then PORT="${arg#--port=}" - elif [[ "$arg" == "--loglevel"* ]]; then + elif [[ "${arg}" == "--loglevel"* ]]; then LOGLEVEL="${arg#--loglevel=}" fi done ./build/tools/bench/parsec/lua/lua_bench --component_id=0 \ - --ticket_machine0_endpoint=$IP:7777 --ticket_machine_count=1 \ - --shard_count=1 --shard0_count=1 --shard00_endpoint=$IP:5556 \ - --agent_count=1 --agent0_endpoint=$IP:$PORT \ - --loglevel=$LOGLEVEL scripts/gen_bytecode.lua $N_WALLETS -echo done + "--ticket_machine0_endpoint=${IP}:7777" --ticket_machine_count=1 \ + --shard_count=1 --shard0_count=1 "--shard00_endpoint=${IP}:5556" \ + --agent_count=1 "--agent0_endpoint=${IP}:${PORT}" \ + "--loglevel=${LOGLEVEL}" scripts/gen_bytecode.lua "${N_WALLETS}" +echo "done" diff --git a/scripts/native-system-benchmark.sh b/scripts/native-system-benchmark.sh index 1b7923fa2..95ec774ba 100755 --- a/scripts/native-system-benchmark.sh +++ b/scripts/native-system-benchmark.sh @@ -13,8 +13,8 @@ COMMIT=$(git rev-parse --short HEAD) TL=$(git rev-parse --show-toplevel) RT="${TL:-$CWD}" BLD="$RT"/build -SEEDDIR="$BLD"/preseeds -TESTDIR="$BLD"/test-$(date +"%s") +SEEDDIR="${BLD}"/preseeds +TESTDIR="${BLD}"/test-$(date +"%s") IFS='' read -r -d '' usage <<'EOF' Usage: %s [options] @@ -46,80 +46,106 @@ EOF _help= if [[ $# -eq 0 ]]; then - _help=1 + _help=1 fi _err=0 while [[ $# -gt 0 ]]; do - optarg= - shft_cnt=1 - if [[ "$1" =~ [=] ]]; then - optarg="${1#*=}" - elif [[ "$1" =~ ^-- && $# -gt 1 && ! "$2" =~ ^- ]]; then - optarg="$2" - shft_cnt=2 - elif [[ "$1" =~ ^-[^-] && $# -gt 1 && ! 
"$2" =~ ^- ]]; then - optarg="$2" - shft_cnt=2 - elif [[ "$1" =~ ^-[^-] ]]; then - optarg="${1/??/}" - fi - - case "$1" in - -s*|--samples*) DURATION="${optarg:-$DURATION}"; shift "$shft_cnt";; - --leak-check) RECORD=debug; DBG="$VALGRIND"; shift "$shft_cnt";; - --debug) RECORD=debug; shift "$shft_cnt";; - --profile) RECORD=perf; shift "$shft_cnt";; - --clean-tests) - printf '%s\n' 'Deleting all test directories' - rm -rf -- "$BLD"/test-*; shift "$shft_cnt";; - --clean-seeds) - printf '%s\n' 'Deleting all cached preseeds' - rm -rf -- "$BLD"/preseeds; shift "$shft_cnt";; - --clean) - printf '%s\n' 'Deleting all tests and preseeds' - rm -rf -- "$BLD"/test-* "$BLD"/preseeds; shift "$shft_cnt";; - -d*|--debugger*) - case "$optarg" in - gdb) DBG="$GDB";; - rr) DBG="$RR";; - *) DBG="$optarg";; - esac - shift "$shft_cnt";; - -c*|--config*) - if [[ "$optarg" = /* ]]; then - ORIG_CFG="${optarg}" - else - ORIG_CFG="$CWD/$optarg" - fi - shift "$shft_cnt";; - -h|--help) _help=1; shift "$shft_cnt";; - *) - printf 'Unrecognized option: %s\n' "$1" - _help=1; _err=1; - break;; - esac + optarg= + shft_cnt=1 + if [[ $1 =~ [=] ]]; then + optarg="${1#*=}" + elif [[ $1 =~ ^-- && $# -gt 1 && ! $2 =~ ^- ]]; then + optarg="$2" + shft_cnt=2 + elif [[ $1 =~ ^-[^-] && $# -gt 1 && ! $2 =~ ^- ]]; then + optarg="$2" + shft_cnt=2 + elif [[ $1 =~ ^-[^-] ]]; then + optarg="${1/??/}" + fi + + case "$1" in + -s* | --samples*) + DURATION="${optarg:-$DURATION}" + shift "${shft_cnt}" + ;; + --leak-check) + RECORD=debug + DBG="${VALGRIND}" + shift "${shft_cnt}" + ;; + --debug) + RECORD=debug + shift "${shft_cnt}" + ;; + --profile) + RECORD=perf + shift "${shft_cnt}" + ;; + --clean-tests) + printf '%s\n' 'Deleting all test directories' + rm -rf -- "${BLD}"/test-* + shift "${shft_cnt}" + ;; + --clean-seeds) + printf '%s\n' 'Deleting all cached preseeds' + rm -rf -- "${BLD}"/preseeds + shift "${shft_cnt}" + ;; + --clean) + printf '%s\n' 'Deleting all tests and preseeds' + rm -rf -- "${BLD}"/test-* "${BLD}"/preseeds + shift "${shft_cnt}" + ;; + -d* | --debugger*) + case "${optarg}" in + gdb) DBG="${GDB}" ;; + rr) DBG="${RR}" ;; + *) DBG="${optarg}" ;; + esac + shift "${shft_cnt}" + ;; + -c* | --config*) + if [[ ${optarg} == /* ]]; then + ORIG_CFG="${optarg}" + else + ORIG_CFG="${CWD}/${optarg}" + fi + shift "${shft_cnt}" + ;; + -h | --help) + _help=1 + shift "${shft_cnt}" + ;; + *) + printf 'Unrecognized option: %s\n' "$1" + _help=1 + _err=1 + break + ;; + esac done case "$DURATION" in - inf|infinity) DURATION=infinity;; - '') DURATION=30;; +inf | infinity) DURATION=infinity ;; +'') DURATION=30 ;; esac -if [[ -n "$_help" ]]; then - printf "$usage" "$(basename $0)" - exit "$_err" +if [[ -n ${_help} ]]; then + printf "%s %s" "${usage}" "$(basename "$0")" + exit "${_err}" fi -if [[ -z "$ORIG_CFG" ]]; then - printf '%s\n' 'No config specified; exiting' - exit 0 +if [[ -z ${ORIG_CFG} ]]; then + printf '%s\n' 'No config specified; exiting' + exit 0 fi # locate and move to test directory -mkdir -p "$TESTDIR" -printf 'Running test from %s\n' "$TESTDIR" -cd "$TESTDIR" || exit +mkdir -p "${TESTDIR}" +printf 'Running test from %s\n' "${TESTDIR}" +cd "${TESTDIR}" || exit # normalizes ports for local execution IFS='' read -r -d '' normalize <<'EOF' @@ -135,231 +161,237 @@ BEGIN { { print } EOF -CFG="$TESTDIR"/config -awk "$normalize" "$ORIG_CFG" > "$CFG" +CFG="${TESTDIR}/config" +awk "${normalize}" "${ORIG_CFG}" > "${CFG}" -twophase=$(grep -q '2pc=1' "$CFG" && printf '1\n' || printf '0\n') +twophase=$(grep -q '2pc=1' "${CFG}" && printf '1\n' || 
printf '0\n') arch= -if test "$twophase" -eq 0; then - arch='atomizer' +if test "${twophase}" -eq 0; then + arch='atomizer' else - arch='2pc' + arch='2pc' fi PERFS= on_int() { - printf 'Interrupting all components\n' - trap '' SIGINT # avoid interrupting ourself - for i in $PIDS; do # intentionally unquoted - if [[ -n "RECORD" ]]; then - kill -SIGINT -- "-$i" - else - kill -SIGINT -- "$i" - fi - done - wait - sleep 5 - - _failed= - for i in "$TESTDIR"/tx_samples_*.txt; do - if ! test -s "$i"; then - printf 'Could not generate plots: %s is not a non-empty, regular file\n' "$i" - _failed=1 - break - fi - done - - if [[ "$RECORD" = 'perf' ]]; then - for i in $PERFS; do - kill -SIGTERM -- "$i" - done - fi - - if [[ -x "$(which flamegraph.pl)" && -x "$(which stackcollapse-perf.pl)" && -n "$(find "$TESTDIR" -maxdepth 1 -name '*.perf' -print -quit)" ]]; then - printf 'Generating Flamegraphs\n' - for i in "$TESTDIR"/*.perf; do - waitpid -t 5 -e $(lsof -Qt "$i") &>/dev/null - perf script -i "$i" | stackcollapse-perf.pl > "${i/.perf/.folded}" - flamegraph.pl "${i/.perf/.folded}" > "${i/.perf/.svg}" - rm -- "${i/.perf/.folded}" - done - fi - - if [[ -z "$_failed" ]]; then - printf 'Generating plots\n' - source "${RT}/scripts/activate-venv.sh" - python "$RT"/scripts/plot-samples.py -d "$TESTDIR" - deactivate - fi - - printf 'Terminating any remaining processes\n' - for i in $PIDS; do # intentionally unquoted - if [[ -n "RECORD" ]]; then - kill -SIGTERM -- "-$i" - else - kill -SIGTERM -- "$i" - fi - done + printf 'Interrupting all components\n' + trap '' SIGINT # avoid interrupting ourself + for i in ${PIDS}; do # intentionally unquoted + if [[ -n "${RECORD}" ]]; then + kill -SIGINT -- "-${i}" + else + kill -SIGINT -- "${i}" + fi + done + wait + sleep 5 + + _failed= + for i in "${TESTDIR}"/tx_samples_*.txt; do + if ! 
test -s "${i}"; then + printf 'Could not generate plots: %s is not a non-empty, regular file\n' "${i}" + _failed=1 + break + fi + done + + if [[ "${RECORD}" == 'perf' ]]; then + for i in ${PERFS}; do + kill -SIGTERM -- "${i}" + done + fi + + if [[ -x "$(which flamegraph.pl)" && -x "$(which stackcollapse-perf.pl)" && -n "$(find "$TESTDIR" -maxdepth 1 -name '*.perf' -print -quit)" ]]; then + printf 'Generating Flamegraphs\n' + for i in "${TESTDIR}"/*.perf; do + waitpid -t 5 -e "$(lsof -Qt "$i")" &>/dev/null + perf script -i "$i" | stackcollapse-perf.pl >"${i/.perf/.folded}" + flamegraph.pl "${i/.perf/.folded}" >"${i/.perf/.svg}" + rm -- "${i/.perf/.folded}" + done + fi + + if [[ -z ${_failed} ]]; then + printf 'Generating plots\n' + source "${RT}/scripts/activate-venv.sh" + python "${RT}/scripts/plot-samples.py" -d "${TESTDIR}" + deactivate + fi + + printf 'Terminating any remaining processes\n' + for i in ${PIDS}; do # intentionally unquoted + if [[ -n "${RECORD}" ]]; then + kill -SIGTERM -- "-${i}" + else + kill -SIGTERM -- "${i}" + fi + done } trap on_int SIGINT getcount() { - count=$(grep -E "$1_count" "$CFG") - if test "$count"; then - printf '%s\n' "$count" | cut -d'=' -f2 - else - printf '0\n' - fi + count=$(grep -E "$1_count" "${CFG}") + if test "${count}"; then + printf '%s\n' "${count}" | cut -d'=' -f2 + else + printf '0\n' + fi } getpath() { - case "$1" in - # uniquely-named - archiver) printf '%s/src/uhs/atomizer/archiver/archiverd\n' "$BLD";; - atomizer) printf '%s/src/uhs/atomizer/atomizer/atomizer-raftd\n' "$BLD";; - watchtower) printf '%s/src/uhs/atomizer/watchtower/watchtowerd\n' "$BLD";; - coordinator) printf '%s/src/uhs/twophase/coordinator/coordinatord\n' "$BLD";; - - # special-case - seeder) printf '%s/tools/shard-seeder/shard-seeder\n' "$BLD";; - - # architecture-dependent - loadgen) - if test "$twophase" -eq 1; then - printf '%s/tools/bench/twophase-gen\n' "$BLD" - else - printf '%s/tools/bench/atomizer-cli-watchtower\n' "$BLD" - fi;; - shard) - if test "$twophase" -eq 1; then - printf '%s/src/uhs/twophase/locking_shard/locking-shardd\n' "$BLD" - else - printf '%s/src/uhs/atomizer/shard/shardd\n' "$BLD" - fi;; - sentinel) - if test "$twophase" -eq 1; then - printf '%s/src/uhs/twophase/sentinel_2pc/sentineld-2pc\n' "$BLD" - else - printf '%s/src/uhs/atomizer/sentinel/sentineld\n' "$BLD" - fi;; - *) printf 'Unrecognized component: %s\n' "$1";; - esac + case "$1" in + # uniquely-named + archiver) printf '%s/src/uhs/atomizer/archiver/archiverd\n' "${BLD}" ;; + atomizer) printf '%s/src/uhs/atomizer/atomizer/atomizer-raftd\n' "${BLD}" ;; + watchtower) printf '%s/src/uhs/atomizer/watchtower/watchtowerd\n' "${BLD}" ;; + coordinator) printf '%s/src/uhs/twophase/coordinator/coordinatord\n' "${BLD}" ;; + + # special-case + seeder) printf '%s/tools/shard-seeder/shard-seeder\n' "${BLD}" ;; + + # architecture-dependent + loadgen) + if test "${twophase}" -eq 1; then + printf '%s/tools/bench/twophase-gen\n' "${BLD}" + else + printf '%s/tools/bench/atomizer-cli-watchtower\n' "${BLD}" + fi + ;; + shard) + if test "${twophase}" -eq 1; then + printf '%s/src/uhs/twophase/locking_shard/locking-shardd\n' "${BLD}" + else + printf '%s/src/uhs/atomizer/shard/shardd\n' "${BLD}" + fi + ;; + sentinel) + if test "${twophase}" -eq 1; then + printf '%s/src/uhs/twophase/sentinel_2pc/sentineld-2pc\n' "${BLD}" + else + printf '%s/src/uhs/atomizer/sentinel/sentineld\n' "${BLD}" + fi + ;; + *) printf 'Unrecognized component: %s\n' "$1" ;; + esac } run() { - PROC_LOG="$TESTDIR"/"$PNAME.log" - 
PERF_LOG="$TESTDIR"/"$PNAME-perf.log" - COMP= - case "$RECORD" in - perf) - $@ &> "$PROC_LOG" & - COMP="$!" - perf record -F 99 -a -g -o "$PNAME".perf -p "$COMP" &> "$PERF_LOG" & - PERFS="$PERFS $!";; - debug) - ${DBG} "$@" &> "$PROC_LOG" & - COMP="$!";; - *) - $@ &> "$PROC_LOG" & - COMP="$!";; - esac - - if test -n "$BLOCK"; then - wait "$COMP" - fi - - echo "$COMP" + PROC_LOG="${TESTDIR}"/"${PNAME}.log" + PERF_LOG="${TESTDIR}"/"${PNAME}-perf.log" + COMP= + case "${RECORD}" in + perf) + "$@" &>"${PROC_LOG}" & + COMP="$!" + perf record -F 99 -a -g -o "${PNAME}".perf -p "${COMP}" &>"${PERF_LOG}" & + PERFS="${PERFS} $!" + ;; + debug) + ${DBG} "$@" &>"${PROC_LOG}" & + COMP="$!" + ;; + *) + "$@" &>"${PROC_LOG}" & + COMP="$!" + ;; + esac + + if test -n "${BLOCK}"; then + wait "${COMP}" + fi + + echo "${COMP}" } seed() { - seed_from=$(grep -E 'seed_from=.*' "$CFG" | cut -d'=' -f2) - seed_from="${seed_from:-0}" - seed_to=$(grep -E 'seed_to=.*' "$CFG" | cut -d'=' -f2) - seed_to="${seed_to:-0}" - seed_count=$(( "$seed_to" - "$seed_from" )) - if test ! "$seed_to" -gt "$seed_from"; then - printf 'Running without seeding\n' - return - fi - - preseed_id="$arch"_"$COMMIT"_"$seed_count" - if test ! -e "$SEEDDIR"/"$preseed_id"; then - printf 'Creating %s\n' "$preseed_id" - mkdir -p -- "$SEEDDIR"/"$preseed_id" - pushd "$SEEDDIR"/"$preseed_id" &> /dev/null - PID=$(PNAME=seeder BLOCK=1 run "$(getpath seeder)" "$CFG") - popd &> /dev/null - fi - - printf 'Using %s as seed\n' "$preseed_id" - for i in "$SEEDDIR"/"$preseed_id"/*; do - ln -sf -- "$i" "$TESTDIR"/"$(basename "$i")" - done + seed_from=$(grep -E 'seed_from=.*' "${CFG}" | cut -d'=' -f2) + seed_from="${seed_from:-0}" + seed_to=$(grep -E 'seed_to=.*' "${CFG}" | cut -d'=' -f2) + seed_to="${seed_to:-0}" + seed_count=$(("${seed_to}" - "${seed_from}")) + if test ! "${seed_to}" -gt "${seed_from}"; then + printf 'Running without seeding\n' + return + fi + + preseed_id="${arch}_${COMMIT}_${seed_count}" + if test ! 
-e "${SEEDDIR}/${preseed_id}"; then + printf 'Creating %s\n' "${preseed_id}" + mkdir -p -- "${SEEDDIR}"/"${preseed_id}" + pushd "${SEEDDIR}/${preseed_id}" &>/dev/null || exit + PID=$(PNAME=seeder BLOCK=1 run "$(getpath seeder)" "${CFG}") + popd &>/dev/null || exit + fi + + printf 'Using %s as seed\n' "${preseed_id}" + for i in "${SEEDDIR}/${preseed_id}"/*; do + ln -sf -- "${i}" "${TESTDIR}"/"$(basename "${i}")" + done } getpgid() { - ps -o pgid= "$1" + ps -o pgid= "$1" } PIDS= launch() { - last=$(getcount "$1") - if test "$last" -le 0; then - if test "$1" = 'loadgen'; then - printf 'Running without a loadgen\n' - else - printf 'Invalid count for %s\n' "$1" - exit 1 - fi - else - for id in $(seq 0 $(( "$last" - 1 )) ); do - raft=$(getcount "$1$id") - PNAME= - if test "$raft" -gt 0; then - for node in $(seq 0 $(( "$raft" - 1 )) ); do - export PNAME="$1${id}_$node" - PID=$(run "$(getpath "$1")" "$CFG" "$id" "$node") - for ep in $(awk -F'[":]' "/$PNAME.*endpoint/ { print \$3 }" "$CFG"); do - "$RT"/scripts/wait-for-it.sh -q -t 5 -h localhost -p "$ep" - done - printf 'Launched logical %s %d, replica %d [PID: %d]\n' "$1" "$id" "$node" "$PID" - if [[ -n "RECORD" ]]; then - PIDS="$PIDS $(getpgid $PID)" - else - PIDS="$PIDS $PID" - fi - done - else - export PNAME="$1${id}" - PID=$(run "$(getpath "$1")" "$CFG" "$id") - for ep in $(awk -F'[":]' "/$PNAME.*endpoint/ { print \$3 }" "$CFG"); do - "$RT"/scripts/wait-for-it.sh -q -t 5 -h localhost -p "$ep" - done - printf 'Launched %s %d [PID: %d]\n' "$1" "$id" "$PID" - if [[ -n "RECORD" ]]; then - PIDS="$PIDS $(getpgid $PID)" - else - PIDS="$PIDS $PID" - fi - fi - done - fi + last=$(getcount "$1") + if test "${last}" -le 0; then + if test "$1" = 'loadgen'; then + printf 'Running without a loadgen\n' + else + printf 'Invalid count for %s\n' "$1" + exit 1 + fi + else + for id in $(seq 0 $(("${last}" - 1))); do + raft=$(getcount "$1${id}") + PNAME= + if test "${raft}" -gt 0; then + for node in $(seq 0 $(("${raft}" - 1))); do + export PNAME="$1${id}_${node}" + PID=$(run "$(getpath "$1")" "${CFG}" "${id}" "${node}") + for ep in $(awk -F'[":]' "/$PNAME.*endpoint/ { print \$3 }" "$CFG"); do + "${RT}/scripts/wait-for-it.sh" -q -t 5 -h localhost -p "${ep}" + done + printf 'Launched logical %s %d, replica %d [PID: %d]\n' "$1" "${id}" "${node}" "${PID}" + if [[ -n "${RECORD}" ]]; then + PIDS="${PIDS} $(getpgid "${PID}")" + else + PIDS="${PIDS} ${PID}" + fi + done + else + export PNAME="$1${id}" + PID=$(run "$(getpath "$1")" "${CFG}" "${id}") + for ep in $(awk -F'[":]' "/$PNAME.*endpoint/ { print \$3 }" "${CFG}"); do + "${RT}"/scripts/wait-for-it.sh -q -t 5 -h localhost -p "${ep}" + done + printf 'Launched %s %d [PID: %d]\n' "$1" "${id}" "${PID}" + if [[ -n "${RECORD}" ]]; then + PIDS="${PIDS} $(getpgid "${PID}")" + else + PIDS="${PIDS} ${PID}" + fi + fi + done + fi } seed -if test "$twophase" -eq 0; then # atomizer - for comp in watchtower atomizer archiver shard sentinel loadgen; do - launch "$comp" - done +if test "${twophase}" -eq 0; then # atomizer + for comp in watchtower atomizer archiver shard sentinel loadgen; do + launch "${comp}" + done else # twophase - for comp in shard coordinator sentinel loadgen; do - launch "$comp" - done + for comp in shard coordinator sentinel loadgen; do + launch "${comp}" + done fi -printf 'Awaiting manual termination or timeout (%ds)\n' "$DURATION" -sleep "$DURATION" +printf 'Awaiting manual termination or timeout (%ds)\n' "${DURATION}" +sleep "${DURATION}" on_int diff --git a/scripts/parsec-run-local.sh 
b/scripts/parsec-run-local.sh index 643cc79e0..34c44cf34 100755 --- a/scripts/parsec-run-local.sh +++ b/scripts/parsec-run-local.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash IP="localhost" PORT="8888" @@ -18,46 +18,46 @@ function print_help() { } for arg in "$@"; do - if [[ "$arg" == "-h" || "$arg" == "--help" ]]; then + if [[ "${arg}" == "-h" || "${arg}" == "--help" ]]; then print_help exit 0 - elif [[ "$arg" == "--runner_type"* ]]; then - if [[ "$arg" == "--runner_type=lua" ]]; then + elif [[ "${arg}" == "--runner_type"* ]]; then + if [[ "${arg}" == "--runner_type=lua" ]]; then RUNNER_TYPE="lua" - elif [[ "$arg" != "--runner_type=evm" ]]; then + elif [[ "${arg}" != "--runner_type=evm" ]]; then echo "unknown runner type, using evm" fi - elif [[ "$arg" == "--ip"* ]]; then + elif [[ "${arg}" == "--ip"* ]]; then IP="${arg#--ip=}" - elif [[ "$arg" == "--port"* ]]; then + elif [[ "${arg}" == "--port"* ]]; then PORT="${arg#--port=}" - elif [[ "$arg" == "--loglevel"* ]]; then + elif [[ "${arg}" == "--loglevel"* ]]; then LOGLEVEL="${arg#--loglevel=}" fi done mkdir -p logs -echo Running agent on $IP:$PORT -echo Log level = $LOGLEVEL -echo Runner type = $RUNNER_TYPE +echo Running agent on "${IP}:${PORT}" +echo Log level = "${LOGLEVEL}" +echo Runner type = "${RUNNER_TYPE}" ./build/src/parsec/runtime_locking_shard/runtime_locking_shardd --shard_count=1 \ - --shard0_count=1 --shard00_endpoint=$IP:5556 \ - --shard00_raft_endpoint=$IP:5557 --node_id=0 --component_id=0 \ - --agent_count=1 --agent0_endpoint=$IP:6666 --ticket_machine_count=1 \ - --ticket_machine0_endpoint=$IP:7777 --loglevel=$LOGLEVEL \ + --shard0_count=1 "--shard00_endpoint=${IP}:5556" \ + "--shard00_raft_endpoint=${IP}:5557" --node_id=0 --component_id=0 \ + --agent_count=1 "--agent0_endpoint=${IP}:6666" --ticket_machine_count=1 \ + "--ticket_machine0_endpoint=${IP}:7777" "--loglevel=${LOGLEVEL}" \ > logs/shardd.log & sleep 1 -./scripts/wait-for-it.sh -s $IP:5556 -t 60 -- \ +./scripts/wait-for-it.sh -s "${IP}:5556" -t 60 -- \ ./build/src/parsec/ticket_machine/ticket_machined --shard_count=1 \ - --shard0_count=1 --shard00_endpoint=$IP:5556 --node_id=0 \ - --component_id=0 --agent_count=1 --agent0_endpoint=$IP:6666 \ - --ticket_machine_count=1 --ticket_machine0_endpoint=$IP:7777 \ - --loglevel=$LOGLEVEL > logs/ticket_machined.log & + --shard0_count=1 "--shard00_endpoint=${IP}:5556" --node_id=0 \ + --component_id=0 --agent_count=1 "--agent0_endpoint=${IP}:6666" \ + --ticket_machine_count=1 "--ticket_machine0_endpoint=${IP}:7777" \ + "--loglevel=${LOGLEVEL}" > logs/ticket_machined.log & sleep 1 -./scripts/wait-for-it.sh -s $IP:7777 -t 60 -- ./scripts/wait-for-it.sh -s \ - $IP:5556 -t 60 -- ./build/src/parsec/agent/agentd --shard_count=1 \ - --shard0_count=1 --shard00_endpoint=$IP:5556 --node_id=0 --component_id=0 \ - --agent_count=1 --agent0_endpoint=$IP:$PORT --ticket_machine_count=1 \ - --ticket_machine0_endpoint=$IP:7777 --loglevel=$LOGLEVEL \ - --runner_type=$RUNNER_TYPE > logs/agentd.log & +./scripts/wait-for-it.sh -s "${IP}:7777" -t 60 -- ./scripts/wait-for-it.sh -s \ + "${IP}:5556" -t 60 -- ./build/src/parsec/agent/agentd --shard_count=1 \ + --shard0_count=1 "--shard00_endpoint=${IP}:5556" --node_id=0 --component_id=0 \ + --agent_count=1 "--agent0_endpoint=${IP}:${PORT}" --ticket_machine_count=1 \ + "--ticket_machine0_endpoint=${IP}:7777" "--loglevel=${LOGLEVEL}" \ + "--runner_type=${RUNNER_TYPE}" > logs/agentd.log & diff --git a/scripts/pylint.sh b/scripts/pylint.sh index 9c22df164..ebf77c6c5 100755 --- a/scripts/pylint.sh +++ 
b/scripts/pylint.sh @@ -1,50 +1,49 @@ #!/usr/bin/env bash ROOT="$(cd "$(dirname "$0")"/.. && pwd)" -PREFIX="${ROOT}"/prefix MIN_CODE_QUALITY=8.0 get_code_score() { - if [ -n "$1" ]; then - # set minimum quality to user input (int/float) if provided and (5.0 <= input <= 10.0) - if [[ $1 =~ ^([0-9]+)*([\.][0-9])?$ ]]; then - if (( $(echo "$1 >= 5.0" | bc -l) )) && (( $(echo "$1 <= 10.0" | bc -l) )); then - MIN_CODE_QUALITY=$1 - else - # In the future, we want code quality to be at minimum 8.0/10.0 - echo "Code quality score must be between 5.0 and 10.0, inclusive." - echo "Recommended code quality score is >= 8.0." - exit 1 - fi - else - echo "Code quality score must be an integer or floating point number." - exit 1 + if [[ -n "$1" ]]; then + # set minimum quality to user input (int/float) if provided and (5.0 <= input <= 10.0) + if [[ $1 =~ ^([0-9]+)*([\.][0-9])?$ ]]; then + if (( $(echo "$1 >= 5.0" | bc -l) )) && (( $(echo "$1 <= 10.0" | bc -l) )); then + MIN_CODE_QUALITY=$1 + else + # In the future, we want code quality to be at minimum 8.0/10.0 + echo "Code quality score must be between 5.0 and 10.0, inclusive." + echo "Recommended code quality score is >= 8.0." + exit 1 + fi + else + echo "Code quality score must be an integer or floating point number." + exit 1 + fi fi - fi - echo "Linting Python code with minimum quality of $MIN_CODE_QUALITY/10.0..." + echo "Linting Python code with minimum quality of ${MIN_CODE_QUALITY}/10.0..." } check_pylint() { - if ! command -v pylint &>/dev/null; then - echo "pylint is not installed." - echo "Run 'sudo ./scripts/install-build-tools.sh' to install pylint." - exit 1 - fi + if ! command -v pylint &>/dev/null; then + echo "pylint is not installed." + echo "Run 'sudo ./scripts/install-build-tools.sh' to install pylint." + exit 1 + fi } -get_code_score $1 +get_code_score "$1" if source "${ROOT}/scripts/activate-venv.sh"; then - echo "Virtual environment activated." + echo "Virtual environment activated." else - echo "Failed to activate virtual environment." - exit 1 + echo "Failed to activate virtual environment." + exit 1 fi check_pylint if ! pylint scripts src tests tools --rcfile=.pylintrc \ - --fail-under=$MIN_CODE_QUALITY $(git ls-files '*.py'); then + "--fail-under=${MIN_CODE_QUALITY}" $(git ls-files '*.py'); then echo "Linting failed, please fix the issues and rerun." - exit 1 + exit 1 else - echo "Linting passed." + echo "Linting passed." fi diff --git a/scripts/setup-dependencies.sh b/scripts/setup-dependencies.sh index ea4b4c771..4863e9708 100755 --- a/scripts/setup-dependencies.sh +++ b/scripts/setup-dependencies.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash echo "Setting up dependencies..." @@ -11,44 +11,44 @@ set -e # install in a custom prefix rather than /usr/local. by default, this # chooses "prefix" directory alongside "scripts" directory. PREFIX="$(cd "$(dirname "$0")"/.. 
&& pwd)/prefix" -echo "Will install local dependencies in the following prefix: $PREFIX" -mkdir -p "$PREFIX"/{lib,include} +echo "Will install local dependencies in the following prefix: ${PREFIX}" +mkdir -p "${PREFIX}"/{lib,include} CMAKE_BUILD_TYPE="Debug" -if [[ "$BUILD_RELEASE" == "1" ]]; then +if [[ "${BUILD_RELEASE}" == "1" ]]; then CMAKE_BUILD_TYPE="Release" fi CPUS=1 -if [[ "$OSTYPE" == "linux-gnu"* ]]; then +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then CPUS=$(grep -c ^processor /proc/cpuinfo) -elif [[ "$OSTYPE" == "darwin"* ]]; then +elif [[ "${OSTYPE}" == "darwin"* ]]; then CPUS=$(sysctl -n hw.ncpu) fi LEVELDB_VERSION="1.23" echo -e "${green}Building LevelDB from sources...${end}" -wget https://github.com/google/leveldb/archive/${LEVELDB_VERSION}.tar.gz +wget "https://github.com/google/leveldb/archive/${LEVELDB_VERSION}.tar.gz" rm -rf "leveldb-${LEVELDB_VERSION}-${CMAKE_BUILD_TYPE}" -tar xzvf ${LEVELDB_VERSION}.tar.gz -rm -rf ${LEVELDB_VERSION}.tar.gz -mv leveldb-${LEVELDB_VERSION} "leveldb-${LEVELDB_VERSION}-${CMAKE_BUILD_TYPE}" +tar xzvf "${LEVELDB_VERSION}.tar.gz" +rm -rf "${LEVELDB_VERSION}.tar.gz" +mv "leveldb-${LEVELDB_VERSION}" "leveldb-${LEVELDB_VERSION}-${CMAKE_BUILD_TYPE}" cd "leveldb-${LEVELDB_VERSION}-${CMAKE_BUILD_TYPE}" -cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DLEVELDB_BUILD_TESTS=0 -DLEVELDB_BUILD_BENCHMARKS=0 -DBUILD_SHARED_LIBS=0 -DHAVE_SNAPPY=0 . -make -j$CPUS +cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" "-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}" -DLEVELDB_BUILD_TESTS=0 -DLEVELDB_BUILD_BENCHMARKS=0 -DBUILD_SHARED_LIBS=0 -DHAVE_SNAPPY=0 . +make "-j${CPUS}" make install cd .. NURAFT_VERSION="1.3.0" echo -e "${green}Building NuRaft from sources...${end}" -wget https://github.com/eBay/NuRaft/archive/v${NURAFT_VERSION}.tar.gz +wget "https://github.com/eBay/NuRaft/archive/v${NURAFT_VERSION}.tar.gz" rm -rf "NuRaft-${NURAFT_VERSION}-${CMAKE_BUILD_TYPE}" -tar xzvf v${NURAFT_VERSION}.tar.gz -rm v${NURAFT_VERSION}.tar.gz -mv NuRaft-${NURAFT_VERSION} "NuRaft-${NURAFT_VERSION}-${CMAKE_BUILD_TYPE}" +tar xzvf "v${NURAFT_VERSION}.tar.gz" +rm "v${NURAFT_VERSION}.tar.gz" +mv "NuRaft-${NURAFT_VERSION}" "NuRaft-${NURAFT_VERSION}-${CMAKE_BUILD_TYPE}" cd "NuRaft-${NURAFT_VERSION}-${CMAKE_BUILD_TYPE}" ./prepare.sh -if [[ "$BUILD_RELEASE" == "1" ]]; then +if [[ "${BUILD_RELEASE}" == "1" ]]; then # If we're doing a release build, remove the examples and tests rm -rf examples tests mkdir examples @@ -58,12 +58,12 @@ if [[ "$BUILD_RELEASE" == "1" ]]; then fi mkdir -p build cd build -cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DDISABLE_SSL=1 .. -make -j$CPUS static_lib +cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" "-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}" -DDISABLE_SSL=1 .. +make "-j${CPUS}" static_lib -echo -e "${green}Copying nuraft to $PREFIX/lib and $PREFIX/include${end}" -cp libnuraft.a $PREFIX/lib -cp -r ../include/libnuraft $PREFIX/include +echo -e "${green}Copying nuraft to \"${PREFIX}/lib\" and ${PREFIX}/include${end}" +cp libnuraft.a "${PREFIX}/lib" +cp -r ../include/libnuraft "${PREFIX}/include" cd ../.. @@ -72,63 +72,63 @@ rm -rf lua-5.4.3 tar zxf lua-5.4.3.tar.gz rm -rf lua-5.4.3.tar.gz cd lua-5.4.3 -make -j$CPUS -make INSTALL_TOP=$PREFIX install +make "-j${CPUS}" +make "INSTALL_TOP=${PREFIX}" install cd .. 
-if [[ "$OSTYPE" != "darwin"* ]]; then +if [[ "${OSTYPE}" != "darwin"* ]]; then # For Mac Silicon: this curl install creates problems for building tools/bench/parsec/evm/ CURL_VERSION="7.83.1" - wget https://curl.se/download/curl-${CURL_VERSION}.tar.gz - rm -rf curl-${CURL_VERSION} - tar xzvf curl-${CURL_VERSION}.tar.gz - rm -rf curl-${CURL_VERSION}.tar.gz - mkdir -p curl-${CURL_VERSION}/build - cd curl-${CURL_VERSION}/build + wget "https://curl.se/download/curl-${CURL_VERSION}.tar.gz" + rm -rf "curl-${CURL_VERSION}" + tar xzvf "curl-${CURL_VERSION}.tar.gz" + rm -rf "curl-${CURL_VERSION}.tar.gz" + mkdir -p "curl-${CURL_VERSION}/build" + cd "curl-${CURL_VERSION}/build" ../configure --prefix="${PREFIX}" --disable-shared --without-ssl --without-libpsl --without-libidn2 --without-brotli --without-zstd --without-zlib - make -j$CPUS + make "-j${CPUS}" make install cd ../.. fi JSONCPP_VERSION="1.9.5" -wget https://github.com/open-source-parsers/jsoncpp/archive/refs/tags/${JSONCPP_VERSION}.tar.gz -rm -rf jsoncpp-${JSONCPP_VERSION} -tar xzvf ${JSONCPP_VERSION}.tar.gz -rm -rf ${JSONCPP_VERSION}.tar.gz -mkdir -p jsoncpp-${JSONCPP_VERSION}/build -cd jsoncpp-${JSONCPP_VERSION}/build +wget "https://github.com/open-source-parsers/jsoncpp/archive/refs/tags/${JSONCPP_VERSION}.tar.gz" +rm -rf "jsoncpp-${JSONCPP_VERSION}" +tar xzvf "${JSONCPP_VERSION}.tar.gz" +rm -rf "${JSONCPP_VERSION}.tar.gz" +mkdir -p "jsoncpp-${JSONCPP_VERSION}/build" +cd "jsoncpp-${JSONCPP_VERSION}/build" cmake .. -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DBUILD_SHARED_LIBS=NO -DBUILD_STATIC_LIBS=YES -DJSONCPP_WITH_TESTS=OFF -DJSONCPP_WITH_POST_BUILD_UNITTEST=OFF -make -j$CPUS +make "-j${CPUS}" make install cd ../.. # NOTE: evmc v10.0.0 requires evmone v0.9.0 # evmc v10.1.1 requires evmone v0.10.0 (which requires c++20) EVMC_VER=10.0.0 -wget https://github.com/ethereum/evmc/archive/refs/tags/v${EVMC_VER}.zip -rm -rf evmc-${EVMC_VER} -unzip v${EVMC_VER}.zip -rm v${EVMC_VER}.zip -cd evmc-${EVMC_VER} +wget "https://github.com/ethereum/evmc/archive/refs/tags/v${EVMC_VER}.zip" +rm -rf "evmc-${EVMC_VER}" +unzip "v${EVMC_VER}.zip" +rm "v${EVMC_VER}.zip" +cd "evmc-${EVMC_VER}" mkdir build cd build cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" .. -make -j$CPUS +make "-j${CPUS}" make install cd ../.. # NOTE: updating evmone to v0.10.0 requires c++20 EVMONE_VER=0.9.1 -wget https://github.com/ethereum/evmone/archive/refs/tags/v${EVMONE_VER}.zip -rm -rf evmone-${EVMONE_VER} -unzip v${EVMONE_VER}.zip -rm v${EVMONE_VER}.zip -cd evmone-${EVMONE_VER} +wget "https://github.com/ethereum/evmone/archive/refs/tags/v${EVMONE_VER}.zip" +rm -rf "evmone-${EVMONE_VER}" +unzip "v${EVMONE_VER}.zip" +rm "v${EVMONE_VER}.zip" +cd "evmone-${EVMONE_VER}" rm -rf evmc -mv ../evmc-${EVMC_VER} ./evmc +mv "../evmc-${EVMC_VER}" ./evmc mkdir ./evmc/.git -if [[ "$OSTYPE" == "darwin"* ]]; then +if [[ "${OSTYPE}" == "darwin"* ]]; then # Mac Silicon: clang 'ar' does not allow empty member list, fails w/ -DBUILD_SHARED_LIBS=OFF cmake -S . -B build -DCMAKE_INSTALL_PREFIX="${PREFIX}" else @@ -138,7 +138,7 @@ cmake --build build --parallel cd build make install cd ../.. -rm -rf evmone-${EVMONE_VER} +rm -rf "evmone-${EVMONE_VER}" wget https://github.com/chfast/ethash/archive/e3e002ecc25ca699349aa62fa38e7b7cc5f653af.zip rm -rf ethash-e3e002ecc25ca699349aa62fa38e7b7cc5f653af @@ -149,8 +149,8 @@ mkdir build cd build cmake -DETHASH_BUILD_ETHASH=OFF -DETHASH_BUILD_TESTS=OFF .. cmake --build . 
--parallel -cp ./lib/keccak/libkeccak.a $PREFIX/lib -cp -r ../include/ethash $PREFIX/include +cp ./lib/keccak/libkeccak.a "${PREFIX}/lib" +cp -r ../include/ethash "${PREFIX}/include" cd ../.. wget https://gnu.askapache.com/libmicrohttpd/libmicrohttpd-0.9.75.tar.gz @@ -160,8 +160,11 @@ rm libmicrohttpd-0.9.75.tar.gz cd libmicrohttpd-0.9.75 mkdir build cd build -../configure --prefix="${PREFIX}" --disable-curl --disable-examples --disable-doc --disable-shared --disable-https -make -j $CPUS +../configure "--prefix=${PREFIX}" --disable-curl --disable-examples --disable-doc --disable-shared --disable-https +make -j "${CPUS}" make install cd ../../ rm -rf libmicrohttpd-0.9.75 + +echo; echo "Setup dependencies complete." +echo "Next run './scripts/build.sh'"; echo diff --git a/scripts/shellcheck.sh b/scripts/shellcheck.sh new file mode 100755 index 000000000..3ad1a76c4 --- /dev/null +++ b/scripts/shellcheck.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Usage: ./scripts/shellcheck.sh [view] +ROOT="$(cd "$(dirname "$0")"/.. && pwd)" +SHELLCHECK_REPORT="${ROOT}/shellcheck-report.txt" + +NUM_CORES=1 +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + NUM_CORES=$(grep -c ^processor /proc/cpuinfo) +elif [[ "${OSTYPE}" == "darwin"* ]]; then + NUM_CORES=$(sysctl -n hw.ncpu) +fi + +if ! command -v shellcheck &>/dev/null; then + echo "shellcheck is not installed." + echo "Run 'sudo ./scripts/install-build-tools.sh' to install shellcheck." + exit 1 +fi + +# run shellcheck in parallel on all tracked shell scripts +# the exit status of this run reports failure if even a warning is found (warnings are +# treated as errors by default), so we search the generated report for true errors instead +git ls-files '*.sh' | xargs -n 1 -P "${NUM_CORES}" shellcheck > "${SHELLCHECK_REPORT}" +
+# verify the shellcheck report file exists to confirm the shellcheck run was successful +if [[ ! -f "${SHELLCHECK_REPORT}" ]]; then + echo "Shellcheck report ${SHELLCHECK_REPORT} not found. Exiting..." + exit 1 +else + echo "Shellcheck report: ${SHELLCHECK_REPORT}" + if [[ ! -s "${SHELLCHECK_REPORT}" ]]; then + echo "Shellcheck report is empty." + echo "Either there are no warnings or errors across all shell scripts (unlikely)," + echo "or shellcheck failed to run successfully." + exit 0 + fi +fi + +# view non-empty shellcheck report, includes warnings and/or errors +if [[ "$#" -gt 0 && "$1" == "view" ]]; then + echo "Shellcheck report: ${SHELLCHECK_REPORT}" + cat "${SHELLCHECK_REPORT}" +fi + +# detect fatal errors in shellcheck report +if grep -qE "error" "${SHELLCHECK_REPORT}"; then + echo "Shellcheck found fatal errors in report: ${SHELLCHECK_REPORT}" + echo "Shellcheck failed." + exit 1 +else + echo "Shellcheck found no fatal errors in report: ${SHELLCHECK_REPORT}" + echo "Shellcheck passed." + exit 0 +fi diff --git a/scripts/test-e2e-minikube.sh b/scripts/test-e2e-minikube.sh index d25900163..103d34f69 100755 --- a/scripts/test-e2e-minikube.sh +++ b/scripts/test-e2e-minikube.sh @@ -7,49 +7,49 @@ BUILD_DOCKER=${TESTRUN_BUILD_DOCKER:-1} # Make sure we have the necessary tools installed required_executables=(minikube docker go helm kubectl) -for e in ${required_executables[@]}; do - if ! command -v $e &> /dev/null; then - echo "'$e' command not be found! This is required to run. Please install it." +for e in "${required_executables[@]}"; do + if ! command -v "${e}" &> /dev/null; then + echo "'${e}' command could not be found! This is required to run. Please install it."
exit 1 fi done # Start minikube cluster with opencbdc profile -minikube_status=$(minikube -p $MINIKUBE_PROFILE status | grep apiserver | awk '{ print $2 }') -if [ "$minikube_status" != "Running" ]; then - echo "🔄 Starting minkube cluster with profile '$MINIKUBE_PROFILE'..." - minikube -p $MINIKUBE_PROFILE start +minikube_status=$(minikube -p "${MINIKUBE_PROFILE}" status | grep apiserver | awk '{ print $2 }') +if [[ "${minikube_status}" != "Running" ]]; then + echo "🔄 Starting minikube cluster with profile '${MINIKUBE_PROFILE}'..." + minikube -p "${MINIKUBE_PROFILE}" start else - echo "✅ minikube cluster with profile '$MINIKUBE_PROFILE' is currently running." + echo "✅ minikube cluster with profile '${MINIKUBE_PROFILE}' is currently running." fi # Update Kubernetes context to default to this minikube cluster profile -minikube -p $MINIKUBE_PROFILE update-context +minikube -p "${MINIKUBE_PROFILE}" update-context # Connect host docker cli to docker daemon in minikube vm -eval $(minikube -p $MINIKUBE_PROFILE docker-env) +eval "$(minikube -p "${MINIKUBE_PROFILE}" docker-env)" # Build docker image -if [[ $BUILD_DOCKER -eq 1 ]]; then +if [[ ${BUILD_DOCKER} -eq 1 ]]; then echo "🔄 Building docker image for 'opencbdc-tx'" - $SCRIPT_DIR/build-docker.sh + "${SCRIPT_DIR}/build-docker.sh" fi # Change to the test directory and fetch dependencies -cd $SCRIPT_DIR/../charts/tests +cd "${SCRIPT_DIR}/../charts/tests" echo "🔄 Downloading Go dependencies for testing..." go mod download -x # Set testrun_id and path to store logs from testrun and containers -TESTRUN_ID=${TESTRUN_ID:-$(uuidgen)} -TESTRUN_PATH=$SCRIPT_DIR/../testruns/$TESTRUN_ID -TESTRUN_LOG_PATH="$TESTRUN_PATH/testrun.log" -mkdir -p $TESTRUN_PATH +TESTRUN_ID="${TESTRUN_ID:-$(uuidgen)}" +TESTRUN_PATH="${SCRIPT_DIR}/../testruns/${TESTRUN_ID}" +TESTRUN_LOG_PATH="${TESTRUN_PATH}/testrun.log" +mkdir -p "${TESTRUN_PATH}" # Run test and output test logs to console as well as a file for reference later echo "🔄 Running tests..."
-TESTRUN_ID=$TESTRUN_ID go test 2>&1 | tee -a $TESTRUN_LOG_PATH
+TESTRUN_ID=${TESTRUN_ID} go test 2>&1 | tee -a "${TESTRUN_LOG_PATH}"
 
 # Generate a markdown report of the testrun with logs
-$SCRIPT_DIR/create-e2e-report.sh $TESTRUN_PATH >> $TESTRUN_PATH/report.md
-echo "View test results at $(realpath $TESTRUN_PATH)"
+"${SCRIPT_DIR}/create-e2e-report.sh" "${TESTRUN_PATH}" >> "${TESTRUN_PATH}/report.md"
+echo "View test results at $(realpath "${TESTRUN_PATH}")"
diff --git a/scripts/test-transaction.sh b/scripts/test-transaction.sh
index aeb4e3e78..5099553e8 100644
--- a/scripts/test-transaction.sh
+++ b/scripts/test-transaction.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 set -e
 
 BUILD_DIR="build"
@@ -11,27 +11,27 @@ wallet1="wallet1.dat"
 client1="client1.dat"
 
 # Create wallet0 and save wallet id to var, mint
-wallet0_id=$($BUILD_DIR/src/uhs/client/client-cli $config_file $client0 $wallet0 mint 10 10 | grep -E '^[^\W]+$')
+wallet0_id=$("${BUILD_DIR}/src/uhs/client/client-cli" "${config_file}" "${client0}" "${wallet0}" mint 10 10 | grep -E '^[^\W]+$')
 
-echo "Wallet0_PK: $wallet0_id"
+echo "Wallet0_PK: ${wallet0_id}"
 
 # Sync wallet0
-$BUILD_DIR/src/uhs/client/client-cli $config_file $client0 $wallet0 sync
+"${BUILD_DIR}/src/uhs/client/client-cli" "${config_file}" "${client0}" "${wallet0}" sync
 
 # Create wallet1 and save to var
-wallet1_id=$($BUILD_DIR/src/uhs/client/client-cli $config_file $client1 $wallet1 newaddress | grep -E '^[^\W]+$')
+wallet1_id=$("${BUILD_DIR}/src/uhs/client/client-cli" "${config_file}" "${client1}" "${wallet1}" newaddress | grep -E '^[^\W]+$')
 
-echo "Wallet1_PK: $wallet1_id"
+echo "Wallet1_PK: ${wallet1_id}"
 
 # Simulate transaction
-importinput=$($BUILD_DIR/src/uhs/client/client-cli $config_file $client0 $wallet0 send 33 $wallet1_id | grep -E '^[a-zA-z0-9]{70,}')
+importinput=$("${BUILD_DIR}/src/uhs/client/client-cli" "${config_file}" "${client0}" "${wallet0}" send 33 "${wallet1_id}" | grep -E '^[a-zA-Z0-9]{70,}')
 
-echo "ImportInput: $importinput"
+echo "ImportInput: ${importinput}"
 
-$BUILD_DIR/src/uhs/client/client-cli $config_file $client1 $wallet1 importinput $importinput
+"${BUILD_DIR}/src/uhs/client/client-cli" "${config_file}" "${client1}" "${wallet1}" importinput "${importinput}"
 
 # Sync wallet1
-$BUILD_DIR/src/uhs/client/client-cli $config_file $client1 $wallet1 sync
+"${BUILD_DIR}/src/uhs/client/client-cli" "${config_file}" "${client1}" "${wallet1}" sync
 
 # Sync wallet1
-$BUILD_DIR/src/uhs/client/client-cli $config_file $client1 $wallet1 info
+"${BUILD_DIR}/src/uhs/client/client-cli" "${config_file}" "${client1}" "${wallet1}" info
diff --git a/scripts/test.sh b/scripts/test.sh
index 615a8acb5..e84f51f65 100755
--- a/scripts/test.sh
+++ b/scripts/test.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Exit script on failure.
 set -e
@@ -77,13 +77,13 @@ do
         -d|--build-dir)
             shift
             ARG="$1"
-            if [[ $ARG == "" || ${ARG:0:1} == "-" ]]
+            if [[ ${ARG} == "" || ${ARG:0:1} == "-" ]]
             then
                 echo -n "ERROR: The -d flag was used, "
                 echo "but a valid build folder was not given."
                 exit 1
             fi
-            BUILD_DIR=$ARG
+            BUILD_DIR=${ARG}
             shift
             ;;
         -ni|--no-integration-tests)
@@ -120,7 +120,7 @@ then
     BUILD_DIR="${REPO_TOP_DIR}/build"
 fi
 
-if [[ ! -d "$BUILD_DIR" ]]
+if [[ ! -d "${BUILD_DIR}" ]]
 then
     echo "ERROR: The folder '${BUILD_DIR}' was not found."
     exit 1
@@ -129,24 +129,25 @@ fi
 # If the build folder is a relative path, convert it to an absolute path
 # to avoid potential relative path errors and to improve readability
 # if the path is written to stdout.
-export BUILD_DIR=$(cd "$BUILD_DIR"; pwd)
+BUILD_DIR=$(cd "${BUILD_DIR}"; pwd)
+export BUILD_DIR
 
 echo "Build folder: '${BUILD_DIR}'"
 echo
 
 run_test_suite () {
-    cd "$BUILD_DIR"
+    cd "${BUILD_DIR}"
     find . -name '*.gcda' -exec rm {} \;
-    "$PWD"/"$1" "${GTEST_FLAGS[@]}"
+    "${PWD}"/"$1" "${GTEST_FLAGS[@]}"
 
-    if [[ "$MEASURE_COVERAGE" == "true" ]]
+    if [[ "${MEASURE_COVERAGE}" == "true" ]]
     then
         echo "Checking test coverage."
         LOCATION="$2"
-        rm -rf "$LOCATION"
-        mkdir -p "$LOCATION"
+        rm -rf "${LOCATION}"
+        mkdir -p "${LOCATION}"
         find . \( -name '*.gcno' -or -name '*.gcda' \) \
-            -and -not -path '*coverage*' -exec rsync -R \{\} "$LOCATION" \;
-        cd "$LOCATION"
+            -and -not -path '*coverage*' -exec rsync -R \{\} "${LOCATION}" \;
+        cd "${LOCATION}"
 
         lcov -c -i -d . -o base.info --rc lcov_branch_coverage=1
         lcov -c -d . -o test.info --rc lcov_branch_coverage=1
@@ -161,21 +162,21 @@ run_test_suite () {
     fi
 }
 
-if [[ "$RUN_UNIT_TESTS" == "true" ]]
+if [[ "${RUN_UNIT_TESTS}" == "true" ]]
 then
     echo "Running unit tests..."
     find "${REPO_TOP_DIR}"/tests/unit/ -name '*.cfg' \
-        -exec rsync \{\} "$BUILD_DIR" \;
+        -exec rsync \{\} "${BUILD_DIR}" \;
     run_test_suite "tests/unit/run_unit_tests" "unit_tests_coverage"
 else
     echo "Skipping unit tests."
 fi
 echo
 
-if [[ "$RUN_INTEGRATION_TESTS" == "true" ]]
+if [[ "${RUN_INTEGRATION_TESTS}" == "true" ]]
 then
     echo "Running integration tests..."
-    cp "${REPO_TOP_DIR}"/tests/integration/*.cfg "$BUILD_DIR"
+    cp "${REPO_TOP_DIR}"/tests/integration/*.cfg "${BUILD_DIR}"
     run_test_suite "tests/integration/run_integration_tests" \
         "integration_tests_coverage"
 else
diff --git a/scripts/wait-for-it.sh b/scripts/wait-for-it.sh
index d990e0d36..4b8ceb1e1 100755
--- a/scripts/wait-for-it.sh
+++ b/scripts/wait-for-it.sh
@@ -3,13 +3,13 @@ WAITFORIT_cmdname=${0##*/}
 
-echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
+echoerr() { if [[ ${WAITFORIT_QUIET} -ne 1 ]]; then echo "$@" 1>&2; fi }
 
 usage()
 {
     cat << USAGE >&2
 Usage:
-    $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
+    ${WAITFORIT_cmdname} host:port [-s] [-t timeout] [-- command args]
     -h HOST | --host=HOST       Host or IP under test
     -p PORT | --port=PORT       TCP port under test
                                 Alternatively, you specify the host and port as host:port
@@ -24,47 +24,47 @@ USAGE
 
 wait_for()
 {
-    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
-        echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
+    if [[ ${WAITFORIT_TIMEOUT} -gt 0 ]]; then
+        echoerr "${WAITFORIT_cmdname}: waiting ${WAITFORIT_TIMEOUT} seconds for ${WAITFORIT_HOST}:${WAITFORIT_PORT}"
     else
-        echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
+        echoerr "${WAITFORIT_cmdname}: waiting for ${WAITFORIT_HOST}:${WAITFORIT_PORT} without a timeout"
     fi
     WAITFORIT_start_ts=$(date +%s)
     while :
     do
-        if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
-            nc -z $WAITFORIT_HOST $WAITFORIT_PORT
+        if [[ ${WAITFORIT_ISBUSY} -eq 1 ]]; then
+            nc -z "${WAITFORIT_HOST}" "${WAITFORIT_PORT}"
             WAITFORIT_result=$?
         else
-            (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
+            (echo -n > "/dev/tcp/${WAITFORIT_HOST}/${WAITFORIT_PORT}") >/dev/null 2>&1
             WAITFORIT_result=$?
         fi
-        if [[ $WAITFORIT_result -eq 0 ]]; then
+        if [[ ${WAITFORIT_result} -eq 0 ]]; then
             WAITFORIT_end_ts=$(date +%s)
-            echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
+            echoerr "${WAITFORIT_cmdname}: ${WAITFORIT_HOST}:${WAITFORIT_PORT} is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
             break
         fi
         sleep 1
     done
-    return $WAITFORIT_result
+    return "${WAITFORIT_result}"
 }
 
 wait_for_wrapper()
 {
     # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
-    if [[ $WAITFORIT_QUIET -eq 1 ]]; then
-        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
+    if [[ ${WAITFORIT_QUIET} -eq 1 ]]; then
+        timeout "${WAITFORIT_BUSYTIMEFLAG}" "${WAITFORIT_TIMEOUT}" "$0" --quiet --child "--host=${WAITFORIT_HOST}" "--port=${WAITFORIT_PORT}" "--timeout=${WAITFORIT_TIMEOUT}" &
     else
-        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
+        timeout "${WAITFORIT_BUSYTIMEFLAG}" "${WAITFORIT_TIMEOUT}" "$0" --child "--host=${WAITFORIT_HOST}" "--port=${WAITFORIT_PORT}" "--timeout=${WAITFORIT_TIMEOUT}" &
     fi
     WAITFORIT_PID=$!
-    trap "kill -INT -$WAITFORIT_PID" INT
-    wait $WAITFORIT_PID
+    trap 'kill -INT -'"${WAITFORIT_PID}" INT
+    wait "${WAITFORIT_PID}"
     WAITFORIT_RESULT=$?
-    if [[ $WAITFORIT_RESULT -ne 0 ]]; then
-        echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
+    if [[ ${WAITFORIT_RESULT} -ne 0 ]]; then
+        echoerr "${WAITFORIT_cmdname}: timeout occurred after waiting ${WAITFORIT_TIMEOUT} seconds for ${WAITFORIT_HOST}:${WAITFORIT_PORT}"
     fi
-    return $WAITFORIT_RESULT
+    return "${WAITFORIT_RESULT}"
 }
 
 # process arguments
@@ -72,7 +72,7 @@ while [[ $# -gt 0 ]]
 do
     case "$1" in
         *:* )
-        WAITFORIT_hostport=(${1//:/ })
+        WAITFORIT_hostport=("${1%%:*}" "${1##*:}")
         WAITFORIT_HOST=${WAITFORIT_hostport[0]}
         WAITFORIT_PORT=${WAITFORIT_hostport[1]}
         shift 1
@@ -91,7 +91,7 @@ do
         ;;
         -h)
         WAITFORIT_HOST="$2"
-        if [[ $WAITFORIT_HOST == "" ]]; then break; fi
+        if [[ ${WAITFORIT_HOST} == "" ]]; then break; fi
        shift 2
         ;;
         --host=*)
@@ -100,7 +100,7 @@ do
         ;;
         -p)
         WAITFORIT_PORT="$2"
-        if [[ $WAITFORIT_PORT == "" ]]; then break; fi
+        if [[ ${WAITFORIT_PORT} == "" ]]; then break; fi
         shift 2
         ;;
         --port=*)
@@ -109,7 +109,7 @@ do
         ;;
         -t)
         WAITFORIT_TIMEOUT="$2"
-        if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
+        if [[ ${WAITFORIT_TIMEOUT} == "" ]]; then break; fi
         shift 2
         ;;
         --timeout=*)
@@ -131,7 +131,7 @@ do
     esac
 done
 
-if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
+if [[ "${WAITFORIT_HOST}" == "" || "${WAITFORIT_PORT}" == "" ]]; then
     echoerr "Error: you need to provide a host and port to test."
     usage
 fi
@@ -143,40 +143,40 @@ WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}
 
 # Check to see if timeout is from busybox?
 WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
-WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)
+WAITFORIT_TIMEOUT_PATH=$(realpath "${WAITFORIT_TIMEOUT_PATH}" 2>/dev/null || readlink -f "${WAITFORIT_TIMEOUT_PATH}")
 
 WAITFORIT_BUSYTIMEFLAG=""
-if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
+if [[ "${WAITFORIT_TIMEOUT_PATH}" =~ "busybox" ]]; then
     WAITFORIT_ISBUSY=1
     # Check if busybox timeout uses -t flag
     # (recent Alpine versions don't support -t anymore)
-    if timeout &>/dev/stdout | grep -q -e '-t '; then
+    if timeout 2>&1 | grep -q -e '-t '; then
         WAITFORIT_BUSYTIMEFLAG="-t"
     fi
 else
     WAITFORIT_ISBUSY=0
 fi
 
-if [[ $WAITFORIT_CHILD -gt 0 ]]; then
+if [[ ${WAITFORIT_CHILD} -gt 0 ]]; then
     wait_for
-    WAITFORIT_RESULT=$?
-    exit $WAITFORIT_RESULT
+    WAITFORIT_RESULT="$?"
+    exit "${WAITFORIT_RESULT}"
 else
-    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
+    if [[ ${WAITFORIT_TIMEOUT} -gt 0 ]]; then
         wait_for_wrapper
-        WAITFORIT_RESULT=$?
+        WAITFORIT_RESULT="$?"
     else
         wait_for
-        WAITFORIT_RESULT=$?
+        WAITFORIT_RESULT="$?"
     fi
 fi
 
-if [[ $WAITFORIT_CLI != "" ]]; then
-    if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
-        echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
-        exit $WAITFORIT_RESULT
+if [[ ${#WAITFORIT_CLI[@]} -gt 0 ]]; then
+    if [[ ${WAITFORIT_RESULT} -ne 0 && ${WAITFORIT_STRICT} -eq 1 ]]; then
+        echoerr "${WAITFORIT_cmdname}: strict mode, refusing to execute subprocess"
+        exit "${WAITFORIT_RESULT}"
     fi
     exec "${WAITFORIT_CLI[@]}"
 else
-    exit $WAITFORIT_RESULT
+    exit "${WAITFORIT_RESULT}"
 fi
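
For reference, a local-usage sketch of the new shellcheck wrapper added above. This is illustrative only and not part of the patch; the 'view' argument, the install-tools hint, and the report filename are taken from the script's own comments and variables, everything else is assumed.

# Install shellcheck if it is missing (the script itself points to this helper).
sudo ./scripts/install-build-tools.sh

# Lint every tracked *.sh file; findings are written to shellcheck-report.txt
# at the repository root, and the script exits non-zero only if errors are found.
./scripts/shellcheck.sh

# Same run, but also print the report to the terminal.
./scripts/shellcheck.sh view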