Skip to content

CI/CD overhaul

CI/CD overhaul #402

Workflow file for this run

name: Test actions

# Run on every branch push, on PRs targeting main, and on manual dispatch.
on:
  push:
    branches: ["**"]
  pull_request:
    branches: [main]
  workflow_dispatch:

env:
  # ANSI colour prefixes for test result lines; \033 is the ESC character,
  # rendered later by `echo -e`. Single quotes keep the backslash literal
  # (double quotes would treat \0 as a YAML escape sequence).
  bashPass: '\033[32;1mPASSED -'
  bashWarn: '\033[33;1mWARN -'
  bashFail: '\033[31;1mFAILED -'
  bashEnd: '\033[0m'
jobs:
  test-format-check:
    # NOTE(review): was ubuntu-20.04, which is retired on GitHub-hosted
    # runners; ubuntu-latest matches every other job in this workflow.
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        # Each entry checks one combination of formatting defects in a commit.
        inputs:
          # This first case (no exclude-dirs/files) also verifies that
          # omitting the exclude inputs raises no API errors.
          - test-type: Functional
            test-status: failure
            description: "Whitespace, CRLF, and Format Failure"
            path: formatting
          - test-type: Functional
            test-status: failure
            description: "CRLF and Formatting Error"
            path: formatting
            exclude-dirs: "filesWithTrailingWhitespace"
          - test-type: Functional
            test-status: failure
            description: "CRLF and Whitespace Error"
            path: formatting
            exclude-dirs: "filesWithFormattingErrors"
          # FIX: directory names were misspelled (filesWithFormattingIssues,
          # filesWithWTrailingWhitespace), so nothing was actually excluded.
          - test-type: Functional
            test-status: failure
            description: "CRLF Error"
            path: formatting
            exclude-dirs: "filesWithFormattingErrors,filesWithTrailingWhitespace"
          - test-type: Functional
            test-status: failure
            description: "Formatting and Whitespace Error"
            path: formatting
            exclude-dirs: "filesWithCRLFEndings"
          - test-type: Functional
            test-status: failure
            description: "Formatting Error"
            path: formatting
            exclude-dirs: "filesWithTrailingWhitespace,filesWithCRLFEndings"
          - test-type: Functional
            test-status: failure
            description: "Whitespace Error"
            path: formatting
            exclude-dirs: "filesWithFormattingErrors,filesWithCRLFEndings"
          - test-type: Functional
            test-status: success
            description: "No Errors"
            path: formatting
            exclude-dirs: "filesWithFormattingErrors,filesWithCRLFEndings,filesWithTrailingWhitespace"
            exclude-files: "badFile.c"
    steps:
      - uses: actions/checkout@v3
      # Run the formatting action under test; continue-on-error lets the
      # follow-up step compare the outcome against the expected status.
      - env:
          stepName: "${{ matrix.inputs.test-type }} | ${{ matrix.inputs.test-status }} | ${{ matrix.inputs.description }}"
        name: ${{ env.stepName }}
        uses: ./formatting
        id: format-test
        continue-on-error: true
        with:
          path: ${{ matrix.inputs.path }}
          exclude-files: ${{ matrix.inputs.exclude-files }}
          exclude-dirs: ${{ matrix.inputs.exclude-dirs }}
      - env:
          stepName: Check Failure Test Cases
        name: ${{ env.stepName }}
        id: check-failure-test-cases
        shell: bash
        run: |
          # Pass only when the step outcome matches the expected test status.
          set +e
          if [ "${{ steps.format-test.outcome }}" = "${{ matrix.inputs.test-status }}" ]; then
            echo -e "${{ env.bashPass }} ${{ matrix.inputs.test-type }} | ${{ matrix.inputs.description }} | Had Expected ${{ matrix.inputs.test-status }} ${{ env.bashEnd }}"
          else
            echo -e "${{ env.bashFail }} ${{ matrix.inputs.test-type }} | ${{ matrix.inputs.description }} | Had Unexpected ${{ matrix.inputs.test-status }} ${{ env.bashEnd }}"
            set -e
            exit 1
          fi
test-exe-monitor-success-cases:
strategy:
fail-fast: false
matrix:
#os: [ubuntu-latest, windows-latest]
os: [ubuntu-latest]
inputs:
[
{
test-type: Functional,
test-status: Success,
description: "Exit Code Found",
exe-path: executable-monitor/test.out,
success-exit-code: 0,
log-dir: logDirectory,
timeout-seconds: 60,
},
{
test-type: Functional,
test-status: Success,
description: "Success Line Found",
exe-path: executable-monitor/test.out,
success-line: "SLEEPING FOR 6 SECONDS",
log-dir: logDirectory,
timeout-seconds: 30,
},
{
test-type: Functional,
test-status: Success,
description: "Exit Code and Success Line | Exit Code Found",
exe-path: executable-monitor/test.out,
success-line: "LINE_THAT_WILL_NOT_PRINT",
success-exit-code: 0,
timeout-seconds: 60,
},
{
test-type: Functional,
test-status: Success,
description: "Exit Code and Success Line | Success Line Found",
exe-path: executable-monitor/test.out,
success-line: "SLEEPING FOR 6 SECONDS",
success-exit-code: 0,
timeout-seconds: 30,
},
{
test-type: Functional,
test-status: Success,
description: "Retry Needed | Exit Code Found",
exe-path: executable-monitor/test_exit_current_minutes.out,
success-exit-code: RETRY_EXIT_CODE,
retry-attempts: 10,
timeout-seconds: 60,
},
{
test-type: Functional,
test-status: Success,
description: "Retry Needed | Success Line Found",
exe-path: executable-monitor/test_exit_current_minutes.out,
success-line: RETRY_SUCCESS_LINE,
retry-attempts: 10,
timeout-seconds: 60,
},
{
test-type: Functional,
test-status: Success,
description: "Retry Needed | Exit Code and Success Line | Exit Code Found",
exe-path: executable-monitor/test_exit_current_minutes.out,
success-line: "LINE_THAT_WILL_NOT_PRINT",
success-exit-code: RETRY_EXIT_CODE,
retry-attempts: 10,
timeout-seconds: 60,
},
{
test-type: Functional,
test-status: Success,
description: "Retry Needed | Exit Code and Success Line | Success Line Found",
# Use the EXE that doesn't exit with the current minutes
exe-path: executable-monitor/test.out,
success-line: RETRY_SUCCESS_LINE,
success-exit-code: 1,
retry-attempts: 10,
timeout-seconds: 60,
},
]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
# - env:
# stepName: Install Windows Build tools
# name: ${{ env.stepName }}
# if: runner.os == 'Windows'
# id: install-windows-build-tools
- env:
stepName: Install Ubuntu Build Tools
name: ${{ env.stepName }}
if: runner.os == 'Linux'
id: install-ubuntu-build--tools
run: |
# Install Ubuntu Build Tools
echo "::group::${{ env.stepName }}"
sudo apt install build-essential
exitStatus=$?
echo "::endgroup::"
if [ "$exitStatus" = "0" ]; then
echo -e "${{ env.bashPass }} ${{ env.stepName }} ${{ env.bashEnd }}"
else
echo -e "${{ env.bashFail }} ${{ env.stepName }} ${{ env.bashEnd }}"
exit 1
fi
- env:
stepName: Compile Executable Monitor Test Files
name: ${{ env.stepName }}
id: compile-executable-monitor-test-files
shell: bash
run: |
# ${{ env.stepName }}
echo "::group::${{ env.stepName }}"
gcc executable-monitor/test.c -o executable-monitor/test.out
gcc -DEXIT_WITH_MINUTES executable-monitor/test.c -o executable-monitor/test_exit_current_minutes.out
readlink -f executable-monitor/test.out
readlink -f executable-monitor/test_exit_current_minutes.out
exitStatus=$?
echo "::endgroup::"
if [ "$exitStatus" = "0" ]; then
echo -e "${{ env.bashPass }} ${{ env.stepName }} ${{ env.bashEnd }}"
else
echo -e "${{ env.bashFail }} ${{ env.stepName }} ${{ env.bashEnd }}"
exit 1
fi
# Get future times from now, then look for that in the executable
- env:
stepName: Get Future Times
name: ${{ env.stepName }}
id: future-time
shell: bash
run: |
# Get times for future demos
if [[ "${{ matrix.inputs.success-line }}" == "RETRY_SUCCESS_LINE" ]]; then
echo "Setting Success Line to in the future"
echo "successLine=$(date --date='3 minutes' +%H:%M)" >> "$GITHUB_ENV"
else
echo "Keeping current success line"
echo "successLine=${{ matrix.inputs.success-line}}" >> "$GITHUB_ENV"
fi
if [[ "${{ matrix.inputs.success-exit-code }}" == "RETRY_EXIT_CODE" ]]; then
echo "Setting Exit Code to in the future"
echo "exitCode=$(date --date='3 minutes' +%M)" >> "$GITHUB_ENV"
else
echo "Keeping current Exit Code"
echo "exitCode=${{ matrix.inputs.success-exit-code}}" >> "$GITHUB_ENV"
fi
- env:
stepName: "${{ matrix.inputs.test-type }} | ${{ matrix.inputs.test-status }} | ${{matrix.inputs.description }}"
name: ${{ env.stepName }}
id: test-executable-monitor-action-success-line
uses: ./executable-monitor
with:
exe-path: ${{ matrix.inputs.exe-path }}
timeout-seconds: ${{ matrix.inputs.timeout-seconds }}
success-line: ${{ env.successLine }}
success-exit-code: ${{ env.exitCode}}
retry-attempts: ${{ matrix.inputs.retry-attempts }}
log-dir: ${{ matrix.inputs.log-dir}}
test-exe-monitor-failure-cases:
strategy:
fail-fast: false
matrix:
#os: [ubuntu-latest, windows-latest]
os: [ubuntu-latest]
inputs:
[
{
test-type: API,
test-status: Failure,
description: "No Executable Provided",
timeout-seconds: 30,
},
{
test-type: API,
test-status: Failure,
description: "No Success Condition Provided",
exe-path: executable-monitor/test.out,
log-dir: logDirectory,
timeout-seconds: 30,
},
{
test-type: Functional,
test-status: Falure,
description: "Timeout Cause No Success Line To Print",
exe-path: executable-monitor/test.out,
# This is a line that would print if not for timeout
success-line: "SLEEPING FOR 9 SECONDS",
timeout-seconds: 2,
},
{
test-type: Functional,
test-status: Failure,
description: "Timeout Cause No Exit Code",
exe-path: executable-monitor/test.out,
# This is an exit status that should be met if not for timeout
success-exit-code: 0,
timeout-seconds: 2,
},
{
test-type: Functional,
test-status: Failure,
description: "Timeout Cause Neither Condition",
exe-path: executable-monitor/test.out,
# These are exit conditions that should be met if not for timeout
success-line: "SLEEPING FOR 9 SECONDS",
success-exit-code: 0,
timeout-seconds: 2,
},
{
test-type: Functional,
test-status: Falure,
description: "Retry | Timeout Cause No Success Line To Print",
exe-path: executable-monitor/test.out,
# This is a line that would print if not for timeout
success-line: "SLEEPING FOR 9 SECONDS",
timeout-seconds: 2,
retry-attempts: 2,
},
{
test-type: Functional,
test-status: Failure,
description: "Retry | Timeout Cause No Exit Code",
exe-path: executable-monitor/test.out,
# This is an exit status that should be met if not for timeout
success-exit-code: 0,
timeout-seconds: 2,
retry-attempts: 2,
},
{
test-type: Functional,
test-status: Failure,
description: "Retry | Timeout Cause Neither Condition",
exe-path: executable-monitor/test.out,
# These are exit conditions that should be met if not for timeout
success-line: "SLEEPING FOR 9 SECONDS",
success-exit-code: 0,
timeout-seconds: 2,
retry-attempts: 2,
},
]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
# - env:
# stepName: Install Windows Build tools
# name: ${{ env.stepName }}
# if: runner.os == 'Windows'
# id: install-windows-build-tools
# uses: setup-msbuild@v1.1
- env:
stepName: Install Ubuntu Build Tools
name: ${{ env.stepName }}
if: runner.os == 'Linux'
id: install-ubuntu-build--tools
run: |
# Install Ubuntu Build Tools
echo "::group::${{ env.stepName }}"
sudo apt install build-essential
exitStatus=$?
echo "::endgroup::"
if [ "$exitStatus" = "0" ]; then
echo -e "${{ env.bashPass }} ${{ env.stepName }} ${{ env.bashEnd }}"
else
echo -e "${{ env.bashFail }} ${{ env.stepName }} ${{ env.bashEnd }}"
exit 1
fi
- env:
stepName: Compile Executable Monitor Test Files
name: ${{ env.stepName }}
id: compile-executable-monitor-test-files
shell: bash
run: |
# ${{ env.stepName }}
echo "::group::${{ env.stepName }}"
gcc executable-monitor/test.c -o executable-monitor/test.out
gcc -DEXIT_WITH_MINUTES executable-monitor/test.c -o executable-monitor/test_exit_current_minutes.out
readlink -f executable-monitor/test.out
readlink -f executable-monitor/test_exit_current_minutes.out
exitStatus=$?
echo "::endgroup::"
if [ "$exitStatus" = "0" ]; then
echo -e "${{ env.bashPass }} ${{ env.stepName }} ${{ env.bashEnd }}"
else
echo -e "${{ env.bashFail }} ${{ env.stepName }} ${{ env.bashEnd }}"
exit 1
fi
- env:
stepName: "${{ matrix.inputs.test-type }} | ${{ matrix.inputs.test-status }} | ${{matrix.inputs.description }} "
name: ${{ env.stepName }}
id: test-failure-condition
uses: ./executable-monitor
continue-on-error: true
with:
exe-path: ${{ matrix.inputs.exe-path }}
timeout-seconds: ${{ matrix.inputs.timeout-seconds }}
success-line: ${{ matrix.inputs.success-line }}
success-exit-code: ${{ matrix.inputs.success-exit-code }}
retry-attempts: ${{ matrix.inputs.retry-attempts }}
log-dir: ${{ matrix.inputs.log-dir}}
- env:
stepName: Check Failure Test Cases
name: ${{ env.stepName }}
id: check-failure-test-cases
shell: bash
run: |
# Check Failure Test Cases
if [ "${{ steps.test-failure-condition.outcome}}" = "failure" ]; then
echo -e "${{ env.bashPass }} ${{ matrix.inputs.test-type }} | ${{ matrix.inputs.description}} | Failed As Intended ${{ env.bashEnd }}"
else
echo -e "${{ env.bashFail }} ${{ matrix.inputs.test-type }} | ${{ matrix.inputs.description}} | Had Unexpected Pass ${{ env.bashEnd }}"
exit 1
fi
test-complexity-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v3
with:
repository: FreeRTOS/coreMQTT
ref: main
path: coreMQTT
- name: Test complexity check action
uses: ./complexity
with:
path: coreMQTT
# For coreMQTT the code complexity threshold is 10.
horrid_threshold: 10
test-doxygen-zip-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v3
with:
python-version: '3.11.0'
- uses: actions/checkout@v3
with:
repository: aws/aws-iot-device-sdk-embedded-C
submodules: recursive
ref: main
path: aws-iot-device-sdk-embedded-C
- name: Test doxygen build action
uses: ./doxygen
with:
path: ./aws-iot-device-sdk-embedded-C
libs_parent_dir_path: libraries/standard,libraries/aws
generate_zip: true
test-doxygen-non-zip-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v3
with:
repository: FreeRTOS/coreMQTT
ref: main
path: coreMQTT
- name: Test doxygen build action
uses: ./doxygen
with:
path: coreMQTT
test-spell-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v3
with:
repository: FreeRTOS/coreMQTT
ref: main
path: coreMQTT
- name: Test spell check action
uses: ./spellings
with:
path: coreMQTT
test-coverage-cop:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v3
with:
repository: FreeRTOS/coreMQTT
ref: main
path: coreMQTT
- name: Build
run: |
sudo apt-get install -y lcov
cmake -S ./coreMQTT/test -B build/ \
-G "Unix Makefiles" \
-DCMAKE_BUILD_TYPE=Debug \
-DBUILD_CLONE_SUBMODULES=ON \
-DCMAKE_C_FLAGS='--coverage -Wall -Wextra -Werror -DNDEBUG -DLIBRARY_LOG_LEVEL=LOG_DEBUG'
make -C build/ all
- name: Test
run: |
cd build/
ctest -E system --output-on-failure
cd ..
- name: Run Coverage
run: |
make -C build/ coverage
declare -a EXCLUDE=("\*test/\*" "\*CMakeCCompilerId\*" "\*mocks\*")
echo ${EXCLUDE[@]} | xargs lcov --rc lcov_branch_coverage=1 -r build/coverage.info -o build/coverage.info
lcov --rc lcov_branch_coverage=1 --list build/coverage.info
- name: Test coverage cop action
uses: ./coverage-cop
with:
path: ./build/coverage.info
branch-coverage-min: 70
line-coverage-min: 100
test-memory-statistics:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Test memory statistics action
uses: ./memory_statistics
with:
path: memory_statistics/test
config: ./memory_statistics_config.json
output: ./size_table_new.html
check_against: ./size_table_expected.html
test-link-verifier:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup python environment
uses: actions/setup-python@v3
with:
python-version: '3.11.0'
- name: Test link verifier action
uses: ./link-verifier
with:
path: ./
exclude-dirs: complexity,formatting
include-file-types: .c,.html
test-manifest-verifier:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup python environment
uses: actions/setup-python@v3
with:
python-version: '3.x'
- name: Checkout the FreeRTOS/FreeRTOS repository for testing action on.
uses: actions/checkout@v3
with:
repository: FreeRTOS/FreeRTOS
ref: '202107.00'
path: FreeRTOS
submodules: recursive
- name: Test manifest verifier
uses: ./manifest-verifier
with:
path: ./FreeRTOS
exclude-submodules: FreeRTOS-Plus/Test/CMock,FreeRTOS/Test/CMock/CMock,FreeRTOS/Test/litani
fail-on-incorrect-version: true