Skip to content

Commit

Permalink
split python setup and teardown scripts into bash scripts
Browse files Browse the repository at this point in the history
This allows the CI and the Makefile to support more configurations.

Signed-off-by: jiaxiao zhou <jiazho@microsoft.com>
  • Loading branch information
Mossaka committed Aug 22, 2023
1 parent 1aae8ed commit 6105fb5
Show file tree
Hide file tree
Showing 11 changed files with 212 additions and 130 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,9 @@ jobs:
- name: build
run: |
VERBOSE=1 make build
- name: unit tests
run: |
VERBOSE=1 make unit-tests
- name: lowercase the runner OS name
shell: bash
run: |
Expand Down
19 changes: 7 additions & 12 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,27 +26,22 @@ jobs:
build:
uses: ./.github/workflows/build.yaml
test:
needs: build
runs-on: ubuntu-latest
env:
ARCH: x86_64
steps:
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
with:
workspaces: |
"containerd-shim-*-v1 -> target"
- name: "Install Rust Wasm targets"
- uses: actions/download-artifact@v3
- name: Extract containerd-wasm-shims-v1-linux-${{ env.ARCH }}
run: |
make install-rust-targets
- name: "Install dependencies"
run: |
sudo apt-get update
sudo apt-get install protobuf-compiler -y
mkdir -p ./bin
tar -xzf containerd-wasm-shims-v1-linux-${{ env.ARCH }}/containerd-wasm-shims-v1-linux-${{ env.ARCH }}.tar.gz -C ./bin
- name: install k3d
run: make install-k3d
working-directory: ./deployments/k3d
- name: run integration tests
run: make test
run: BIN_DIR="./bin" make integration-tests
- name: clean up k3d
if: always()
run: make test/clean
run: make test/clean
27 changes: 24 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -16,21 +16,42 @@ else
VERBOSE_FLAG := -vvv
endif

# Directory containing prebuilt shim binaries; empty means "use the cross
# build output" (see scripts/move-bins.sh). CI sets BIN_DIR="./bin".
BIN_DIR ?=

.PHONY: test
test: unit-tests integration-tests

.PHONY: unit-tests
unit-tests: build
	$(foreach shim,$(SHIMS),cross test --release --manifest-path=containerd-shim-$(shim)-v1/Cargo.toml --target $(TARGET);)

# Verify that k3d, cross, docker and kubectl are on PATH.
.PHONY: check-bins
check-bins:
	./scripts/check-bins.sh

# was "./PHONY:" — that typo defines a target literally named "./PHONY"
# instead of declaring the target phony.
.PHONY: move-bins
move-bins:
	./scripts/move-bins.sh $(BIN_DIR)

.PHONY: up
up:
	./scripts/up.sh

.PHONY: pod-status-check
pod-status-check:
	./scripts/pod-status-check.sh

.PHONY: workloads
workloads:
	./scripts/workloads.sh

.PHONY: integration-tests
integration-tests: install-cross check-bins move-bins up pod-status-check workloads
	cargo test -- --nocapture

# was ".PHONY: tests/clean" — the declaration must match the actual
# target name "test/clean" to have any effect.
.PHONY: test/clean
test/clean:
	./scripts/down.sh

.PHONY: fmt
fmt:
Expand Down
41 changes: 41 additions & 0 deletions scripts/check-bins.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#!/bin/bash

# Description:
#   Verifies that the binaries required by the build and integration tests
#   exist on the system's PATH, printing each one's resolved path.
#
# Usage:
#   ./check-bins.sh
#
# Dependencies:
#   The script expects the following binaries to be present in the system's PATH:
#   k3d, cross, docker, kubectl

set -euo pipefail

# Function: which_binary
# Description:
#   Prints the path of the specified binary if it exists in the system's
#   PATH; otherwise prints an error to stderr and exits nonzero.
# Parameters:
#   $1 - The name of the binary to locate.
which_binary() {
    local binary_name="$1"
    local binary_path
    # Run `command -v` as the `if` condition: a bare failing assignment
    # would trip `set -e` and abort before the error message below is
    # ever printed (the original else-branch was unreachable).
    if binary_path=$(command -v "$binary_name"); then
        echo "$binary_path"
    else
        echo "Could not find $binary_name" >&2
        exit 1
    fi
}


# List of binary names the CI pipeline depends on.
binaries=("k3d" "cross" "docker" "kubectl")

for binary in "${binaries[@]}"; do
    which_binary "$binary"
done
15 changes: 15 additions & 0 deletions scripts/down.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/bin/bash

# Tear down the integration-test environment: remove the k3d test cluster
# and the Docker image that backed it.

set -euo pipefail

cluster_name="test-cluster"

# Delete the k3d cluster created by scripts/up.sh.
k3d cluster delete "$cluster_name"

# Remove the cluster's base image.
docker rmi k3d-shim-test
46 changes: 46 additions & 0 deletions scripts/move-bins.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
#!/bin/bash

# Containerd Shim Installer Script
#
# Copies the containerd shim binaries (slight, spin, wws) into the k3d
# deployment's staging directory when they are not already present there.
#
# Usage:
#   ./move-bins.sh [release_pattern] [target]
#
# Arguments:
#   1. release_pattern (Optional): printf-style pattern (one %s for the shim
#      name) used to locate the shim binaries; may also be a plain directory
#      such as "./bin" with no %s.
#   2. target (Optional): the target triple used in the default release path.
#      Default value is `x86_64-unknown-linux-musl`.
#
# Example:
#   ./move-bins.sh
#

set -euo pipefail

target="${2:-x86_64-unknown-linux-musl}"
release_pattern="${1:-containerd-shim-%s-v1/target/$target/release}"

dockerfile_path="deployments/k3d"
bin_path="${dockerfile_path}/.tmp/"
default_shim_path="${bin_path}containerd-shim-"

declare -A shims=(
    [slight]="${default_shim_path}slight-v1"
    [spin]="${default_shim_path}spin-v1"
    [wws]="${default_shim_path}wws-v1"
)

mkdir -p "$bin_path"

for shim_key in "${!shims[@]}"; do
    shim_path=${shims[$shim_key]}
    # The pattern is deliberately used as a printf format string.
    # shellcheck disable=SC2059
    release_path=$(printf "$release_pattern" "$shim_key")

    if [ ! -f "$shim_path" ]; then
        echo ">>> install containerd-shim-${shim_key}-v1 from $release_path"
        # printf above already fully expanded the path; the previous
        # `$(eval echo $release_path)` re-evaluated it unquoted, inviting
        # word-splitting, glob expansion, and command injection.
        cp "${release_path}/containerd-shim-${shim_key}-v1" "${bin_path}containerd-shim-${shim_key}-v1"
    fi
done
21 changes: 21 additions & 0 deletions scripts/pod-status-check.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#!/bin/bash

# Checks that every pod in the current namespace is in the "Running" phase.
# Exits nonzero otherwise, so `make pod-status-check` (and the CI pipeline
# that depends on it) actually fails when the cluster is unhealthy.

set -euo pipefail

# Get the phase of all pods, one per line (e.g. Running, Pending, Failed).
pod_statuses=$(kubectl get pods --no-headers -o custom-columns=":status.phase")

# Check if all pods are running fine
all_running=true
for status in $pod_statuses; do
    if [ "$status" != "Running" ]; then
        all_running=false
        break
    fi
done

if $all_running; then
    echo "All pods are running fine."
else
    echo "Not all pods are running fine. Please check the status."
    # Propagate failure to the caller; without this the check always
    # succeeded and could never gate the integration tests.
    exit 1
fi
29 changes: 29 additions & 0 deletions scripts/up.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
#!/bin/bash

# Bring up the integration-test environment: build the k3d node image,
# create the test cluster, then build and import the three sample workload
# images (slight, spin, wws).

set -euo pipefail

cluster_name="test-cluster"       # name of the k3d cluster
dockerfile_path="deployments/k3d" # path to the Dockerfile

# Build the Docker image used as the k3d cluster's node image.
docker build -t k3d-shim-test "$dockerfile_path"

k3d cluster create "$cluster_name" --image k3d-shim-test --api-port 6551 -p '8082:80@loadbalancer' --agents 2

kubectl wait --for=condition=ready node --all --timeout=120s

# For each workload, build its hello-world image, save it to a tarball,
# and import the tarball into the cluster's image store.
for name in slight spin wws; do
    image="${name}-hello-world"
    out_dir="test/out_${name}"

    docker buildx build -t "${image}:latest" "./images/${name}" --load
    mkdir -p "$out_dir"
    docker save -o "${out_dir}/img.tar" "${image}:latest"
    k3d image import "${out_dir}/img.tar" -c "$cluster_name"
done

sleep 5

echo ">>> cluster is ready"
26 changes: 26 additions & 0 deletions scripts/workloads.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#!/bin/bash

# Apply the test workloads, wait for their pods to become ready, then dump
# the state of pods, deployments and services for the CI log.

set -euo pipefail

echo ">>> apply workloads"
kubectl apply -f tests/workloads


# Wait for all the pods to be ready.
kubectl wait --for=condition=ready --timeout=50s pod --all

# List and describe each resource kind (header is the capitalized kind,
# e.g. ">>> Pods:").
for resource in pods deployments services; do
    echo ">>> ${resource^}:"
    kubectl get "$resource" -o wide
    kubectl describe "$resource"
done
103 changes: 0 additions & 103 deletions tests/setup.py

This file was deleted.

12 changes: 0 additions & 12 deletions tests/teardown.py

This file was deleted.

0 comments on commit 6105fb5

Please sign in to comment.