# Workflow: Deploy bundle and run UATs on self-hosted runners (PR/issue #251)
---
name: Deploy bundle and run UATs on self-hosted runners

# Triggered either manually (workflow_dispatch) or from another workflow
# (workflow_call). Both triggers take the same three inputs; workflow_call
# requires explicit `type:` declarations, workflow_dispatch defaults to string.
on:
  workflow_dispatch:
    inputs:
      bundle-test-path:
        description: 'Test folder to run'
        required: true
      bundle-source:
        description: 'Either `--channel <channel_name>` or `--file <bundle_file>.yaml`'
        required: true
      uats-branch:
        description: Branch to checkout at for charmed-kubeflow-uats repo
        required: false
        default: main
  workflow_call:
    inputs:
      bundle-test-path:
        description: 'Test folder to run'
        type: string
        required: true
      bundle-source:
        description: 'Either `--channel <channel_name>` or `--file <bundle_file>.yaml`'
        type: string
        required: true
      uats-branch:
        description: Branch to checkout at for charmed-kubeflow-uats repo
        required: false
        type: string
        default: main
jobs:
  test-bundle:
    # Labels select a large self-hosted runner sitting behind the internal proxy.
    runs-on: [self-hosted, linux, X64, two-xlarge]
    steps:
      - uses: actions/checkout@v3

      # Detect the runner's upstream DNS server and assemble the microk8s addon
      # list (incl. MetalLB address ranges); consumed by the operator setup step
      # below via steps.dns-name.outputs.MY_ADDONS.
      - name: Parse and enable DNS server
        id: dns-name
        run: |
          dns_server=$(grep -oPm1 'Current DNS Server: \K[^\s]+' <<< "$(resolvectl status)")
          echo "Using following DNS Server: $dns_server"
          echo "MY_ADDONS=hostpath-storage ingress dns:$dns_server rbac registry metallb:'10.64.140.43-10.64.140.49,192.168.0.105-192.168.0.111'" >> $GITHUB_OUTPUT

      # Route outbound HTTP/HTTPS through the datacenter squid proxy via aproxy,
      # using nft NAT rules; `\$` keeps nft variables out of shell expansion,
      # while the unescaped $(...) resolves the runner's primary IP at run time.
      - name: Setup aproxy
        run: |
          sudo snap install aproxy --edge
          sudo snap set aproxy proxy=squid.internal:3128
          sudo nft -f - << EOF
          define default-ip = $(ip route get $(ip route show 0.0.0.0/0 | grep -oP 'via \K\S+') | grep -oP 'src \K\S+')
          define private-ips = { 10.0.0.0/8, 127.0.0.1/8, 172.16.0.0/12, 192.168.0.0/16 }
          table ip aproxy
          flush table ip aproxy
          table ip aproxy {
                chain prerouting {
                        type nat hook prerouting priority dstnat; policy accept;
                        ip daddr != \$private-ips tcp dport { 80, 443 } counter dnat to \$default-ip:8443
                }
                chain output {
                        type nat hook output priority -100; policy accept;
                        ip daddr != \$private-ips tcp dport { 80, 443 } counter dnat to \$default-ip:8443
                }
          }
          EOF

      - name: Install tools
        run: |
          pip install tox
          sudo snap install kubectl --channel 1.24/stable --classic
          # sudo apt-get update -yqq
          # sudo apt-get install -yqq python3-pip
          # sudo --preserve-env=http_proxy,https_proxy,no_proxy pip3 install tox
          sudo snap install firefox

      - name: Setup operator environment
        uses: charmed-kubernetes/actions-operator@main
        with:
          provider: microk8s
          channel: 1.24/stable
          juju-channel: 2.9/stable
          charmcraft-channel: latest/candidate
          microk8s-addons: ${{ steps.dns-name.outputs.MY_ADDONS }}

      # `sg microk8s -c` runs commands with the microk8s group, since the
      # runner user's group membership is not refreshed mid-job.
      - name: Wait for microk8s to be ready and configure .kube/config
        run: |
          sg microk8s -c "microk8s status --wait-ready --timeout 150"
          sg microk8s -c "mkdir -p ~/.kube"
          sg microk8s -c "microk8s config > ~/.kube/config"

      - name: Show all pods status
        run: |
          sg microk8s -c "microk8s kubectl get pods --all-namespaces"

      # Kubeflow spawns many pods; raise inotify limits to avoid
      # "too many open files" style failures.
      - name: Increase file system limits
        run: |
          sudo sysctl fs.inotify.max_user_instances=1280
          sudo sysctl fs.inotify.max_user_watches=655360

      - name: Configure juju model
        run: |
          sg microk8s -c "juju add-model kubeflow --config default-series=focal --config automatically-retry-hooks=true"
          sg microk8s -c "juju model-config"
          sg microk8s -c "juju status"

      # Selenium's geckodriver needs a user D-Bus session to drive Firefox.
      - name: Configure env for Gecko driver
        run: |
          # required for gecko driver
          export XDG_RUNTIME_DIR="/run/user/$(id -u)"
          export DBUS_SESSION_BUS_ADDRESS="unix:path=$XDG_RUNTIME_DIR/bus"
          echo "$(id -u)"
          loginctl enable-linger $USER
          sudo apt-get install dbus-user-session -yqq
          systemctl --user start dbus.service

      - name: Run bundle tests
        run: |
          export BUNDLE_TEST_PATH=${{ inputs.bundle-test-path }}
          export GH_TOKEN=${{ secrets.GITHUB_TOKEN }}
          sg microk8s -c "tox -e full_bundle_tests -- ${{ inputs.bundle-source }}"
          # sg microk8s -c "tox -e full_bundle_tests -- --file releases/1.7/stable/kubeflow/bundle.yaml"

      - name: Checkout Kubeflow UATs
        run: |
          git clone https://github.com/canonical/charmed-kubeflow-uats.git ~/charmed-kubeflow-uats
          cd ~/charmed-kubeflow-uats
          git checkout ${{ inputs.uats-branch }}

      - name: Run UATs
        run: |
          sg microk8s -c "tox -c ~/charmed-kubeflow-uats/ -e kubeflow"

      # - name: Upload selenium screenshots
      #   if: failure()
      #   uses: actions/upload-artifact@v3
      #   with:
      #     name: selenium-screenshots
      #     path: |
      #       sel-screenshots
      #       **/sel-screenshots
      #       **/**/sel-screenshots

      - name: Save debug artifacts
        uses: canonical/kubeflow-ci/actions/dump-charm-debug-artifacts@main
        if: always()

      - name: Dump Aproxy logs on failure
        if: failure()
        run: sudo snap logs aproxy.aproxy -n=all

      - name: Run connectivity check
        if: always()
        run: |
          sg microk8s -c "curl --max-time 10 --connect-timeout 10 http://10.64.140.43.nip.io"

      - name: Dump Juju/k8s logs on failure
        if: failure() || cancelled()
        run: |
          sg microk8s -c "juju status"
          echo "Dumping k8s logs"
          sg microk8s -c "microk8s kubectl get all --all-namespaces"

      - name: Describe all pods
        if: failure()
        run: |
          sg microk8s -c "microk8s kubectl describe pods --all-namespaces"

      # List pods whose container readiness is "false" and dump their logs.
      - name: Get logs from failed pods
        if: failure() || cancelled()
        run: |
          POD_LIST=$(sg microk8s -c "microk8s kubectl get pods --all-namespaces -o 'custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,CONTAINERS:.status.containerStatuses[*].ready'" | awk '$3 == "false" {print $1,$2}')
          if [ -n "$POD_LIST" ]; then
            echo "Actual Logs"
            while read -r POD NAMESPACE; do
              # -e makes bash's echo interpret the \n escapes (plain echo prints them literally)
              echo -e "\n\n\nPod: $POD"
              sg microk8s -c "microk8s kubectl logs -n $NAMESPACE $POD"
            done <<< "$POD_LIST"
          fi