MGMT-8356: Deploy assisted-service and its components to kind cluster
instead of minikube for local subsystem testing
danmanor authored and root committed May 30, 2024
1 parent fdf233a commit 6efed48
Showing 8 changed files with 211 additions and 130 deletions.
65 changes: 35 additions & 30 deletions Makefile
@@ -57,16 +57,6 @@ else
UPDATE_IMAGE=update-minimal
endif

ifdef SUBSYSTEM_LOCAL_REGISTRY
UPDATE_LOCAL_SERVICE=_update-private-registry-image
LOCAL_SERVICE_IMAGE=${SUBSYSTEM_LOCAL_REGISTRY}/assisted-service:${ASSISTED_TAG}
IMAGE_PULL_POLICY=--image-pull-policy Always
else
IMAGE_PULL_POLICY=--image-pull-policy IfNotPresent
UPDATE_LOCAL_SERVICE=_update-local-k8s-image
LOCAL_SERVICE_IMAGE=${SERVICE}
endif

CONTAINER_BUILD_PARAMS = --network=host --label git_revision=${GIT_REVISION} ${CONTAINER_BUILD_EXTRA_PARAMS}

MUST_GATHER_IMAGES := $(or ${MUST_GATHER_IMAGES}, $(shell (tr -d '\n\t ' < ${ROOT_DIR}/data/default_must_gather_versions.json)))
@@ -238,16 +228,9 @@ update-debug-minimal:

update-image: $(UPDATE_IMAGE)

_update-private-registry-image: update-image
$(CONTAINER_COMMAND) tag $(SERVICE) $(LOCAL_SERVICE_IMAGE)
$(CONTAINER_COMMAND) push $(PUSH_FLAGS) $(LOCAL_SERVICE_IMAGE)

_update-local-k8s-image:
# Temporary hack that updates the local k8s (e.g. minikube) with the latest image.
# Should be replaced after installing a local registry
./hack/update_local_image.sh

update-local-image: $(UPDATE_LOCAL_SERVICE)
load-image: update-image
$(CONTAINER_COMMAND) save -o build/assisted_service_image.tar $(SERVICE)
kind load image-archive build/assisted_service_image.tar

build-image: update-minimal

@@ -394,9 +377,9 @@ ifdef DEBUG_SERVICE
endif
$(call restart_service_pods)

deploy-test: _verify_cluster generate-keys update-local-image
deploy-test: _verify_cluster generate-keys load-image
-$(KUBECTL) delete deployments.apps assisted-service &> /dev/null
export SERVICE=${LOCAL_SERVICE_IMAGE} && export TEST_FLAGS=--subsystem-test && \
export TEST_FLAGS=--subsystem-test && \
export AUTH_TYPE="rhsso" && export DUMMY_IGNITION="True" && \
export IPV6_SUPPORT="True" && ENABLE_ORG_TENANCY="True" && ENABLE_ORG_BASED_FEATURE_GATES="True" && \
export RELEASE_SOURCES='$(or ${RELEASE_SOURCES},${DEFAULT_RELEASE_SOURCES})' && \
@@ -432,7 +415,7 @@ test:
$(MAKE) _run_subsystem_test AUTH_TYPE=rhsso ENABLE_ORG_TENANCY=true ENABLE_ORG_BASED_FEATURE_GATES=true

test-kube-api:
$(MAKE) _run_subsystem_test AUTH_TYPE=local ENABLE_KUBE_API=true FOCUS="$(or ${FOCUS},kube-api)"
$(MAKE) _run_subsystem_test AUTH_TYPE=local ENABLE_KUBE_API=true FOCUS="$(or ${FOCUS},kube-api)"

# An alias for the test target
subsystem-test: test
@@ -441,10 +424,21 @@ subsystem-test: test
subsystem-test-kube-api: test-kube-api

_run_subsystem_test:
INVENTORY=$(shell $(call get_service_host_port,assisted-service) | sed 's/http:\/\///g') \
DB_HOST=$(shell $(call get_service_host_port,postgres) | sed 's/http:\/\///g' | cut -d ":" -f 1) \
DB_PORT=$(shell $(call get_service_host_port,postgres) | sed 's/http:\/\///g' | cut -d ":" -f 2) \
OCM_HOST=$(shell $(call get_service_host_port,wiremock) | sed 's/http:\/\///g') \
if [[ $(TARGET) == "kind" ]]; then \
assisted_service_url="${HOSTNAME}:80"; \
db_host="localhost"; \
db_port="5432"; \
ocm_host="${HOSTNAME}:80"; \
else \
assisted_service_url="$(shell $(call get_service_host_port,assisted-service) | sed 's/http:\/\///g')"; \
db_host="$(shell $(call get_service_host_port,postgres) | sed 's/http:\/\///g' | cut -d ":" -f 1)"; \
db_port="$(shell $(call get_service_host_port,postgres) | sed 's/http:\/\///g' | cut -d ":" -f 2)"; \
ocm_host="$(shell $(call get_service_host_port,wiremock) | sed 's/http:\/\///g')"; \
fi; \
INVENTORY=$$assisted_service_url \
DB_HOST=$$db_host \
DB_PORT=$$db_port \
OCM_HOST=$$ocm_host \
TEST_TOKEN="$(shell cat $(BUILD_FOLDER)/auth-tokenString)" \
TEST_TOKEN_2="$(shell cat $(BUILD_FOLDER)/auth-tokenString2)" \
TEST_TOKEN_ADMIN="$(shell cat $(BUILD_FOLDER)/auth-tokenAdminString)" \
@@ -461,9 +455,6 @@ enable-kube-api-for-subsystem: $(BUILD_FOLDER)

deploy-wiremock: deploy-namespace
python3 ./tools/deploy_wiremock.py --target $(TARGET) --namespace "$(NAMESPACE)"
timeout 5m ./hack/wait_for_wiremock.sh
OCM_URL=$$(kubectl get service wiremock -n $(NAMESPACE) -ojson | jq --from-file ./hack/k8s_service_host_port.jq --raw-output); \
export OCM_URL && go run ./hack/add_wiremock_stubs.go

deploy-olm: deploy-namespace
python3 ./tools/deploy_olm.py --target $(TARGET)
@@ -568,3 +559,17 @@ operator-bundle-build: generate-bundle

operator-index-build:
opm index add --bundles $(BUNDLE_IMAGE) --tag $(INDEX_IMAGE) --container-tool $(CONTAINER_COMMAND)

install-kind-if-needed:
./hack/kind/kind.sh install

delete-kind-cluster: install-kind-if-needed
kind delete cluster

create-hub-cluster: install-kind-if-needed
./hack/kind/kind.sh create

subsystem-tests: create-hub-cluster
pip install waiting strato-skipper
$(MAKE) deploy-service-for-subsystem-test TARGET=kind SERVICE=localhost/assisted-service:latest
$(MAKE) subsystem-test HOSTNAME=$(shell hostname) TARGET=kind
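
Taken together, the new targets replace the minikube registry flow: `load-image` saves the freshly built service image to a tarball and loads it straight into the kind node, while `subsystem-tests` creates the hub cluster, deploys the components, and runs the suite. A sketch of how these targets are typically invoked, assuming podman and the defaults defined earlier in this Makefile:

```bash
# Create the kind hub cluster, deploy assisted-service, and run the REST-API subsystem tests.
make subsystem-tests

# Same flow with the kube-api controllers enabled before the tests run.
ENABLE_KUBE_API=true make subsystem-tests

# Tear the cluster down when finished.
make delete-kind-cluster
```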
3 changes: 2 additions & 1 deletion deploy/postgres/postgres-deployment.yaml
@@ -57,9 +57,10 @@ metadata:
labels:
app: postgres
spec:
type: LoadBalancer
type: NodePort
ports:
- port: 5432
nodePort: 30000
selector:
app: postgres
status:
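
Switching the Service from `LoadBalancer` to `NodePort` 30000 pairs with the kind `extraPortMappings` added below, which forward container port 30000 to host port 5432, so the test process can reach postgres on plain `localhost:5432`. A rough connectivity check from the host; the credentials are placeholders, not values taken from this commit:

```bash
# Assumes the kind cluster and the postgres deployment are already up.
pg_isready -h localhost -p 5432

# Hypothetical user/database names; substitute whatever the deployment actually configures.
psql -h localhost -p 5432 -U admin -d installer -c 'SELECT 1;'
```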
19 changes: 19 additions & 0 deletions deploy/wiremock/wiremock-ingress.yaml
@@ -0,0 +1,19 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: wiremock
namespace: REPLACE_NAMESPACE
annotations:
haproxy.router.openshift.io/timeout: 120s
spec:
rules:
- host: REPLACE_HOSTNAME
http:
paths:
- path: "/__admin/mappings"
pathType: Prefix
backend:
service:
name: wiremock
port:
number: 8080
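
This new Ingress publishes WireMock's `/__admin/mappings` endpoint through the cluster ingress controller (Contour, installed by `hack/kind/kind.sh` below) on host port 80, which lets clients reach WireMock's admin API without the port-forward and wait script removed elsewhere in this commit. A quick check once everything is deployed, assuming `REPLACE_HOSTNAME` was templated to the local hostname:

```bash
# Should return the currently registered WireMock stub mappings as JSON.
curl --fail "http://$(hostname)/__admin/mappings" | head
```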
93 changes: 15 additions & 78 deletions docs/dev/running-test.md
@@ -1,87 +1,27 @@
# How to run Assisted-service subsystem tests
# Running Assisted-service Subsystem Tests on Kubernetes

There are two "flavors" of subsystem tests:
## Overview

* subsystem tests for the service deployed in REST-API mode
* subsystem tests for the service deployed in kube-api mode
This document details the steps required to run subsystem tests for the Assisted-service deployed in two modes:
- REST-API mode
- Kube-API mode

Assisted-service subsystem tests require that you first deploy the
assisted-service on a k8s cluster (e.g. minikube).
## Deployment for Subsystem Tests

This document will explain how you can easily deploy the service in preperation
for subsystem tests, and also how to run the tests themselves.
Assisted-service components will be deployed in a Kind cluster using the Podman provider. Necessary tools like `kind`, `strato-skipper`, and `waiting` will be installed automatically if not present.

The subsystem tests themselves are located in the
[subsystem](https://github.com/openshift/assisted-service/tree/master/subsystem)
directory, but they are launched via `make` targets in the Makefile at the root
of this repository.
## Running the tests

## Service deployment

This section will show you how you can deploy the assisted installer to
a minikube cluster in preperation for subsystem tests.

### minikube

First we must prepare the minikube cluster -

```bash
# Optionally delete the existing minikube cluster:
# minikube delete

# Clean remains of any networks created by minikube
podman network rm minikube || true

# Start minikube
minikube start --insecure-registry=$(hostname --ip):5000 --driver=podman --addons dashboard --force

# enable the registry addon using quay.io images (to overcome docker-hub's rate-limiter)
minikube addons enable registry --images="Registry=quay.io/libpod/registry:2.8"

# Make the registry addon accessible locally:
nohup kubectl port-forward svc/registry --address 0.0.0.0 5000:80 -n kube-system &>/dev/null &
export LOCAL_SUBSYSTEM_REGISTRY=$(hostname --ip):5000

echo "Waiting for registry to become ready..."
while ! curl --location $LOCAL_SUBSYSTEM_REGISTRY; do
sleep 10
echo "kubectl registry service tunnel at port 5000 is not available yet, retrying..."
echo "If this persists, try running the kubectl port-forward command above without"
echo "nohup, /dev/null redirection and the background job & operator and see if there"
echo "are any errors"
done

# Make a tunnel to make minikube services reachable (the command will ask for root password):
nohup minikube tunnel &>/dev/null &
```

Now that the cluster is prepared, we can deploy the service -

To deploy the service in REST-API mode, run:

```bash
skipper make deploy-service-for-subsystem-test
```

To deploy the service in kube-api mode, run:
To test REST-API mode, run:

```bash
ENABLE_KUBE_API=true skipper make deploy-service-for-subsystem-test
skipper make enable-kube-api-for-subsystem
make subsystem-tests
```

## Running the subsystem tests

To run the REST-API subsystem tests, run:

```bash
skipper make subsystem-test
```

To run the kube-api subsystem tests, run:
To test kube-api mode, run:

```bash
skipper make subsystem-test-kube-api
ENABLE_KUBE_API=true make subsystem-tests
```

Optionally the following environment variables can be exported:
@@ -90,14 +30,11 @@ Optionally the following environment variables can be exported:
* `SKIP="install_cluster"` - An optional flag to skip scopes matching a regular expression.
* `VERBOSE=true` - An optional flag to print verbose output.

## Update service for the subsystem tests

If you are making changes to the service's code and don't want to go through
the slow steps above once again, you can simply run this command instead:
## Quick Update and Test
To quickly update the service and run tests after making code changes, use:

```bash
skipper make patch-service
```

It will build and push a new image of the service to the container registry,
then trigger a rollout of the service Deployment.
This command builds a new service image, pushes it to the container registry, and triggers a rollout of the updated service Deployment.
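
For completeness, a hedged example combining the optional variables documented above with the new kind-based entry point; whether each variable is honored end to end depends on the Makefile plumbing shown earlier:

```bash
# Skip the long cluster-install scope and print verbose output while running the suite.
SKIP="install_cluster" VERBOSE=true make subsystem-tests
```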
11 changes: 11 additions & 0 deletions hack/kind/kind-config.yaml
@@ -0,0 +1,11 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 30000
hostPort: 5432
protocol: TCP
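
The two `extraPortMappings` tie the rest of the commit together: host port 80 fronts the Contour ingress (the assisted-service and WireMock routes), and host port 5432 forwards to NodePort 30000 used by the postgres Service above. A minimal smoke test from the host, assuming the cluster is up and `nc` is available:

```bash
# Port 5432 on the host should reach postgres inside the kind node.
nc -z localhost 5432 && echo "postgres reachable"

# Port 80 should be answered by the ingress (a 404 is fine before any routes exist).
curl -s -o /dev/null -w '%{http_code}\n' "http://$(hostname)/"
```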
60 changes: 60 additions & 0 deletions hack/kind/kind.sh
@@ -0,0 +1,60 @@
#!/usr/bin/env bash

set -o nounset
set -o errexit
set -o pipefail

KIND_VERSION="0.17.0"

__dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

function check() {
if [ "$(kind --version)" == "kind version $KIND_VERSION" ]; then
return 0
else
echo "Does not have 'kind' with version $KIND_VERSION!"
return 1
fi
}

function install() {
if check; then
return 0
fi

echo "Installing kind $KIND_VERSION..."
sudo curl --retry 5 --connect-timeout 30 -L https://kind.sigs.k8s.io/dl/v$KIND_VERSION/kind-linux-amd64 -o /usr/local/bin/kind
sudo chmod u+x /usr/local/bin/kind
echo "Installed successfully!"
}

function setup_contour() {
# based on https://kind.sigs.k8s.io/docs/user/ingress/#contour
# for more information about contour, see: https://projectcontour.io
kubectl apply -f https://projectcontour.io/quickstart/contour.yaml
kubectl rollout status -n projectcontour daemonset envoy --timeout 2m
kubectl rollout status -n projectcontour deployment contour --timeout 2m
}

function create() {
check

if ! kind export kubeconfig &> /dev/null ; then
KIND_EXPERIMENTAL_PROVIDER=podman kind create cluster --config $__dir/kind-config.yaml
else
echo "Cluster already existing. Skipping creation"
fi

setup_contour
}

function delete() {
kind delete cluster
}

if [ $# -eq 0 ]; then
echo "Usage: $__dir/kind.sh (install|check|create)"
exit 1
else
$@
fi
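
The script dispatches its arguments straight to the functions above, so the supported invocations are just the function names:

```bash
# Install the pinned kind release if it is missing, then create the cluster with the config above.
./hack/kind/kind.sh install
./hack/kind/kind.sh create

# Remove the cluster when done (the Makefile's delete-kind-cluster target calls kind directly).
./hack/kind/kind.sh delete
```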
21 changes: 0 additions & 21 deletions hack/wait_for_wiremock.sh

This file was deleted.
