From 1b6c40c231a60691ef7dbc90487d7bb3d6745843 Mon Sep 17 00:00:00 2001 From: Armel Soro Date: Fri, 16 Feb 2024 10:57:26 +0100 Subject: [PATCH] Add E2E tests against our examples on real clusters - Do not error out when deleting a non-existing namespace - Stream command output to the GinkgoWriter in real-time as well This allows following what happens when calling potentially long-running commands - Implement airgap test mode - Ignore error when creating a namespace that already exists - Allow to use existing mirror registry in airgap scenario - Extract constants for test modes - Add documentation - Find an easier way to determine the IMG variable, using the Makefile - Add more examples to README.md - Add note about clusters with hosted control planes --- .rhdh/docs/airgap.adoc | 2 + .rhdh/docs/installing-ci-builds.adoc | 2 + Makefile | 34 +++- examples/janus-cr-with-app-configs.yaml | 2 +- tests/e2e/README.md | 82 ++++++++ tests/e2e/e2e_suite_test.go | 245 ++++++++++++++++++++++++ tests/e2e/e2e_test.go | 150 +++++++++++++++ tests/helper/helper_backstage.go | 130 +++++++++++++ tests/helper/utils.go | 161 ++++++++++++++++ 9 files changed, 806 insertions(+), 2 deletions(-) create mode 100644 tests/e2e/README.md create mode 100644 tests/e2e/e2e_suite_test.go create mode 100644 tests/e2e/e2e_test.go create mode 100644 tests/helper/helper_backstage.go create mode 100644 tests/helper/utils.go diff --git a/.rhdh/docs/airgap.adoc b/.rhdh/docs/airgap.adoc index 59806b33..f9a08998 100644 --- a/.rhdh/docs/airgap.adoc +++ b/.rhdh/docs/airgap.adoc @@ -1,5 +1,7 @@ ==== Installing Red Hat Developer Hub (RHDH) in restricted environments +WARNING: The procedure below will not work properly on hosted clusters like link:https://hypershift-docs.netlify.app/[HyperShift] or link:https://www.redhat.com/en/blog/red-hat-openshift-service-aws-hosted-control-planes-now-available[ROSA with hosted control planes] due to a limitation preventing link:https://docs.openshift.com/container-platform/4.14/rest_api/operator_apis/imagecontentsourcepolicy-operator-openshift-io-v1alpha1.html[`ImageContentSourcePolicy`] from being propagated to the cluster nodes. There is currently no workaround for these clusters. + On an OpenShift cluster operating in a restricted network, public resources are not available. However, deploying the RHDH Operator and running RHDH requires the following public resources: diff --git a/.rhdh/docs/installing-ci-builds.adoc b/.rhdh/docs/installing-ci-builds.adoc index affbc32b..9b0f755f 100644 --- a/.rhdh/docs/installing-ci-builds.adoc +++ b/.rhdh/docs/installing-ci-builds.adoc @@ -1,5 +1,7 @@ == Installing CI builds of Red Hat Developer Hub +WARNING: The procedure below will not work properly on hosted clusters like link:https://hypershift-docs.netlify.app/[HyperShift] or link:https://www.redhat.com/en/blog/red-hat-openshift-service-aws-hosted-control-planes-now-available[ROSA with hosted control planes] due to a limitation preventing link:https://docs.openshift.com/container-platform/4.14/rest_api/operator_apis/imagecontentsourcepolicy-operator-openshift-io-v1alpha1.html[`ImageContentSourcePolicy`] from being propagated to the cluster nodes. There is currently no workaround for these clusters. + *Prerequisites* * You are logged in as an administrator on the OpenShift web console. 
diff --git a/Makefile b/Makefile index c89e2eba..dd08657a 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,8 @@ VERSION ?= 0.0.1 # Using docker or podman to build and push images CONTAINER_ENGINE ?= docker +PKGS := $(shell go list ./... | grep -v /tests) + # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") # To re-generate a bundle for other specific channels without changing the standard setup, you can: @@ -127,7 +129,7 @@ vet: ## Run go vet against code. .PHONY: test test: manifests generate fmt vet envtest ## Run tests. We need LOCALBIN=$(LOCALBIN) to get correct default-config path mkdir -p $(LOCALBIN)/default-config && cp config/manager/$(CONF_DIR)/* $(LOCALBIN)/default-config - LOCALBIN=$(LOCALBIN) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out + LOCALBIN=$(LOCALBIN) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $(PKGS) -coverprofile cover.out ##@ Build @@ -227,6 +229,9 @@ GOSEC_VERSION ?= v2.18.2 GOSEC_FMT ?= sarif # for other options, see https://github.com/securego/gosec#output-formats GOSEC_OUTPUT_FILE ?= gosec.sarif +GINKGO ?= $(LOCALBIN)/ginkgo +GINKGO_VERSION ?= v2.9.5 + KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. @@ -365,3 +370,30 @@ catalog-update: ## Update catalog source in the default namespace for catalogsou .PHONY: deploy-openshift deploy-openshift: release-build release-push catalog-update ## Deploy the operator on openshift cluster +# After this time, Ginkgo will emit progress reports, so we can get visibility into long-running tests. +POLL_PROGRESS_INTERVAL := 120s +TIMEOUT ?= 14400s + +GINKGO_FLAGS_ALL = $(GINKGO_TEST_ARGS) --randomize-all --poll-progress-after=$(POLL_PROGRESS_INTERVAL) --poll-progress-interval=$(POLL_PROGRESS_INTERVAL) -timeout $(TIMEOUT) --no-color + +# Flags for tests that may be run in parallel +GINKGO_FLAGS=$(GINKGO_FLAGS_ALL) -nodes=$(TEST_EXEC_NODES) +# Flags to run one test per core. +GINKGO_FLAGS_AUTO = $(GINKGO_FLAGS_ALL) -p +ifdef TEST_EXEC_NODES + TEST_EXEC_NODES := $(TEST_EXEC_NODES) +else + TEST_EXEC_NODES := 1 +endif + +.PHONY: ginkgo +ginkgo: $(GINKGO) ## Download Ginkgo locally if necessary. +$(GINKGO): $(LOCALBIN) + test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) go install github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION) + +.PHONY: test-e2e +test-e2e: ginkgo ## Run end-to-end tests. See the 'tests/e2e/README.md' file for more details. 
+ $(GINKGO) $(GINKGO_FLAGS) tests/e2e + +show-img: + @echo $(IMG) diff --git a/examples/janus-cr-with-app-configs.yaml b/examples/janus-cr-with-app-configs.yaml index 2ddaf4ba..275b80ee 100644 --- a/examples/janus-cr-with-app-configs.yaml +++ b/examples/janus-cr-with-app-configs.yaml @@ -1,7 +1,7 @@ apiVersion: janus-idp.io/v1alpha1 kind: Backstage metadata: - name: my-backstage-app-with-app-config + name: bs-app-config spec: database: enableLocalDb: true diff --git a/tests/e2e/README.md b/tests/e2e/README.md new file mode 100644 index 00000000..2e42ee15 --- /dev/null +++ b/tests/e2e/README.md @@ -0,0 +1,82 @@ +## End-to-end tests + +The end-to-end tests use the [Ginkgo framework](https://onsi.github.io/ginkgo/) and allow to test the operator against a real cluster in the following scenarios: +- building and deploying the operator image off of the current code +- using a specific image or a specific downstream build + +Deployment of the operator itself can be done by: +- deploying with or without OLM, +- or deploying the downstream bundle in both online and air-gapped scenarios + +To run the end-to-end tests, you can use: +```shell +$ make test-e2e +``` + +### Configuration + +The behavior is configurable using the following environment variables: + +| Name | Type | Description | Default value | Example | +|------------------------------------------------------------------------------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------|---------------------------------------------------------| +| `BACKSTAGE_OPERATOR_TEST_MODE` | string | The test mode:
- if not set, it will call `make deploy`<br>
- `olm`: it will call `make deploy-olm`<br>
- `rhdh-latest` or `rhdh-next`: it will install the operator using the [`install-rhdh-catalog-source.sh`](../../.rhdh/scripts/install-rhdh-catalog-source.sh) script<br>
- `rhdh-airgap`: it will install the operator using the [`prepare-restricted-environment.sh`](../../.rhdh/scripts/prepare-restricted-environment.sh) script. | | `rhdh-latest` | +| `IMG` (or any variables from the Makefile that are used by `make deploy` or `make deploy-olm`) | string | The image to use. Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm`. | `VERSION` defined in [`Makefile`](../../Makefile) | `quay.io/janus-idp/operator:0.0.1-latest` | +| `BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES` | bool | If set to `true`, it will build the operator image with `make image-build`.<br>
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm`. | | `false` | +| `BACKSTAGE_OPERATOR_TESTS_PUSH_IMAGES` | bool | If set to `true`, it will push the operator image with `make image-push`.<br>
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm`. | | `false` | +| `BACKSTAGE_OPERATOR_TESTS_PLATFORM` | string | The platform type, to directly load the operator image if supported instead of pushing it.<br>
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm`.<br>Supported values: `kind` | | `kind` | +| `BACKSTAGE_OPERATOR_TESTS_KIND_CLUSTER` | string | Name of the local KinD cluster to use. Relevant only if `BACKSTAGE_OPERATOR_TESTS_PLATFORM` is `kind`. | `kind` | `kind-local-k8s-cluster` | +| `BACKSTAGE_OPERATOR_TESTS_AIRGAP_INDEX_IMAGE` | string | Index image to use in the airgap scenario.<br>
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is `rhdh-airgap`. | `quay.io/rhdh/iib:latest-v4.14-x86_64` | `registry.redhat.io/redhat/redhat-operator-index:v4.14` | +| `BACKSTAGE_OPERATOR_TESTS_AIRGAP_OPERATOR_VERSION` | string | Operator version to use in the airgap scenario.<br>
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is `rhdh-airgap`. | `v1.1.0` | `v1.1.0` | +| `BACKSTAGE_OPERATOR_TESTS_AIRGAP_MIRROR_REGISTRY` | string | Existing mirror registry to use in the airgap scenario.<br>
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is `rhdh-airgap`
. | | `my-registry.example.com` | + +### Examples + +#### Testing a specific version + +This should work on any Kubernetes cluster: + +```shell +$ make test-e2e VERSION=0.0.1-latest +``` + +#### Build and testing local changes on KinD + +```shell +$ kind create cluster +$ make test-e2e BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES=true BACKSTAGE_OPERATOR_TESTS_PLATFORM=kind +``` + +#### Testing a specific image (e.g. PR image) + +```shell +$ make test-e2e IMG=quay.io/janus-idp/operator:0.0.1-pr-201-7d08c24 +``` + +#### Testing a specific version using OLM + +This requires the [Operator Lifecycle Manager (OLM)](https://olm.operatorframework.io/) to be installed in the cluster: + +```shell +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=olm +``` + +#### Testing a downstream build of RHDH + +This requires an OpenShift cluster. If testing a CI build, please follow the instructions in [Installing CI builds of Red Hat Developer Hub](../../.rhdh/docs/installing-ci-builds.adoc) to add your Quay token to the cluster. + +```shell +# latest +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=rhdh-latest + +# or next +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=rhdh-next +``` + +#### Airgap testing of RHDH + +This requires an OpenShift cluster. +Please also read the prerequisites in [Installing Red Hat Developer Hub (RHDH) in restricted environments](../../.rhdh/docs/airgap.adoc). + +```shell +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=rhdh-airgap +``` diff --git a/tests/e2e/e2e_suite_test.go b/tests/e2e/e2e_suite_test.go new file mode 100644 index 00000000..9af9660e --- /dev/null +++ b/tests/e2e/e2e_suite_test.go @@ -0,0 +1,245 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "janus-idp.io/backstage-operator/tests/helper" +) + +const ( + rhdhLatestTestMode = "rhdh-latest" + rhdhNextTestMode = "rhdh-next" + rhdhAirgapTestMode = "rhdh-airgap" + olmDeployTestMode = "olm" + defaultDeployTestMode = "" +) + +var _namespace = "backstage-system" +var testMode = os.Getenv("BACKSTAGE_OPERATOR_TEST_MODE") + +// Run E2E tests using the Ginkgo runner. 
+func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + fmt.Fprintln(GinkgoWriter, "Starting Backstage Operator suite") + RunSpecs(t, "Backstage E2E suite") +} + +func installRhdhOperator(flavor string) (podLabel string) { + Expect(helper.IsOpenShift()).Should(BeTrue(), "install RHDH script works only on OpenShift clusters!") + cmd := exec.Command(filepath.Join(".rhdh", "scripts", "install-rhdh-catalog-source.sh"), "--"+flavor, "--install-operator", "rhdh") + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + podLabel = "app=rhdh-operator" + return podLabel +} + +func installRhdhOperatorAirgapped() (podLabel string) { + Expect(helper.IsOpenShift()).Should(BeTrue(), "airgap preparation script for RHDH works only on OpenShift clusters!") + indexImg, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_AIRGAP_INDEX_IMAGE") + if !ok { + //TODO(rm3l): find a way to pass the right OCP version and arch + indexImg = "quay.io/rhdh/iib:latest-v4.14-x86_64" + } + operatorVersion, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_AIRGAP_OPERATOR_VERSION") + if !ok { + operatorVersion = "v1.1.0" + } + args := []string{ + "--prod_operator_index", indexImg, + "--prod_operator_package_name", "rhdh", + "--prod_operator_bundle_name", "rhdh-operator", + "--prod_operator_version", operatorVersion, + } + if mirrorRegistry, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_AIRGAP_MIRROR_REGISTRY"); ok { + args = append(args, "--use_existing_mirror_registry", mirrorRegistry) + } + cmd := exec.Command(filepath.Join(".rhdh", "scripts", "prepare-restricted-environment.sh"), args...) + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + // Create a subscription in the rhdh-operator namespace + helper.CreateNamespace(_namespace) + cmd = exec.Command(helper.GetPlatformTool(), "-n", _namespace, "apply", "-f", "-") + stdin, err := cmd.StdinPipe() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + go func() { + defer stdin.Close() + _, _ = io.WriteString(stdin, fmt.Sprintf(` +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: rhdh + namespace: %s +spec: + channel: fast + installPlanApproval: Automatic + name: rhdh + source: rhdh-disconnected-install + sourceNamespace: openshift-marketplace +`, _namespace)) + }() + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + podLabel = "app=rhdh-operator" + return podLabel +} + +func installOperatorWithMakeDeploy(withOlm bool) { + img, err := helper.Run(exec.Command("make", "--no-print-directory", "show-img")) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + operatorImage := strings.TrimSpace(string(img)) + imgArg := fmt.Sprintf("IMG=%s", operatorImage) + + if os.Getenv("BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES") == "true" { + By("building the manager(Operator) image") + cmd := exec.Command("make", "image-build", imgArg) + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + if os.Getenv("BACKSTAGE_OPERATOR_TESTS_PUSH_IMAGES") == "true" { + By("pushing the manager(Operator) image") + cmd := exec.Command("make", "image-push", imgArg) + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + if os.Getenv("BACKSTAGE_OPERATOR_TESTS_PLATFORM") == "kind" { + By("loading the manager(Operator) image on Kind") + err := helper.LoadImageToKindClusterWithName(operatorImage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("installing CRDs") + cmd := exec.Command("make", "install") + _, err = helper.Run(cmd) + ExpectWithOffset(1,
err).NotTo(HaveOccurred()) + + By("deploying the controller-manager") + deployCmd := "deploy" + if withOlm { + deployCmd += "-olm" + } + cmd = exec.Command("make", deployCmd, imgArg) + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) +} + +var _ = SynchronizedBeforeSuite(func() []byte { + //runs *only* on process #1 + fmt.Fprintln(GinkgoWriter, "isOpenshift:", helper.IsOpenShift()) + + managerPodLabel := "control-plane=controller-manager" + + switch testMode { + case rhdhLatestTestMode, rhdhNextTestMode: + _namespace = "rhdh-operator" + managerPodLabel = installRhdhOperator(strings.TrimPrefix(testMode, "rhdh-")) + case rhdhAirgapTestMode: + _namespace = "rhdh-operator" + installRhdhOperatorAirgapped() + case olmDeployTestMode, defaultDeployTestMode: + helper.CreateNamespace(_namespace) + installOperatorWithMakeDeploy(testMode == olmDeployTestMode) + default: + Fail("unknown test mode: " + testMode) + return nil + } + + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func(g Gomega) { + // Get pod name + cmd := exec.Command(helper.GetPlatformTool(), "get", + "pods", "-l", managerPodLabel, + "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", _namespace, + ) + podOutput, err := helper.Run(cmd) + g.Expect(err).ShouldNot(HaveOccurred()) + podNames := helper.GetNonEmptyLines(string(podOutput)) + g.Expect(podNames).Should(HaveLen(1), fmt.Sprintf("expected 1 controller pods running, but got %d", len(podNames))) + controllerPodName := podNames[0] + g.Expect(controllerPodName).ShouldNot(BeEmpty()) + + // Validate pod status + cmd = exec.Command(helper.GetPlatformTool(), "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", _namespace, + ) + status, err := helper.Run(cmd) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(string(status)).Should(Equal("Running"), fmt.Sprintf("controller pod in %s status", status)) + } + EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) + + return nil +}, func(_ []byte) { + //runs on *all* processes +}) + +var _ = SynchronizedAfterSuite(func() { + //runs on *all* processes +}, func() { + //runs *only* on process #1 + switch testMode { + case rhdhLatestTestMode, rhdhNextTestMode, rhdhAirgapTestMode: + uninstallRhdhOperator(testMode == rhdhAirgapTestMode) + case olmDeployTestMode, defaultDeployTestMode: + uninstallOperatorWithMakeUndeploy(testMode == olmDeployTestMode) + } + helper.DeleteNamespace(_namespace, true) +}) + +func uninstallRhdhOperator(withAirgap bool) { + cmd := exec.Command(helper.GetPlatformTool(), "delete", "subscription", "rhdh", "-n", _namespace, "--ignore-not-found=true") + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + cs := "rhdh-fast" + if withAirgap { + cs = "rhdh-disconnected-install" + } + cmd = exec.Command(helper.GetPlatformTool(), "delete", "catalogsource", cs, "-n", "openshift-marketplace", "--ignore-not-found=true") + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + if withAirgap { + helper.DeleteNamespace("airgap-helper-ns", false) + } +} + +func uninstallOperatorWithMakeUndeploy(withOlm bool) { + By("undeploying the controller-manager") + undeployCmd := "undeploy" + if withOlm { + undeployCmd += "-olm" + } + cmd := exec.Command("make", undeployCmd) + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) +} diff --git 
a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go new file mode 100644 index 00000000..e623d737 --- /dev/null +++ b/tests/e2e/e2e_test.go @@ -0,0 +1,150 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "janus-idp.io/backstage-operator/tests/helper" +) + +var _ = Describe("Backstage Operator E2E", func() { + + var ( + projectDir string + ns string + ) + + BeforeEach(func() { + var err error + projectDir, err = helper.GetProjectDir() + Expect(err).ShouldNot(HaveOccurred()) + + ns = fmt.Sprintf("e2e-test-%d-%s", GinkgoParallelProcess(), helper.RandString(5)) + helper.CreateNamespace(ns) + }) + + AfterEach(func() { + helper.DeleteNamespace(ns, false) + }) + + Context("Examples CRs", func() { + + for _, tt := range []struct { + name string + crFilePath string + crName string + isRouteDisabled bool + additionalApiEndpointTests []helper.ApiEndpointTest + }{ + { + name: "minimal with no spec", + crFilePath: filepath.Join("examples", "bs1.yaml"), + crName: "bs1", + }, + { + name: "specific route sub-domain", + crFilePath: filepath.Join("examples", "bs-route.yaml"), + crName: "bs-route", + }, + { + name: "route disabled", + crFilePath: filepath.Join("examples", "bs-route-disabled.yaml"), + crName: "bs-route-disabled", + isRouteDisabled: true, + }, + { + name: "Janus CR with app-configs, dynamic plugins, extra files and extra-envs", + crFilePath: filepath.Join("examples", "janus-cr-with-app-configs.yaml"), + crName: "bs-app-config", + additionalApiEndpointTests: []helper.ApiEndpointTest{ + { + Endpoint: "/api/dynamic-plugins-info/loaded-plugins", + ExpectedHttpStatusCode: 200, + BodyMatcher: SatisfyAll( + ContainSubstring("backstage-plugin-catalog-backend-module-github-dynamic"), + ContainSubstring("@dfatwork-pkgs/scaffolder-backend-module-http-request-wrapped-dynamic"), + ContainSubstring("@dfatwork-pkgs/explore-backend-wrapped-dynamic"), + ), + }, + }, + }, + } { + tt := tt + When(fmt.Sprintf("%s (%s)", tt.name, tt.crFilePath), func() { + BeforeEach(func() { + cmd := exec.Command(helper.GetPlatformTool(), "apply", "-f", filepath.Join(projectDir, tt.crFilePath), "-n", ns) + _, err := helper.Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should run successfully", func() { + By("validating that the status of the custom resource created is updated or not", func() { + Eventually(helper.VerifyBackstageCRStatus, 30*time.Second, time.Second). + WithArguments(ns, tt.crName, "Deployed"). + Should(Succeed()) + }) + + By("validating that pod(s) status.phase=Running", func() { + Eventually(helper.VerifyBackstagePodStatus, 3*time.Minute, time.Second). + WithArguments(ns, tt.crName, "Running"). 
+ Should(Succeed()) + }) + + if helper.IsOpenShift() { + if tt.isRouteDisabled { + By("ensuring no route was created", func() { + Consistently(func(g Gomega, crName string) { + exists, err := helper.DoesBackstageRouteExist(ns, crName) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(exists).Should(BeFalse()) + }, 15*time.Second, time.Second).WithArguments(tt.crName).Should(Succeed()) + }) + } else { + By("ensuring the route is reachable", func() { + defaultApiEndpointTests := []helper.ApiEndpointTest{ + { + Endpoint: "/", + ExpectedHttpStatusCode: 200, + BodyMatcher: ContainSubstring("You need to enable JavaScript to run this app"), + }, + { + Endpoint: "/api/dynamic-plugins-info/loaded-plugins", + ExpectedHttpStatusCode: 200, + BodyMatcher: SatisfyAll( + ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-quay-dynamic"), + ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-regex-dynamic"), + ContainSubstring("roadiehq-scaffolder-backend-module-utils-dynamic"), + ), + }, + } + Eventually(helper.VerifyBackstageRoute, time.Minute, time.Second). + WithArguments(ns, tt.crName, append(defaultApiEndpointTests, tt.additionalApiEndpointTests...)). + Should(Succeed()) + }) + } + } + }) + }) + } + }) + + //TODO(rm3l): other scenarios to test: CR deletion, CR updates +}) diff --git a/tests/helper/helper_backstage.go b/tests/helper/helper_backstage.go new file mode 100644 index 00000000..debf97a8 --- /dev/null +++ b/tests/helper/helper_backstage.go @@ -0,0 +1,130 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "crypto/tls" + "fmt" + "io" + "net/http" + "os/exec" + "strings" + + . "github.com/onsi/ginkgo/v2" + .
"github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +type ApiEndpointTest struct { + Endpoint string + ExpectedHttpStatusCode int + BodyMatcher types.GomegaMatcher +} + +func VerifyBackstagePodStatus(g Gomega, ns string, crName string, expectedStatus string) { + cmd := exec.Command("kubectl", "get", + "pods", "-l", "janus-idp.io/app=backstage-"+crName, + "-o", "jsonpath={.items[*].status}", "-n", ns, + ) + status, err := Run(cmd) + fmt.Fprintln(GinkgoWriter, string(status)) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(string(status)).Should(ContainSubstring(fmt.Sprintf(`"phase":%q`, expectedStatus)), + fmt.Sprintf("backstage pod in %s status", status)) +} + +func VerifyBackstageCRStatus(g Gomega, ns string, crName string, expectedStatus string) { + cmd := exec.Command(GetPlatformTool(), "get", "backstage", crName, "-o", "jsonpath={.status.conditions}", "-n", ns) + status, err := Run(cmd) + fmt.Fprintln(GinkgoWriter, string(status)) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(string(status)).Should(ContainSubstring(expectedStatus), + fmt.Sprintf("status condition with type %s should be set", expectedStatus)) +} + +func DoesBackstageRouteExist(ns string, crName string) (bool, error) { + routeName := "backstage-" + crName + out, err := Run(exec.Command(GetPlatformTool(), "get", "route", routeName, "-n", ns)) + if err != nil { + if strings.Contains(string(out), fmt.Sprintf("%q not found", routeName)) { + return false, nil + } + return false, err + } + return true, nil +} + +func GetBackstageRouteHost(ns string, crName string) (string, error) { + routeName := "backstage-" + crName + + hostBytes, err := Run(exec.Command( + GetPlatformTool(), "get", "route", routeName, "-o", "go-template={{if .spec.host}}{{.spec.host}}{{end}}", "-n", ns)) + if err != nil { + return "", fmt.Errorf("unable to determine host for route %s/%s: %w", ns, routeName, err) + } + host := string(hostBytes) + if host != "" { + return host, nil + } + + // try with subdomain in case it was set + subDomainBytes, err := Run(exec.Command( + GetPlatformTool(), "get", "route", routeName, "-o", "go-template={{if .spec.subdomain}}{{.spec.subdomain}}{{end}}", "-n", ns)) + if err != nil { + return "", fmt.Errorf("unable to determine subdomain for route %s/%s: %w", ns, routeName, err) + } + subDomain := string(subDomainBytes) + if subDomain == "" { + return "", nil + } + ingressDomainBytes, err := Run(exec.Command(GetPlatformTool(), "get", "ingresses.config/cluster", "-o", "jsonpath={.spec.domain}")) + if err != nil { + return "", fmt.Errorf("unable to determine ingress sub-domain: %w", err) + } + ingressDomain := string(ingressDomainBytes) + if ingressDomain == "" { + return "", nil + } + return fmt.Sprintf("%s.%s", subDomain, ingressDomain), err +} + +func VerifyBackstageRoute(g Gomega, ns string, crName string, tests []ApiEndpointTest) { + host, err := GetBackstageRouteHost(ns, crName) + fmt.Fprintln(GinkgoWriter, host) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(host).ShouldNot(BeEmpty()) + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + httpClient := &http.Client{Transport: tr} + + performTest := func(tt ApiEndpointTest) { + url := fmt.Sprintf("https://%s/%s", host, strings.TrimPrefix(tt.Endpoint, "/")) + resp, rErr := httpClient.Get(url) + g.Expect(rErr).ShouldNot(HaveOccurred(), fmt.Sprintf("error while trying to GET %q", url)) + defer resp.Body.Close() + + g.Expect(resp.StatusCode).Should(Equal(tt.ExpectedHttpStatusCode), "context: "+tt.Endpoint) + 
body, rErr := io.ReadAll(resp.Body) + g.Expect(rErr).ShouldNot(HaveOccurred(), fmt.Sprintf("error while trying to read response body from 'GET %q'", url)) + if tt.BodyMatcher != nil { + g.Expect(string(body)).Should(tt.BodyMatcher, "context: "+tt.Endpoint) + } + } + for _, tt := range tests { + performTest(tt) + } +} diff --git a/tests/helper/utils.go b/tests/helper/utils.go new file mode 100644 index 00000000..ff8face6 --- /dev/null +++ b/tests/helper/utils.go @@ -0,0 +1,161 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/discovery" + ctrl "sigs.k8s.io/controller-runtime" +) + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789") + +var ( + _isOpenShift bool +) + +func init() { + _isOpenShift = func() bool { + restConfig := ctrl.GetConfigOrDie() + dcl, err := discovery.NewDiscoveryClientForConfig(restConfig) + if err != nil { + return false + } + + apiList, err := dcl.ServerGroups() + if err != nil { + return false + } + + apiGroups := apiList.Groups + for i := 0; i < len(apiGroups); i++ { + if apiGroups[i].Name == "route.openshift.io" { + return true + } + } + + return false + }() +} + +func GetPlatformTool() string { + if IsOpenShift() { + return "oc" + } + return "kubectl" +} + +// LoadImageToKindClusterWithName loads a local docker image to the kind cluster +func LoadImageToKindClusterWithName(name string) error { + cluster := "kind" + if v, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_KIND_CLUSTER"); ok { + cluster = v + } + kindOptions := []string{"load", "docker-image", name, "--name", cluster} + cmd := exec.Command("kind", kindOptions...) + _, err := Run(cmd) + return err +} + +// GetNonEmptyLines converts given command output string into individual objects +// according to line breakers, and ignores the empty elements in it. +func GetNonEmptyLines(output string) []string { + var res []string + elements := strings.Split(output, "\n") + for _, element := range elements { + if element != "" { + res = append(res, element) + } + } + + return res +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) ([]byte, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + fmt.Fprintf(GinkgoWriter, "running dir: %s\n", cmd.Dir) + + cmd.Env = append(cmd.Env, os.Environ()...) 
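+ // Inherit the caller's environment, then (best-effort) switch the test process's working directory to the project root; a chdir failure is only logged to the GinkgoWriter.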
+ + if err := os.Chdir(cmd.Dir); err != nil { + fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + } + + command := strings.Join(cmd.Args, " ") + fmt.Fprintf(GinkgoWriter, "running: %s\n", command) + + var stdBuffer bytes.Buffer + mw := io.MultiWriter(GinkgoWriter, &stdBuffer) + cmd.Stdout = mw + cmd.Stderr = mw + + err := cmd.Run() + outBytes := stdBuffer.Bytes() + if err != nil { + return outBytes, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(outBytes)) + } + + return outBytes, nil +} + +// GetProjectDir will return the directory where the project is +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return wd, err + } + wd = strings.Replace(wd, "/tests/e2e", "", -1) + return wd, nil +} + +func CreateNamespace(ns string) { + cmd := exec.Command(GetPlatformTool(), "create", "namespace", ns) + out, err := Run(cmd) + if err != nil && strings.Contains(string(out), fmt.Sprintf("%q already exists", ns)) { + return + } + Expect(err).ShouldNot(HaveOccurred()) +} + +func DeleteNamespace(ns string, wait bool) { + cmd := exec.Command(GetPlatformTool(), "delete", "namespace", ns, + fmt.Sprintf("--wait=%s", strconv.FormatBool(wait)), "--ignore-not-found=true") + _, err := Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) +} + +func RandString(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +func IsOpenShift() bool { + return _isOpenShift +}