From 58c9b5dedb2a51b7cff1e3154374cec172c68c9e Mon Sep 17 00:00:00 2001 From: Armel Soro Date: Mon, 26 Feb 2024 11:32:40 +0100 Subject: [PATCH] Add E2E tests using our examples against real clusters (#204) * Add E2E tests against our examples on real clusters - Do not error out when deleting a non-existing namespace - Stream command output to the GinkgoWriter in real-time as well This allows following what happens when calling potentially long-running commands - Implement airgap test mode - Ignore error when creating a namespace that already exists - Allow to use existing mirror registry in airgap scenario - Extract constants for test modes - Add documentation - Find an easier way to determine the IMG variable, using the Makefile - Add more examples to README.md - Add note about clusters with hosted control planes - Support k3d clusters - Support Minikube clusters - Load image into local clusters using an archive instead This allows this logic to be agnostic to the container engine used to build the image. We rely on the container image to export the image to an archive ('{podman,docker} image save'). - Run E2E test nightly on main and release branch * Try running E2E tests on PRs by leveraging the already built operator image * Revert "Try running E2E tests on PRs by leveraging the already built operator image" This reverts commit fc87e04ee419a9b4a27002ede9c5972128ea832a. * Check if image exists locally before trying to export an archive If not, try to pull it automatically. This would avoid having to manually pull it. * Update README.md Co-authored-by: Gennady Azarenkov * Ignore gosec warnings in test code Those are not used in production * Clarify in README that a connection to a cluster in the current kubeconfig is needed * Increase timeout when waiting for controller to be up On fresh clusters, 1 minute might be too short * fixup! 
Clarify in README that a connection to a cluster in the current kubeconfig is needed --------- Co-authored-by: Gennady Azarenkov --- .github/workflows/nightly.yaml | 82 ++++++++ Makefile | 37 +++- examples/rhdh-cr-with-app-configs.yaml | 2 +- tests/e2e/README.md | 146 ++++++++++++++ tests/e2e/e2e_suite_test.go | 258 +++++++++++++++++++++++++ tests/e2e/e2e_test.go | 189 ++++++++++++++++++ tests/helper/helper_backstage.go | 160 +++++++++++++++ tests/helper/utils.go | 238 +++++++++++++++++++++++ 8 files changed, 1110 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/nightly.yaml create mode 100644 tests/e2e/README.md create mode 100644 tests/e2e/e2e_suite_test.go create mode 100644 tests/e2e/e2e_test.go create mode 100644 tests/helper/helper_backstage.go create mode 100644 tests/helper/utils.go diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml new file mode 100644 index 00000000..e37da84c --- /dev/null +++ b/.github/workflows/nightly.yaml @@ -0,0 +1,82 @@ +name: Nightly checks + +on: + # workflow_dispatch so that it can be triggered manually if needed + workflow_dispatch: + schedule: + - cron: "34 23 * * *" + +jobs: + e2e-tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + branch: [ main, 1.1.x ] + name: E2E Tests - ${{ matrix.branch }} + concurrency: + group: ${{ github.workflow }}-${{ matrix.branch }} + cancel-in-progress: true + env: + CONTAINER_ENGINE: podman + steps: + - uses: actions/checkout@v4 # default branch will be checked out by default on scheduled workflows + with: + fetch-depth: 0 + + - if: ${{ matrix.branch != 'main' }} + name: Checkout ${{ matrix.branch }} branch + run: git switch ${{ matrix.branch }} + + # check changes in this commit for regex include and exclude matches; pipe to an env var + - name: Check for changes to build + run: | + # don't fail if nothing returned by grep + set +e + CHANGES="$(git diff --name-only HEAD~1 | \ + grep -E 
"workflows/.+-container-build.yaml|Makefile|bundle/|config/|go.mod|go.sum|.+\.go|docker/|\.dockerignore" | \ + grep -v -E ".+_test.go|/.rhdh/")"; + echo "Changed files for this commit:" + echo "==============================" + echo "$CHANGES" + echo "==============================" + { + echo 'CHANGES<<EOF' + echo "$CHANGES" + echo 'EOF' + } >> "$GITHUB_ENV" + + - name: Determine built operator image + # run this stage only if there are changes that match the includes and not the excludes + if: ${{ env.CHANGES != '' }} + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + BASE_VERSION=$(grep -E "^VERSION \?=" Makefile | sed -r -e "s/.+= //") # 0.1.0 + echo "OPERATOR_IMAGE=quay.io/janus-idp/operator:${BASE_VERSION}-${SHORT_SHA}" >> $GITHUB_ENV + + - name: Wait until image exists in registry or timeout is reached + # run this stage only if there are changes that match the includes and not the excludes + if: ${{ env.CHANGES != '' }} + timeout-minutes: 10 + run: | + echo "Waiting until operator image is found or timeout expires: ${{ env.OPERATOR_IMAGE }}..." + until ${CONTAINER_ENGINE} image pull "${{ env.OPERATOR_IMAGE }}"; do + sleep 2 + echo ... + done + echo "... operator image found: ${{ env.OPERATOR_IMAGE }}." + + - name: Start Minikube + # run this stage only if there are changes that match the includes and not the excludes + if: ${{ env.CHANGES != '' }} + uses: medyagh/setup-minikube@606b71970c783154fe49b711486c717f5780f485 # v0.0.15 + with: + addons: ingress + + - name: Run E2E tests + # run this stage only if there are changes that match the includes and not the excludes + if: ${{ env.CHANGES != '' }} + env: + BACKSTAGE_OPERATOR_TESTS_PLATFORM: minikube + IMG: ${{ env.OPERATOR_IMAGE }} + run: make test-e2e diff --git a/Makefile b/Makefile index be64f069..65d051b5 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,8 @@ VERSION ?= 0.2.0 # Using docker or podman to build and push images CONTAINER_ENGINE ?= docker +PKGS := $(shell go list ./... 
| grep -v /tests) + # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") # To re-generate a bundle for other specific channels without changing the standard setup, you can: @@ -127,7 +129,7 @@ vet: ## Run go vet against code. .PHONY: test test: manifests generate fmt vet envtest ## Run tests. We need LOCALBIN=$(LOCALBIN) to get correct default-config path mkdir -p $(LOCALBIN)/default-config && cp config/manager/$(CONF_DIR)/* $(LOCALBIN)/default-config - LOCALBIN=$(LOCALBIN) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out + LOCALBIN=$(LOCALBIN) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $(PKGS) -coverprofile cover.out ##@ Build @@ -228,6 +230,9 @@ GOSEC_VERSION ?= v2.18.2 GOSEC_FMT ?= sarif # for other options, see https://github.com/securego/gosec#output-formats GOSEC_OUTPUT_FILE ?= gosec.sarif +GINKGO ?= $(LOCALBIN)/ginkgo +GINKGO_VERSION ?= v2.9.5 + KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. @@ -366,3 +371,33 @@ catalog-update: ## Update catalog source in the default namespace for catalogsou .PHONY: deploy-openshift deploy-openshift: release-build release-push catalog-update ## Deploy the operator on openshift cluster +# After this time, Ginkgo will emit progress reports, so we can get visibility into long-running tests. 
+POLL_PROGRESS_INTERVAL := 120s +TIMEOUT ?= 14400s + +GINKGO_FLAGS_ALL = $(GINKGO_TEST_ARGS) --randomize-all --poll-progress-after=$(POLL_PROGRESS_INTERVAL) --poll-progress-interval=$(POLL_PROGRESS_INTERVAL) -timeout $(TIMEOUT) --no-color + +# Flags for tests that may be run in parallel +GINKGO_FLAGS=$(GINKGO_FLAGS_ALL) -nodes=$(TEST_EXEC_NODES) +# Flags to run one test per core. +GINKGO_FLAGS_AUTO = $(GINKGO_FLAGS_ALL) -p +ifdef TEST_EXEC_NODES + TEST_EXEC_NODES := $(TEST_EXEC_NODES) +else + TEST_EXEC_NODES := 1 +endif + +.PHONY: ginkgo +ginkgo: $(GINKGO) ## Download Ginkgo locally if necessary. +$(GINKGO): $(LOCALBIN) + test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) go install github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION) + +.PHONY: test-e2e +test-e2e: ginkgo ## Run end-to-end tests. See the 'tests/e2e/README.md' file for more details. + $(GINKGO) $(GINKGO_FLAGS) tests/e2e + +show-img: + @echo -n $(IMG) + +show-container-engine: + @echo -n $(CONTAINER_ENGINE) diff --git a/examples/rhdh-cr-with-app-configs.yaml b/examples/rhdh-cr-with-app-configs.yaml index 5694142f..28e62fc2 100644 --- a/examples/rhdh-cr-with-app-configs.yaml +++ b/examples/rhdh-cr-with-app-configs.yaml @@ -1,7 +1,7 @@ apiVersion: rhdh.redhat.com/v1alpha1 kind: Backstage metadata: - name: my-backstage-app-with-app-config + name: bs-app-config spec: database: enableLocalDb: true diff --git a/tests/e2e/README.md b/tests/e2e/README.md new file mode 100644 index 00000000..9c11751a --- /dev/null +++ b/tests/e2e/README.md @@ -0,0 +1,146 @@ +## End-to-end tests + +The end-to-end tests use the [Ginkgo framework](https://onsi.github.io/ginkgo/) and allow to test the operator against a real cluster in the following scenarios: +- building and deploying the operator image off of the current code +- using a specific image or a specific downstream build + +Deployment of the operator itself can be done by: +- deploying with or without OLM, +- or deploying the downstream bundle in both online and 
air-gapped scenarios + +To run the end-to-end tests, make sure you have an active connection to a cluster in your current [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) and run: +```shell +# Check your current context +$ kubectl config current-context +$ make test-e2e +``` + +### Configuration + +The behavior is configurable using the following environment variables: + +| Name | Type | Description | Default value | Example | +|------------------------------------------------------------------------------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------|---------------------------------------------------------| +| `BACKSTAGE_OPERATOR_TEST_MODE` | string | The test mode:
- if not set, it will call `make deploy`
- `olm`: it will call `make deploy-olm`
- `rhdh-latest` or `rhdh-next`: it will install the operator using the [`install-rhdh-catalog-source.sh`](../../.rhdh/scripts/install-rhdh-catalog-source.sh) script
- `rhdh-airgap`: it will install the operator using the [`prepare-restricted-environment.sh`](../../.rhdh/scripts/prepare-restricted-environment.sh) script. | | `rhdh-latest` | +| `IMG` (or any variables from the Makefile that are used by `make deploy` or `make deploy-olm`) | string | The image to use. Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm` | `VERSION` defined in [`Makefile`](../../Makefile) | `quay.io/janus-idp/operator:0.0.1-latest` | +| `BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES` | bool | If set to `true`, it will build the operator image with `make image-build`.
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm`. | | `false` | +| `BACKSTAGE_OPERATOR_TESTS_PUSH_IMAGES` | bool | If set to `true`, it will push the operator image with `make image-push`.
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm`. | | `false` | +| `BACKSTAGE_OPERATOR_TESTS_PLATFORM` | string | The platform type, to directly load the operator image if supported instead of pushing it.
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is not set or set to `olm`.<br>Supported values: [`kind`](#building-and-testing-local-changes-on-kind), [`k3d`](#building-and-testing-local-changes-on-k3d), [`minikube`](#building-and-testing-local-changes-on-minikube) | | `kind` | +| `BACKSTAGE_OPERATOR_TESTS_KIND_CLUSTER` | string | Name of the local KinD cluster to use. Relevant only if `BACKSTAGE_OPERATOR_TESTS_PLATFORM` is `kind`. | `kind` | `kind-local-k8s-cluster` | +| `BACKSTAGE_OPERATOR_TESTS_K3D_CLUSTER` | string | Name of the local k3d cluster to use. Relevant only if `BACKSTAGE_OPERATOR_TESTS_PLATFORM` is `k3d`. | `k3s-default` | `k3d-local-k8s-cluster` | +| `BACKSTAGE_OPERATOR_TESTS_AIRGAP_INDEX_IMAGE` | string | Index image to use in the airgap scenario.
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is `rhdh-airgap`. | `quay.io/rhdh/iib:latest-v4.14-x86_64` | `registry.redhat.io/redhat/redhat-operator-index:v4.14` | +| `BACKSTAGE_OPERATOR_TESTS_AIRGAP_OPERATOR_VERSION` | string | Operator version to use in the airgap scenario.
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is `rhdh-airgap`. | `v1.1.0` | `v1.1.0` | +| `BACKSTAGE_OPERATOR_TESTS_AIRGAP_MIRROR_REGISTRY` | string | Existing mirror registry to use in the airgap scenario.
Relevant if `BACKSTAGE_OPERATOR_TEST_MODE` is `rhdh-airgap`
. | | `my-registry.example.com` | + +### Examples + +#### Testing the operator available for the VERSION (default) + +In this scenario, you want to run the E2E test suite against the operator image corresponding to the `VERSION` declared in the project [`Makefile`](../../Makefile), which should be publicly available at `quay.io/janus-idp/operator:`. + +This is the default behavior. + +This should work on any Kubernetes or OpenShift cluster: + +```shell +$ make test-e2e +``` + +#### Testing a specific image (e.g. PR image) + +In this scenario, you want to run the E2E test suite against an existing operator image. + +This should work on any Kubernetes or OpenShift cluster: + +```shell +# if the tag is already published and available at the default location: quay.io/janus-idp/operator +$ make test-e2e VERSION=0.2.0-3d1c1e0 + +# or you can override the full image repo name +$ make test-e2e IMG=my.registry.example.com/operator:0.2.0-3d1c1e0 +``` + +Note that `VERSION` and `IMG` override the respective variables declared in the project [`Makefile`](../../Makefile). + +#### Building and testing local changes on supported local clusters + +In this scenario, you are iterating locally, and want to run the E2E test suite against your local changes. You are already using a local cluster like [`kind`](https://kind.sigs.k8s.io/), [`k3d`](https://k3d.io/) or [`minikube`](https://minikube.sigs.k8s.io/docs/), which provide the ability to import images into the cluster nodes. + +To do so, you can: +1. set `BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES` to `true`, which will result in building the operator image from the local changes, +2. and set `BACKSTAGE_OPERATOR_TESTS_PLATFORM` to a supported local cluster, which will result in loading the image built directly in that cluster (without having to push to a separate registry). 
+ +##### `kind` + +```shell +$ kind create cluster +$ make test-e2e \ + BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES=true \ + BACKSTAGE_OPERATOR_TESTS_PLATFORM=kind +``` + +##### `k3d` + +```shell +$ k3d cluster create +$ make test-e2e \ + BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES=true \ + BACKSTAGE_OPERATOR_TESTS_PLATFORM=k3d +``` + +##### `minikube` + +```shell +$ minikube start +$ make test-e2e \ + BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES=true \ + BACKSTAGE_OPERATOR_TESTS_PLATFORM=minikube +``` + +#### Testing a specific version using OLM + +In this scenario, you want to leverage the [Operator Lifecycle Manager (OLM)](https://olm.operatorframework.io/) to deploy the Operator. + +This requires OLM to be installed in the cluster. + +```shell +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=olm +``` + +#### Testing a downstream build of Red Hat Developer Hub (RHDH) + +In this scenario, you want to run the E2E tests against a downstream build of RHDH. + +This works only against OpenShift clusters. So make sure you are logged in to the OpenShift cluster using the `oc` command. See [Logging in to the OpenShift CLI](https://docs.openshift.com/container-platform/4.14/cli_reference/openshift_cli/getting-started-cli.html#cli-logging-in_cli-developer-commands) for more details. + +You can check your current context by running `oc config current-context` or `kubectl config current-context`. + +If testing a CI build, please follow the instructions in [Installing CI builds of Red Hat Developer Hub](../../.rhdh/docs/installing-ci-builds.adoc) to add your Quay token to the cluster. + +```shell +# latest +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=rhdh-latest + +# or next +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=rhdh-next +``` + +#### Airgap testing of Red Hat Developer Hub (RHDH) + +In this scenario, you want to run the E2E tests against an OpenShift cluster running in a restricted network. 
For this, the command below will make sure to prepare it by copying all the necessary images to a mirror registry, then deploy the operator. + +Make sure you are logged in to the OpenShift cluster using the `oc` command. See [Logging in to the OpenShift CLI](https://docs.openshift.com/container-platform/4.14/cli_reference/openshift_cli/getting-started-cli.html#cli-logging-in_cli-developer-commands) for more details. + +You can check your current context by running `oc config current-context` or `kubectl config current-context`. + +Also make sure to read the prerequisites in [Installing Red Hat Developer Hub (RHDH) in restricted environments](../../.rhdh/docs/airgap.adoc). + +```shell +# if you want to have a mirror registry to be created for you as part of the airgap environment setup +$ make test-e2e BACKSTAGE_OPERATOR_TEST_MODE=rhdh-airgap + +# or if you already have a mirror registry available and reachable from within your cluster +$ make test-e2e \ + BACKSTAGE_OPERATOR_TEST_MODE=rhdh-airgap \ + BACKSTAGE_OPERATOR_TESTS_AIRGAP_MIRROR_REGISTRY=my-registry.example.com +``` diff --git a/tests/e2e/e2e_suite_test.go b/tests/e2e/e2e_suite_test.go new file mode 100644 index 00000000..042ae386 --- /dev/null +++ b/tests/e2e/e2e_suite_test.go @@ -0,0 +1,258 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package e2e + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "redhat-developer/red-hat-developer-hub-operator/tests/helper" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +const ( + rhdhLatestTestMode = "rhdh-latest" + rhdhNextTestMode = "rhdh-next" + rhdhAirgapTestMode = "rhdh-airgap" + olmDeployTestMode = "olm" + defaultDeployTestMode = "" +) + +var _namespace = "backstage-system" +var testMode = os.Getenv("BACKSTAGE_OPERATOR_TEST_MODE") + +// Run E2E tests using the Ginkgo runner. +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + fmt.Fprintln(GinkgoWriter, "Starting Backstage Operator suite") + RunSpecs(t, "Backstage E2E suite") +} + +func installRhdhOperator(flavor string) (podLabel string) { + Expect(helper.IsOpenShift()).Should(BeTrue(), "install RHDH script works only on OpenShift clusters!") + cmd := exec.Command(filepath.Join(".rhdh", "scripts", "install-rhdh-catalog-source.sh"), "--"+flavor, "--install-operator", "rhdh") + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + podLabel = "app=rhdh-operator" + return podLabel +} + +func installRhdhOperatorAirgapped() (podLabel string) { + Expect(helper.IsOpenShift()).Should(BeTrue(), "airgap preparation script for RHDH works only on OpenShift clusters!") + indexImg, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_AIRGAP_INDEX_IMAGE") + if !ok { + //TODO(rm3l): find a way to pass the right OCP version and arch + indexImg = "quay.io/rhdh/iib:latest-v4.14-x86_64" + } + operatorVersion, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_AIRGAP_OPERATOR_VERSION") + if !ok { + operatorVersion = "v1.1.0" + } + args := []string{ + "--prod_operator_index", indexImg, + "--prod_operator_package_name", "rhdh", + "--prod_operator_bundle_name", "rhdh-operator", + "--prod_operator_version", operatorVersion, + } + if mirrorRegistry, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_AIRGAP_MIRROR_REGISTRY"); ok { + args = 
append(args, "--use_existing_mirror_registry", mirrorRegistry) + } + cmd := exec.Command(filepath.Join(".rhdh", "scripts", "prepare-restricted-environment.sh"), args...) + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + // Create a subscription in the rhdh-operator namespace + helper.CreateNamespace(_namespace) + cmd = exec.Command(helper.GetPlatformTool(), "-n", _namespace, "apply", "-f", "-") + stdin, err := cmd.StdinPipe() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + go func() { + defer stdin.Close() + _, _ = io.WriteString(stdin, fmt.Sprintf(` +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: rhdh + namespace: %s +spec: + channel: fast + installPlanApproval: Automatic + name: rhdh + source: rhdh-disconnected-install + sourceNamespace: openshift-marketplace +`, _namespace)) + }() + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + podLabel = "app=rhdh-operator" + return podLabel +} + +func installOperatorWithMakeDeploy(withOlm bool) { + img, err := helper.Run(exec.Command("make", "--no-print-directory", "show-img")) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + operatorImage := strings.TrimSpace(string(img)) + imgArg := fmt.Sprintf("IMG=%s", operatorImage) + + if os.Getenv("BACKSTAGE_OPERATOR_TESTS_BUILD_IMAGES") == "true" { + By("building the manager(Operator) image") + cmd := exec.Command("make", "image-build", imgArg) + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + if os.Getenv("BACKSTAGE_OPERATOR_TESTS_PUSH_IMAGES") == "true" { + By("building the manager(Operator) image") + cmd := exec.Command("make", "image-push", imgArg) + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + plt, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_PLATFORM") + if ok { + var localClusterImageLoader func(string) error + switch plt { + case "kind": + localClusterImageLoader = helper.LoadImageToKindClusterWithName + 
case "k3d": + localClusterImageLoader = helper.LoadImageToK3dClusterWithName + case "minikube": + localClusterImageLoader = helper.LoadImageToMinikubeClusterWithName + } + Expect(localClusterImageLoader).ShouldNot(BeNil(), fmt.Sprintf("unsupported platform %q to push images to", plt)) + By("loading the the manager(Operator) image on " + plt) + err = localClusterImageLoader(operatorImage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("installing CRDs") + cmd := exec.Command("make", "install") + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("deploying the controller-manager") + deployCmd := "deploy" + if withOlm { + deployCmd += "-olm" + } + cmd = exec.Command("make", deployCmd, imgArg) + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) +} + +var _ = SynchronizedBeforeSuite(func() []byte { + //runs *only* on process #1 + fmt.Fprintln(GinkgoWriter, "isOpenshift:", helper.IsOpenShift()) + + managerPodLabel := "control-plane=controller-manager" + + switch testMode { + case rhdhLatestTestMode, rhdhNextTestMode: + _namespace = "rhdh-operator" + managerPodLabel = installRhdhOperator(strings.TrimPrefix(testMode, "rhdh-")) + case rhdhAirgapTestMode: + _namespace = "rhdh-operator" + installRhdhOperatorAirgapped() + case olmDeployTestMode, defaultDeployTestMode: + helper.CreateNamespace(_namespace) + installOperatorWithMakeDeploy(testMode == olmDeployTestMode) + default: + Fail("unknown test mode: " + testMode) + return nil + } + + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func(g Gomega) { + // Get pod name + cmd := exec.Command(helper.GetPlatformTool(), "get", + "pods", "-l", managerPodLabel, + "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", _namespace, + ) + podOutput, err := helper.Run(cmd) + g.Expect(err).ShouldNot(HaveOccurred()) + podNames := 
helper.GetNonEmptyLines(string(podOutput)) + g.Expect(podNames).Should(HaveLen(1), fmt.Sprintf("expected 1 controller pods running, but got %d", len(podNames))) + controllerPodName := podNames[0] + g.Expect(controllerPodName).ShouldNot(BeEmpty()) + + // Validate pod status + cmd = exec.Command(helper.GetPlatformTool(), "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", _namespace, + ) + status, err := helper.Run(cmd) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(string(status)).Should(Equal("Running"), fmt.Sprintf("controller pod in %s status", status)) + } + EventuallyWithOffset(1, verifyControllerUp, 5*time.Minute, time.Second).Should(Succeed()) + + return nil +}, func(_ []byte) { + //runs on *all* processes +}) + +var _ = SynchronizedAfterSuite(func() { + //runs on *all* processes +}, func() { + //runs *only* on process #1 + switch testMode { + case rhdhLatestTestMode, rhdhNextTestMode, rhdhAirgapTestMode: + uninstallRhdhOperator(testMode == rhdhAirgapTestMode) + case olmDeployTestMode, defaultDeployTestMode: + uninstallOperatorWithMakeUndeploy(testMode == olmDeployTestMode) + } + helper.DeleteNamespace(_namespace, true) +}) + +func uninstallRhdhOperator(withAirgap bool) { + cmd := exec.Command(helper.GetPlatformTool(), "delete", "subscription", "rhdh", "-n", _namespace, "--ignore-not-found=true") + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + cs := "rhdh-fast" + if withAirgap { + cs = "rhdh-disconnected-install" + } + cmd = exec.Command(helper.GetPlatformTool(), "delete", "catalogsource", cs, "-n", "openshift-marketplace", "--ignore-not-found=true") + _, err = helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + if withAirgap { + helper.DeleteNamespace("airgap-helper-ns", false) + } +} + +func uninstallOperatorWithMakeUndeploy(withOlm bool) { + By("undeploying the controller-manager") + undeployCmd := "undeploy" + if withOlm { + undeployCmd += "-olm" + } + cmd := 
exec.Command("make", undeployCmd) + _, err := helper.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) +} diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go new file mode 100644 index 00000000..a22cd10c --- /dev/null +++ b/tests/e2e/e2e_test.go @@ -0,0 +1,189 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "os/exec" + "path/filepath" + "strconv" + "time" + + "redhat-developer/red-hat-developer-hub-operator/tests/helper" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Backstage Operator E2E", func() { + + var ( + projectDir string + ns string + ) + + BeforeEach(func() { + var err error + projectDir, err = helper.GetProjectDir() + Expect(err).ShouldNot(HaveOccurred()) + + ns = fmt.Sprintf("e2e-test-%d-%s", GinkgoParallelProcess(), helper.RandString(5)) + helper.CreateNamespace(ns) + }) + + AfterEach(func() { + helper.DeleteNamespace(ns, false) + }) + + Context("Examples CRs", func() { + + for _, tt := range []struct { + name string + crFilePath string + crName string + isRouteDisabled bool + additionalApiEndpointTests []helper.ApiEndpointTest + }{ + { + name: "minimal with no spec", + crFilePath: filepath.Join("examples", "bs1.yaml"), + crName: "bs1", + }, + { + name: "specific route sub-domain", + crFilePath: filepath.Join("examples", "bs-route.yaml"), + crName: "bs-route", + }, + { + name: "route disabled", + crFilePath: filepath.Join("examples", "bs-route-disabled.yaml"), + crName: "bs-route-disabled", + isRouteDisabled: true, + }, + { + name: "RHDH CR with app-configs, dynamic plugins, extra files and extra-envs", + crFilePath: filepath.Join("examples", "rhdh-cr-with-app-configs.yaml"), + crName: "bs-app-config", + additionalApiEndpointTests: []helper.ApiEndpointTest{ + { + Endpoint: "/api/dynamic-plugins-info/loaded-plugins", + ExpectedHttpStatusCode: 200, + BodyMatcher: SatisfyAll( + ContainSubstring("backstage-plugin-catalog-backend-module-github-dynamic"), + ContainSubstring("@dfatwork-pkgs/scaffolder-backend-module-http-request-wrapped-dynamic"), + ContainSubstring("@dfatwork-pkgs/explore-backend-wrapped-dynamic"), + ), + }, + }, + }, + } { + tt := tt + When(fmt.Sprintf("applying %s (%s)", tt.name, tt.crFilePath), func() { + var crPath string + BeforeEach(func() { + crPath = filepath.Join(projectDir, tt.crFilePath) + cmd := exec.Command(helper.GetPlatformTool(), "apply", "-f", crPath, "-n", ns) + _, err := helper.Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) + }) + + 
It("should handle CR as expected", func() { + By("validating that the status of the custom resource created is updated or not", func() { + Eventually(helper.VerifyBackstageCRStatus, time.Minute, time.Second). + WithArguments(ns, tt.crName, "Deployed"). + Should(Succeed()) + }) + + By("validating that pod(s) status.phase=Running", func() { + Eventually(helper.VerifyBackstagePodStatus, 7*time.Minute, time.Second). + WithArguments(ns, tt.crName, "Running"). + Should(Succeed()) + }) + + if helper.IsOpenShift() { + if tt.isRouteDisabled { + By("ensuring no route was created", func() { + Consistently(func(g Gomega, crName string) { + exists, err := helper.DoesBackstageRouteExist(ns, tt.crName) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(exists).Should(BeTrue()) + }, 15*time.Second, time.Second).WithArguments(tt.crName).ShouldNot(Succeed()) + }) + } else { + By("ensuring the route is reachable", func() { + ensureRouteIsReachable(ns, tt.crName, tt.additionalApiEndpointTests) + }) + } + } + + var isRouteEnabledNow bool + By("updating route spec in CR", func() { + // enables route that was previously disabled, and disables route that was previously enabled. 
+ isRouteEnabledNow = tt.isRouteDisabled + err := helper.PatchBackstageCR(ns, tt.crName, fmt.Sprintf(` +{ + "spec": { + "application": { + "route": { + "enabled": %s + } + } + } +}`, strconv.FormatBool(isRouteEnabledNow)), + "merge") + Expect(err).ShouldNot(HaveOccurred()) + }) + if helper.IsOpenShift() { + if isRouteEnabledNow { + By("ensuring the route is reachable", func() { + ensureRouteIsReachable(ns, tt.crName, tt.additionalApiEndpointTests) + }) + } else { + By("ensuring route no longer exists eventually", func() { + Eventually(func(g Gomega, crName string) { + exists, err := helper.DoesBackstageRouteExist(ns, tt.crName) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(exists).Should(BeFalse()) + }, time.Minute, time.Second).WithArguments(tt.crName).Should(Succeed()) + }) + } + } + + By("deleting CR", func() { + cmd := exec.Command(helper.GetPlatformTool(), "delete", "-f", crPath, "-n", ns) + _, err := helper.Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) + }) + + if helper.IsOpenShift() && isRouteEnabledNow { + By("ensuring application is no longer reachable", func() { + Eventually(func(g Gomega, crName string) { + exists, err := helper.DoesBackstageRouteExist(ns, tt.crName) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(exists).Should(BeFalse()) + }, time.Minute, time.Second).WithArguments(tt.crName).Should(Succeed()) + }) + } + }) + }) + } + }) +}) + +func ensureRouteIsReachable(ns string, crName string, additionalApiEndpointTests []helper.ApiEndpointTest) { + Eventually(helper.VerifyBackstageRoute, time.Minute, time.Second). + WithArguments(ns, crName, additionalApiEndpointTests). + Should(Succeed()) +} diff --git a/tests/helper/helper_backstage.go b/tests/helper/helper_backstage.go new file mode 100644 index 00000000..65622da3 --- /dev/null +++ b/tests/helper/helper_backstage.go @@ -0,0 +1,160 @@ +// +// Copyright (c) 2023 Red Hat, Inc. 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "crypto/tls" + "fmt" + "io" + "net/http" + "os/exec" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +type ApiEndpointTest struct { + Endpoint string + ExpectedHttpStatusCode int + BodyMatcher types.GomegaMatcher +} + +func VerifyBackstagePodStatus(g Gomega, ns string, crName string, expectedStatus string) { + cmd := exec.Command("kubectl", "get", "pods", + "-l", "rhdh.redhat.com/app=backstage-"+crName, + "-o", "jsonpath={.items[*].status}", + "-n", ns, + ) // #nosec G204 + status, err := Run(cmd) + fmt.Fprintln(GinkgoWriter, string(status)) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(string(status)).Should(ContainSubstring(fmt.Sprintf(`"phase":%q`, expectedStatus)), + fmt.Sprintf("backstage pod in %s status", status)) +} + +func VerifyBackstageCRStatus(g Gomega, ns string, crName string, expectedStatus string) { + cmd := exec.Command(GetPlatformTool(), "get", "backstage", crName, "-o", "jsonpath={.status.conditions}", "-n", ns) // #nosec G204 + status, err := Run(cmd) + fmt.Fprintln(GinkgoWriter, string(status)) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(string(status)).Should(ContainSubstring(expectedStatus), + fmt.Sprintf("status condition with type %s should be set", expectedStatus)) +} + +func PatchBackstageCR(ns string, crName string, jsonPatch string, patchType string) error { + p := patchType 
+ if p == "" { + p = "strategic" + } + _, err := Run(exec.Command(GetPlatformTool(), "-n", ns, "patch", "backstage", crName, "--patch", jsonPatch, "--type="+p)) // #nosec G204 + return err +} + +func DoesBackstageRouteExist(ns string, crName string) (bool, error) { + routeName := "backstage-" + crName + out, err := Run(exec.Command(GetPlatformTool(), "get", "route", routeName, "-n", ns)) // #nosec G204 + if err != nil { + if strings.Contains(string(out), fmt.Sprintf("%q not found", routeName)) { + return false, nil + } + return false, err + } + return true, nil +} + +func GetBackstageRouteHost(ns string, crName string) (string, error) { + routeName := "backstage-" + crName + + hostBytes, err := Run(exec.Command( + GetPlatformTool(), "get", "route", routeName, "-o", "go-template={{if .spec.host}}{{.spec.host}}{{end}}", "-n", ns)) // #nosec G204 + if err != nil { + return "", fmt.Errorf("unable to determine host for route %s/%s: %w", ns, routeName, err) + } + host := string(hostBytes) + if host != "" { + return host, nil + } + + // try with subdomain in case it was set + subDomainBytes, err := Run(exec.Command( + GetPlatformTool(), "get", "route", routeName, "-o", "go-template={{if .spec.subdomain}}{{.spec.subdomain}}{{end}}", "-n", ns)) // #nosec G204 + if err != nil { + return "", fmt.Errorf("unable to determine subdomain for route %s/%s: %w", ns, routeName, err) + } + subDomain := string(subDomainBytes) + if subDomain == "" { + return "", nil + } + ingressDomainBytes, err := Run(exec.Command(GetPlatformTool(), "get", "ingresses.config/cluster", "-o", "jsonpath={.spec.domain}")) // #nosec G204 + if err != nil { + return "", fmt.Errorf("unable to determine ingress sub-domain: %w", err) + } + ingressDomain := string(ingressDomainBytes) + if ingressDomain == "" { + return "", nil + } + return fmt.Sprintf("%s.%s", subDomain, ingressDomain), err +} + +var defaultApiEndpointTests = []ApiEndpointTest{ + { + Endpoint: "/", + ExpectedHttpStatusCode: 200, + BodyMatcher: 
ContainSubstring("You need to enable JavaScript to run this app"), + }, + { + Endpoint: "/api/dynamic-plugins-info/loaded-plugins", + ExpectedHttpStatusCode: 200, + BodyMatcher: SatisfyAll( + ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-quay-dynamic"), + ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-regex-dynamic"), + ContainSubstring("roadiehq-scaffolder-backend-module-utils-dynamic"), + ), + }, +} + +func VerifyBackstageRoute(g Gomega, ns string, crName string, tests []ApiEndpointTest) { + host, err := GetBackstageRouteHost(ns, crName) + fmt.Fprintln(GinkgoWriter, host) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(host).ShouldNot(BeEmpty()) + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, // #nosec G402 -- test code only, not used in production + }, + } + httpClient := &http.Client{Transport: tr} + + performTest := func(tt ApiEndpointTest) { + url := fmt.Sprintf("https://%s/%s", host, strings.TrimPrefix(tt.Endpoint, "/")) + resp, rErr := httpClient.Get(url) + g.Expect(rErr).ShouldNot(HaveOccurred(), fmt.Sprintf("error while trying to GET %q", url)) + defer resp.Body.Close() + + g.Expect(resp.StatusCode).Should(Equal(tt.ExpectedHttpStatusCode), "context: "+tt.Endpoint) + body, rErr := io.ReadAll(resp.Body) + g.Expect(rErr).ShouldNot(HaveOccurred(), fmt.Sprintf("error while trying to read response body from 'GET %q'", url)) + if tt.BodyMatcher != nil { + g.Expect(string(body)).Should(tt.BodyMatcher, "context: "+tt.Endpoint) + } + } + allTests := append(defaultApiEndpointTests, tests...) + for _, tt := range allTests { + performTest(tt) + } +} diff --git a/tests/helper/utils.go b/tests/helper/utils.go new file mode 100644 index 00000000..8e3f9368 --- /dev/null +++ b/tests/helper/utils.go @@ -0,0 +1,238 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/discovery" + ctrl "sigs.k8s.io/controller-runtime" +) + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789") + +var ( + _isOpenShift bool +) + +func init() { + _isOpenShift = func() bool { + restConfig := ctrl.GetConfigOrDie() + dcl, err := discovery.NewDiscoveryClientForConfig(restConfig) + if err != nil { + return false + } + + apiList, err := dcl.ServerGroups() + if err != nil { + return false + } + + apiGroups := apiList.Groups + for i := 0; i < len(apiGroups); i++ { + if apiGroups[i].Name == "route.openshift.io" { + return true + } + } + + return false + }() +} + +func GetPlatformTool() string { + if IsOpenShift() { + return "oc" + } + return "kubectl" +} + +func saveImageArchive(name string) (string, error) { + cEng, err := Run(exec.Command("make", "--no-print-directory", "show-container-engine")) + if err != nil { + return "", err + } + containerEngine := strings.TrimSpace(string(cEng)) + + // check if image exists locally first. 
It not, try to pull it + _, err = Run(exec.Command(containerEngine, "image", "inspect", name)) // #nosec G204 + if err != nil { + // image likely does not exist locally + _, err = Run(exec.Command(containerEngine, "image", "pull", name)) // #nosec G204 + if err != nil { + return "", fmt.Errorf("image %q not found locally and not able to pull it: %w", name, err) + } + } + + f, err := os.CreateTemp("", "tmp_image_archive-") + if err != nil { + return "", err + } + tmp := f.Name() + _, err = Run(exec.Command(containerEngine, "image", "save", "--output", tmp, name)) // #nosec G204 + return tmp, err +} + +// LoadImageToKindClusterWithName loads a local container image to the kind cluster +func LoadImageToKindClusterWithName(name string) error { + archive, err := saveImageArchive(name) + defer func() { + if archive != "" { + _ = os.Remove(archive) + } + }() + if err != nil { + return err + } + + cluster := "kind" + if v, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_KIND_CLUSTER"); ok { + cluster = v + } + cmd := exec.Command("kind", "load", "image-archive", "--name", cluster, archive) // #nosec G204 + _, err = Run(cmd) + return err +} + +// LoadImageToK3dClusterWithName loads a local container image to the k3d cluster +func LoadImageToK3dClusterWithName(name string) error { + archive, err := saveImageArchive(name) + defer func() { + if archive != "" { + _ = os.Remove(archive) + } + }() + if err != nil { + return err + } + + cluster := "k3s-default" + if v, ok := os.LookupEnv("BACKSTAGE_OPERATOR_TESTS_K3D_CLUSTER"); ok { + cluster = v + } + cmd := exec.Command("k3d", "image", "import", archive, "--cluster", cluster) // #nosec G204 + _, err = Run(cmd) + return err +} + +// LoadImageToMinikubeClusterWithName loads a local container image to the Minikube cluster +func LoadImageToMinikubeClusterWithName(name string) error { + archive, err := saveImageArchive(name) + defer func() { + if archive != "" { + _ = os.Remove(archive) + } + }() + if err != nil { + return err + } + + 
_, err = Run(exec.Command("minikube", "image", "load", archive)) // #nosec G204 + return err +} + +// GetNonEmptyLines converts given command output string into individual objects +// according to line breakers, and ignores the empty elements in it. +func GetNonEmptyLines(output string) []string { + var res []string + elements := strings.Split(output, "\n") + for _, element := range elements { + if element != "" { + res = append(res, element) + } + } + + return res +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) ([]byte, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + fmt.Fprintf(GinkgoWriter, "running dir: %s\n", cmd.Dir) + + cmd.Env = append(cmd.Env, os.Environ()...) + + if err := os.Chdir(cmd.Dir); err != nil { + fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + } + + command := strings.Join(cmd.Args, " ") + fmt.Fprintf(GinkgoWriter, "running: %s\n", command) + + var stdBuffer bytes.Buffer + mw := io.MultiWriter(GinkgoWriter, &stdBuffer) + cmd.Stdout = mw + cmd.Stderr = mw + + err := cmd.Run() + outBytes := stdBuffer.Bytes() + if err != nil { + return outBytes, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(outBytes)) + } + + return outBytes, nil +} + +// GetProjectDir will return the directory where the project is +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return wd, err + } + wd = strings.Replace(wd, "/tests/e2e", "", -1) + return wd, nil +} + +func CreateNamespace(ns string) { + cmd := exec.Command(GetPlatformTool(), "create", "namespace", ns) // #nosec G204 + out, err := Run(cmd) + if err != nil && strings.Contains(string(out), fmt.Sprintf("%q already exists", ns)) { + return + } + Expect(err).ShouldNot(HaveOccurred()) +} + +func DeleteNamespace(ns string, wait bool) { + cmd := exec.Command(GetPlatformTool(), + "delete", + "namespace", + ns, + fmt.Sprintf("--wait=%s", strconv.FormatBool(wait)), + "--ignore-not-found=true", + ) // #nosec G204 + _, err 
:= Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) +} + +func RandString(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +func IsOpenShift() bool { + return _isOpenShift +}