diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index d562d21fd..6aaa95983 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -3,6 +3,7 @@ name: e2e
 on:
   workflow_dispatch:
   pull_request:
+  merge_group:
   push:
     branches:
       - main
@@ -22,4 +23,16 @@ jobs:
     - name: Run e2e tests
       run: |
-        make e2e
+        # By default, make stops the build on the first non-zero exit code,
+        # which for the e2e tests means coverage would only be collected on
+        # successful runs. We want to collect coverage even after failing
+        # tests. With the -k flag, make continues the build but still
+        # returns a non-zero exit code if any target failed.
+        make -k e2e
+
+    - uses: codecov/codecov-action@v3
+      with:
+        files: e2e-cover.out
+        flags: e2e
+        functionalities: fixes
diff --git a/.github/workflows/go-apidiff.yaml b/.github/workflows/go-apidiff.yaml
index 8b0741bb4..70a3bcc35 100644
--- a/.github/workflows/go-apidiff.yaml
+++ b/.github/workflows/go-apidiff.yaml
@@ -1,5 +1,7 @@
 name: go-apidiff
-on: [ pull_request ]
+on:
+  pull_request:
+  merge_group:
 jobs:
   go-apidiff:
     runs-on: ubuntu-latest
diff --git a/.github/workflows/pages.yaml b/.github/workflows/pages.yaml
new file mode 100644
index 000000000..8436fe4da
--- /dev/null
+++ b/.github/workflows/pages.yaml
@@ -0,0 +1,24 @@
+name: Deploy Documentation site
+on:
+  push:
+    branches:
+      - main
+permissions:
+  contents: write
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: 3.x
+      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
+      - uses: actions/cache@v3
+        with:
+          key: mkdocs-material-${{ env.cache_id }}
+          path: .cache
+          restore-keys: |
+            mkdocs-material-
+      - run: pip install mkdocs-material
+      - run: mkdocs gh-deploy --force
\ No newline at end of file
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 92f3ed1ba..2888aed6e 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -10,6 +10,7 @@ on:
   pull_request:
     branches:
       - main
+  merge_group:
 
 jobs:
   goreleaser:
diff --git a/.github/workflows/sanity.yaml b/.github/workflows/sanity.yaml
index f68d7ea25..3cbef5f88 100644
--- a/.github/workflows/sanity.yaml
+++ b/.github/workflows/sanity.yaml
@@ -3,6 +3,7 @@ name: sanity
 on:
   workflow_dispatch:
   pull_request:
+  merge_group:
   push:
     branches:
       - main
diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml
index a169f2960..f0411b93e 100644
--- a/.github/workflows/unit-test.yaml
+++ b/.github/workflows/unit-test.yaml
@@ -3,6 +3,7 @@ name: unit-test
 on:
   workflow_dispatch:
   pull_request:
+  merge_group:
   push:
     branches:
       - main
@@ -25,5 +26,5 @@ jobs:
     - uses: codecov/codecov-action@v3
       with:
         files: cover.out
-        fail_ci_if_error: true
+        flags: unit
         functionalities: fixes
diff --git a/.gitignore b/.gitignore
index ecfd7f0f4..498367fda 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,8 +14,9 @@ Dockerfile.cross
 # Test binary, build with `go test -c`
 *.test
 
-# Output of the go coverage tool, specifically when used with LiteIDE
+# Output of the go coverage tools
 *.out
+coverage
 
 # Release output
 dist/**
@@ -34,3 +35,7 @@ install.sh
 \#*\#
 .\#*
 
+# documentation website asset folder
+docs/_site
+
+.tiltbuild/
diff --git a/Makefile b/Makefile
index b6376e3ed..756dc9b55 100644
--- a/Makefile
+++ b/Makefile
@@ -24,6 +24,8 @@ KIND_CLUSTER_NAME ?= operator-controller
 CONTAINER_RUNTIME ?= docker
 
+KUSTOMIZE_BUILD_DIR ?= config/default
+
 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
 GOBIN=$(shell go env GOPATH)/bin
@@ -39,8 +41,7 @@ SHELL = /usr/bin/env bash -o pipefail
 
 # Disable -j flag for make
 .NOTPARALLEL:
 
-.PHONY: all
-all: build
+.DEFAULT_GOAL := build
 
 ##@ General
@@ -89,32 +90,45 @@ fmt: ## Run go fmt against code.
 vet: ## Run go vet against code.
 	go vet ./...
 
-.PHONY: test test-e2e e2e kind-load kind-cluster kind-cluster-cleanup
+.PHONY: test
 test: manifests generate fmt vet test-unit e2e ## Run all tests.
 
+.PHONY: test-e2e
 FOCUS := $(if $(TEST),-v -focus "$(TEST)")
 E2E_FLAGS ?= ""
 test-e2e: $(GINKGO) ## Run the e2e tests
 	$(GINKGO) --tags $(GO_BUILD_TAGS) $(E2E_FLAGS) -trace -progress $(FOCUS) test/e2e
 
+.PHONY: test-unit
 ENVTEST_VERSION = $(shell go list -m k8s.io/client-go | cut -d" " -f2 | sed 's/^v0\.\([[:digit:]]\{1,\}\)\.[[:digit:]]\{1,\}$$/1.\1.x/')
 UNIT_TEST_DIRS=$(shell go list ./... | grep -v /test/)
 test-unit: $(SETUP_ENVTEST) ## Run the unit tests
 	eval $$($(SETUP_ENVTEST) use -p env $(ENVTEST_VERSION)) && go test -tags $(GO_BUILD_TAGS) -count=1 -short $(UNIT_TEST_DIRS) -coverprofile cover.out
 
+.PHONY: e2e
 e2e: KIND_CLUSTER_NAME=operator-controller-e2e
-e2e: run kind-load-test-artifacts test-e2e kind-cluster-cleanup ## Run e2e test suite on local kind cluster
+e2e: KUSTOMIZE_BUILD_DIR=config/e2e
+e2e: GO_BUILD_FLAGS=-cover
+e2e: run kind-load-test-artifacts test-e2e e2e-coverage kind-cluster-cleanup ## Run e2e test suite on local kind cluster
+
+.PHONY: e2e-coverage
+e2e-coverage:
+	COVERAGE_OUTPUT=./e2e-cover.out ./hack/e2e-coverage.sh
 
+.PHONY: kind-load
 kind-load: $(KIND) ## Loads the currently constructed image onto the cluster
 	$(KIND) load docker-image $(IMG) --name $(KIND_CLUSTER_NAME)
 
-kind-cluster: $(KIND) kind-cluster-cleanup ## Standup a kind cluster
+.PHONY: kind-cluster
+kind-cluster: $(KIND) ## Stand up a kind cluster
 	$(KIND) create cluster --name ${KIND_CLUSTER_NAME}
 	$(KIND) export kubeconfig --name ${KIND_CLUSTER_NAME}
 
+.PHONY: kind-cluster-cleanup
 kind-cluster-cleanup: $(KIND) ## Delete the kind cluster
 	$(KIND) delete cluster --name ${KIND_CLUSTER_NAME}
 
+.PHONY: kind-load-test-artifacts
 kind-load-test-artifacts: $(KIND) ## Load the e2e testdata container images into a kind cluster
 	$(CONTAINER_RUNTIME) build $(TESTDATA_DIR)/bundles/registry-v1/prometheus-operator.v0.37.0 -t localhost/testdata/bundles/registry-v1/prometheus-operator:v0.37.0
 	$(CONTAINER_RUNTIME) build $(TESTDATA_DIR)/bundles/registry-v1/prometheus-operator.v0.47.0 -t localhost/testdata/bundles/registry-v1/prometheus-operator:v0.47.0
@@ -135,8 +149,9 @@ export GO_BUILD_ASMFLAGS ?= all=-trimpath=${PWD}
 export GO_BUILD_LDFLAGS ?= -s -w -X $(shell go list -m)/version.Version=$(VERSION)
 export GO_BUILD_GCFLAGS ?= all=-trimpath=${PWD}
 export GO_BUILD_TAGS ?= upstream
+export GO_BUILD_FLAGS ?=
 
-BUILDCMD = go build -tags '$(GO_BUILD_TAGS)' -ldflags '$(GO_BUILD_LDFLAGS)' -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o $(BUILDBIN)/manager ./cmd/manager
+BUILDCMD = go build $(GO_BUILD_FLAGS) -tags '$(GO_BUILD_TAGS)' -ldflags '$(GO_BUILD_LDFLAGS)' -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o $(BUILDBIN)/manager ./cmd/manager
 
 .PHONY: build-deps
 build-deps: manifests generate fmt vet
@@ -156,10 +171,6 @@ go-build-linux:
 
 .PHONY: run
 run: docker-build kind-cluster kind-load install ## Build the operator-controller then deploy it into a new kind cluster.
-.PHONY: wait
-wait:
-	kubectl wait --for=condition=Available --namespace=$(OPERATOR_CONTROLLER_NAMESPACE) deployment/operator-controller-controller-manager --timeout=$(WAIT_TIMEOUT)
-
 .PHONY: docker-build
 docker-build: build-linux ## Build docker image for operator-controller with GOOS=linux and local GOARCH.
 	docker build -t ${IMG} -f Dockerfile ./bin/linux
@@ -172,12 +183,14 @@ docker-build: build-linux ## Build docker image for operator-controller with GOO
 export ENABLE_RELEASE_PIPELINE ?= false
 export GORELEASER_ARGS ?= --snapshot --clean
 
+.PHONY: release
 release: $(GORELEASER) ## Runs goreleaser for the operator-controller. By default, this will run only as a snapshot and will not publish any artifacts unless it is run with different arguments. To override the arguments, run with "GORELEASER_ARGS=...". When run as a GitHub Action from a tag, this target will publish a full release.
 	$(GORELEASER) $(GORELEASER_ARGS)
 
+.PHONY: quickstart
 quickstart: export MANIFEST="https://github.com/operator-framework/operator-controller/releases/download/$(VERSION)/operator-controller.yaml"
 quickstart: $(KUSTOMIZE) generate ## Generate the installation release manifests and scripts
-	$(KUSTOMIZE) build config/default | sed "s/:devel/:$(VERSION)/g" > operator-controller.yaml
+	$(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) | sed "s/:devel/:$(VERSION)/g" > operator-controller.yaml
 	envsubst '$$CATALOGD_VERSION,$$CERT_MGR_VERSION,$$RUKPAK_VERSION,$$MANIFEST' < scripts/install.tpl.sh > install.sh
 
 ##@ Deployment
 
@@ -189,7 +202,7 @@ endif
 .PHONY: install
 install: export MANIFEST="./operator-controller.yaml"
 install: manifests $(KUSTOMIZE) generate ## Install CRDs into the K8s cluster specified in ~/.kube/config.
-	$(KUSTOMIZE) build config/default > operator-controller.yaml
+	$(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) > operator-controller.yaml
 	envsubst '$$CATALOGD_VERSION,$$CERT_MGR_VERSION,$$RUKPAK_VERSION,$$MANIFEST' < scripts/install.tpl.sh | bash -s
 
 .PHONY: uninstall
@@ -199,8 +212,8 @@ uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specifi
 .PHONY: deploy
 deploy: manifests $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
-	$(KUSTOMIZE) build config/default | kubectl apply -f -
+	$(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) | kubectl apply -f -
 
 .PHONY: undeploy
 undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
-	$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+	$(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
diff --git a/Tiltfile b/Tiltfile
new file mode 100644
index 000000000..12b931d62
--- /dev/null
+++ b/Tiltfile
@@ -0,0 +1,66 @@
+# This loads a helper function that isn't part of core Tilt that simplifies restarting the process in the container
+# when files change.
+load('ext://restart_process', 'docker_build_with_restart')
+
+# Treat the main binary as a local resource, so we can automatically rebuild it when any of the deps change. This
+# builds it locally, targeting linux, so it can run in a linux container.
+local_resource(
+    'manager_binary',
+    cmd='''
+mkdir -p .tiltbuild/bin
+CGO_ENABLED=0 GOOS=linux go build -o .tiltbuild/bin/manager ./cmd/manager
+''',
+    deps=['api', 'cmd/manager', 'internal', 'pkg', 'go.mod', 'go.sum']
+)
+
+# Configure our image build. If the file in live_update.sync (.tiltbuild/bin/manager) changes, Tilt
+# copies it to the running container and restarts it.
+docker_build_with_restart(
+    # This has to match an image in the k8s_yaml we call below, so Tilt knows to use this image for our Deployment,
+    # instead of the actual image specified in the yaml.
+    ref='quay.io/operator-framework/operator-controller:devel',
+    # This is the `docker build` context, and because we're only copying in the binary we've already had Tilt build
+    # locally, we set the context to the directory containing the binary.
+    context='.tiltbuild/bin',
+    # We use a slimmed-down Dockerfile that only has the manager binary in it.
+    dockerfile_contents='''
+FROM gcr.io/distroless/static:debug
+EXPOSE 8080
+WORKDIR /
+COPY manager manager
+''',
+    # The set of files Tilt should include in the build. In this case, it's just the binary we built above.
+    only='manager',
+    # If .tiltbuild/bin/manager changes, Tilt will copy it into the running container and restart the process.
+    live_update=[
+        sync('.tiltbuild/bin/manager', '/manager'),
+    ],
+    # The command to run in the container.
+    entrypoint="/manager",
+)
+
+# Tell Tilt what to deploy by running kustomize and then doing some manipulation to make things work for Tilt.
+objects = decode_yaml_stream(kustomize('config/default'))
+for o in objects:
+    # For Tilt's live_update functionality to work, we have to run the container as root. Remove any PSA labels to allow
+    # this.
+    if o['kind'] == 'Namespace' and 'labels' in o['metadata']:
+        labels_to_delete = [label for label in o['metadata']['labels'] if label.startswith('pod-security.kubernetes.io')]
+        for label in labels_to_delete:
+            o['metadata']['labels'].pop(label)
+
+    if o['kind'] != 'Deployment':
+        # We only need to modify Deployments, so we can skip this
+        continue
+
+    # For Tilt's live_update functionality to work, we have to run the container as root. Otherwise, Tilt won't
+    # be able to untar the updated binary in the container's file system (this is how live update
+    # works). If there are any securityContexts, remove them.
+ if "securityContext" in o['spec']['template']['spec']: + o['spec']['template']['spec'].pop('securityContext') + for c in o['spec']['template']['spec']['containers']: + if "securityContext" in c: + c.pop('securityContext') + +# Now apply all the yaml +k8s_yaml(encode_yaml_stream(objects)) diff --git a/config/e2e/kustomization.yaml b/config/e2e/kustomization.yaml new file mode 100644 index 000000000..7d0ba86c5 --- /dev/null +++ b/config/e2e/kustomization.yaml @@ -0,0 +1,9 @@ +namespace: operator-controller-system + +resources: +- ../default +- manager_e2e_coverage_pvc.yaml +- manager_e2e_coverage_copy_pod.yaml + +patches: +- path: manager_e2e_coverage_patch.yaml diff --git a/config/e2e/manager_e2e_coverage_copy_pod.yaml b/config/e2e/manager_e2e_coverage_copy_pod.yaml new file mode 100644 index 000000000..45139d062 --- /dev/null +++ b/config/e2e/manager_e2e_coverage_copy_pod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: e2e-coverage-copy-pod + labels: + app.kubernetes.io/name: e2e-coverage-copy-pod + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: e2e-coverage + app.kubernetes.io/created-by: operator-controller + app.kubernetes.io/part-of: operator-controller + app.kubernetes.io/managed-by: kustomize +spec: + restartPolicy: Never + securityContext: + runAsNonRoot: true + runAsUser: 65532 + seccompProfile: + type: RuntimeDefault + containers: + - name: tar + image: busybox:1.36 + command: ["sleep", "infinity"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + volumeMounts: + - name: e2e-coverage-volume + mountPath: /e2e-coverage + readOnly: true + volumes: + - name: e2e-coverage-volume + persistentVolumeClaim: + claimName: e2e-coverage + readOnly: true diff --git a/config/e2e/manager_e2e_coverage_patch.yaml b/config/e2e/manager_e2e_coverage_patch.yaml new file mode 100644 index 000000000..bda011daf --- /dev/null +++ b/config/e2e/manager_e2e_coverage_patch.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + - name: manager + env: + - name: GOCOVERDIR + value: /e2e-coverage + volumeMounts: + - name: e2e-coverage-volume + mountPath: /e2e-coverage + volumes: + - name: e2e-coverage-volume + persistentVolumeClaim: + claimName: e2e-coverage diff --git a/config/e2e/manager_e2e_coverage_pvc.yaml b/config/e2e/manager_e2e_coverage_pvc.yaml new file mode 100644 index 000000000..126d4d4e6 --- /dev/null +++ b/config/e2e/manager_e2e_coverage_pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: e2e-coverage +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 64Mi diff --git a/docs/Tasks/adding-a-catalog.md b/docs/Tasks/adding-a-catalog.md new file mode 100644 index 000000000..56235c404 --- /dev/null +++ b/docs/Tasks/adding-a-catalog.md @@ -0,0 +1,85 @@ +--- +layout: default +title: Adding a catalog of operators to the cluster +nav_order: 1 +parent: Tasks +--- + +Operator authors have the mechanisms to offer their product as part of a curated catalog of operators, that they can push updates to over-the-air (eg publish new versions, publish patched versions with CVEs, etc). Cluster admins can sign up to receive these updates on clusters, by adding the catalog to the cluster. 
When a catalog is added to a cluster, the kubernetes extension packages (operators, or any other extension package) in that catalog become available on cluster for installation and receiving updates. + +For example, the [k8s-operatorhub/community-operators](https://github.com/k8s-operatorhub/community-operators) is a catalog of curated operators that contains a list of operators being developed by the community. The list of operators can be viewed in [Operatorhub.io](https://operatorhub.io). This catalog is distributed as an image [quay.io/operatorhubio/catalog](https://quay.io/repository/operatorhubio/catalog?tag=latest&tab=tags) for consumption on clusters. + +To consume this catalog on cluster, create a `Catalog` Custom Resource(CR) with the image specified in the `spec.source.image` field: + +```bash +$ kubectl apply -f - <_binary` `local_resource`, +Tilt automatically rebuilds the go binary. As soon as the binary is rebuilt, Tilt pushes it (and only it) into the +appropriate running container, and then restarts the process.
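A usage note on the Tilt workflow the last paragraph describes: the Tiltfile above does not create a cluster or set a kubeconfig, so the sketch below assumes a local kind cluster is already running and is the current kubectl context. `tilt up` and `tilt down` are standard Tilt CLI commands; the comments simply restate what the Tiltfile shown earlier does.

```bash
# Start the dev loop: Tilt builds .tiltbuild/bin/manager locally, wraps it
# in the slim distroless image, applies the kustomize output, and watches
# the deps listed in the Tiltfile (api/, cmd/manager/, internal/, pkg/,
# go.mod, go.sum).
tilt up

# On any change to those paths, Tilt rebuilds the binary, syncs it to
# /manager in the running container, and restarts the process -- no image
# rebuild or redeploy needed.

# Tear the resources back down when finished.
tilt down
```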
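The `e2e-coverage` target in the Makefile delegates to `hack/e2e-coverage.sh`, which is not included in this diff. Based only on the pieces that are shown — the manager built with `-cover` and running with `GOCOVERDIR=/e2e-coverage`, the `e2e-coverage` PVC, and the `e2e-coverage-copy-pod` that mounts it read-only — a minimal sketch of such a script might look like the following. The namespace, pod, container, and deployment names come from the manifests and Makefile above; the scale-down step, the pod label selector, and the `covdata` invocation are assumptions about how the real script works.

```bash
#!/usr/bin/env bash
set -euo pipefail

COVERAGE_OUTPUT=${COVERAGE_OUTPUT:-./e2e-cover.out}
NAMESPACE=operator-controller-system
COPY_POD=e2e-coverage-copy-pod

# Go flushes GOCOVERDIR counter files when the instrumented process exits,
# so stop the manager before copying. (Assumption: the real script flushes
# coverage this way; the label is the common kubebuilder default.)
kubectl -n "$NAMESPACE" scale deployment/operator-controller-controller-manager --replicas=0
kubectl -n "$NAMESPACE" wait --for=delete pod \
    -l control-plane=controller-manager --timeout=60s

# Copy the raw coverage data out of the PVC via the long-running copy pod.
covdir=$(mktemp -d)
kubectl -n "$NAMESPACE" cp -c tar "$COPY_POD":/e2e-coverage "$covdir"

# Convert the binary counter files into the text profile Codecov expects.
go tool covdata textfmt -i="$covdir" -o="$COVERAGE_OUTPUT"
echo "coverage profile written to $COVERAGE_OUTPUT"
```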
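The `kubectl apply` heredoc in the catalog documentation is truncated in this copy, and the exact manifest is not recoverable from the diff. Purely as an illustration of the shape the surrounding text describes — a `Catalog` CR whose `spec.source.image` points at the operatorhubio catalog image — it might look roughly like this; the `apiVersion`, the `type: image` discriminator, and the `ref` field name are assumptions about the catalogd API, not confirmed by this document.

```bash
# Illustrative only: the field names below are assumptions; check the
# Catalog CRD installed on your cluster for the authoritative schema.
kubectl apply -f - <<EOF
apiVersion: catalogd.operatorframework.io/v1alpha1
kind: Catalog
metadata:
  name: operatorhubio
spec:
  source:
    type: image
    image:
      ref: quay.io/operatorhubio/catalog:latest
EOF
```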