From 5dbce8175cfaf33ac740800bdbb385ab5b9aaace Mon Sep 17 00:00:00 2001 From: Anik Date: Thu, 15 Jun 2023 16:20:01 -0400 Subject: [PATCH] Publish /docs to github pages --- .github/workflows/pages.yaml | 69 +++++ .gitignore | 2 + docs/Gemfile | 7 + docs/Gemfile.lock | 81 +++++ docs/README.md | 44 +++ docs/Releases/index.md | 7 + docs/Releases/v0.2.0-release.md | 364 +++++++++++++++++++++++ docs/Tasks/adding-a-catalog.md | 85 ++++++ docs/Tasks/deleting-an-operator.md | 21 ++ docs/Tasks/explore-available-packages.md | 142 +++++++++ docs/Tasks/index.md | 6 + docs/Tasks/installing-an-operator.md | 69 +++++ docs/_config.yaml | 5 + docs/components.md | 21 ++ docs/index.md | 55 ++++ docs/olmv1_roadmap.md | 27 +- 16 files changed, 984 insertions(+), 21 deletions(-) create mode 100644 .github/workflows/pages.yaml create mode 100644 docs/Gemfile create mode 100644 docs/Gemfile.lock create mode 100644 docs/README.md create mode 100644 docs/Releases/index.md create mode 100644 docs/Releases/v0.2.0-release.md create mode 100644 docs/Tasks/adding-a-catalog.md create mode 100644 docs/Tasks/deleting-an-operator.md create mode 100644 docs/Tasks/explore-available-packages.md create mode 100644 docs/Tasks/index.md create mode 100644 docs/Tasks/installing-an-operator.md create mode 100644 docs/_config.yaml create mode 100644 docs/components.md create mode 100644 docs/index.md diff --git a/.github/workflows/pages.yaml b/.github/workflows/pages.yaml new file mode 100644 index 000000000..02ba79cb0 --- /dev/null +++ b/.github/workflows/pages.yaml @@ -0,0 +1,69 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# Sample workflow for building and deploying a Jekyll site to GitHub Pages +name: Deploy Jekyll site to Pages + +on: + push: + branches: ["main"] + paths: + - "docs/**" + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow one concurrent deployment +concurrency: + group: "pages" + cancel-in-progress: true + +jobs: + # Build job + build: + runs-on: ubuntu-latest + defaults: + run: + working-directory: docs + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.1' # Not needed with a .ruby-version file + bundler-cache: true # runs 'bundle install' and caches installed gems automatically + cache-version: 0 # Increment this number if you need to re-download cached gems + working-directory: '${{ github.workspace }}/docs' + - name: Setup Pages + id: pages + uses: actions/configure-pages@v3 + - name: Build with Jekyll + # Outputs to the './_site' directory by default + run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" + env: + JEKYLL_ENV: production + - name: Upload artifact + # Automatically uploads an artifact from the './_site' directory by default + uses: actions/upload-pages-artifact@v1 + with: + path: "docs/_site/" + + # Deployment job + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v2 \ No newline at end of file diff --git a/.gitignore b/.gitignore index ecfd7f0f4..d3ab616ae 100644 --- a/.gitignore +++ 
b/.gitignore @@ -34,3 +34,5 @@ install.sh \#*\# .\#* +# documentation website asset folder +docs/_site \ No newline at end of file diff --git a/docs/Gemfile b/docs/Gemfile new file mode 100644 index 000000000..3a63ef2e0 --- /dev/null +++ b/docs/Gemfile @@ -0,0 +1,7 @@ +source 'https://rubygems.org' + +gem "jekyll", "~> 4.3.2" # installed by `gem jekyll` +# gem "webrick" # required when using Ruby >= 3 and Jekyll <= 4.2.2 + +gem "just-the-docs", "0.5.2" # pinned to the current release +# gem "just-the-docs" # always download the latest release \ No newline at end of file diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock new file mode 100644 index 000000000..6af5aad7a --- /dev/null +++ b/docs/Gemfile.lock @@ -0,0 +1,81 @@ +GEM + remote: https://rubygems.org/ + specs: + addressable (2.8.4) + public_suffix (>= 2.0.2, < 6.0) + colorator (1.1.0) + concurrent-ruby (1.2.2) + em-websocket (0.5.3) + eventmachine (>= 0.12.9) + http_parser.rb (~> 0) + eventmachine (1.2.7) + ffi (1.15.5) + forwardable-extended (2.6.0) + google-protobuf (3.23.2-arm64-darwin) + google-protobuf (3.23.2-x86_64-linux) + http_parser.rb (0.8.0) + i18n (1.14.1) + concurrent-ruby (~> 1.0) + jekyll (4.3.2) + addressable (~> 2.4) + colorator (~> 1.0) + em-websocket (~> 0.5) + i18n (~> 1.0) + jekyll-sass-converter (>= 2.0, < 4.0) + jekyll-watch (~> 2.0) + kramdown (~> 2.3, >= 2.3.1) + kramdown-parser-gfm (~> 1.0) + liquid (~> 4.0) + mercenary (>= 0.3.6, < 0.5) + pathutil (~> 0.9) + rouge (>= 3.0, < 5.0) + safe_yaml (~> 1.0) + terminal-table (>= 1.8, < 4.0) + webrick (~> 1.7) + jekyll-sass-converter (3.0.0) + sass-embedded (~> 1.54) + jekyll-seo-tag (2.8.0) + jekyll (>= 3.8, < 5.0) + jekyll-watch (2.2.1) + listen (~> 3.0) + just-the-docs (0.5.2) + jekyll (>= 3.8.5) + jekyll-seo-tag (>= 2.0) + rake (>= 12.3.1) + kramdown (2.4.0) + rexml + kramdown-parser-gfm (1.1.0) + kramdown (~> 2.0) + liquid (4.0.4) + listen (3.8.0) + rb-fsevent (~> 0.10, >= 0.10.3) + rb-inotify (~> 0.9, >= 0.9.10) + mercenary (0.4.0) + pathutil (0.16.2) + forwardable-extended (~> 2.6) + public_suffix (5.0.1) + rake (13.0.6) + rb-fsevent (0.11.2) + rb-inotify (0.10.1) + ffi (~> 1.0) + rexml (3.2.5) + rouge (3.30.0) + safe_yaml (1.0.5) + sass-embedded (1.58.3) + google-protobuf (~> 3.21) + rake (>= 10.0.0) + terminal-table (3.0.2) + unicode-display_width (>= 1.1.1, < 3) + unicode-display_width (2.4.2) + webrick (1.8.1) + +PLATFORMS + arm64-darwin-22 + x86_64-linux + +DEPENDENCIES + jekyll (~> 4.3.2) + just-the-docs (= 0.5.2) + +BUNDLED WITH + 2.4.14 diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..0b97266fc --- /dev/null +++ b/docs/README.md @@ -0,0 +1,44 @@ +# Why are we building OLM v1? + +Operator-lifecycle-manager's mission has been to manage the lifecycle of cluster extensions centrally and declaratively on Kubernetes clusters. Its purpose has always been to make installing, +running, and updating functional extensions to the cluster easy, safe, and reproducible for cluster administrators and PaaS administrators, throughout the lifecycle of the underlying cluster. + +OLM so far has focused on providing unique support for these specific needs for a particular type of cluster extension, which has been coined as [operators](https://operatorhub.io/what-is-an-operator#:~:text=is%20an%20Operator-,What%20is%20an%20Operator%20after%20all%3F,or%20automation%20software%20like%20Ansible.).
+Operators were classified as one or more Kubernetes controllers, shipping with one or more API extensions (CustomResourceDefinitions) to provide additional functionality to the cluster. +Over the last few years of running OLM in production clusters, it became apparent that there's an appetite to deviate from this coupling of CRDs and controllers, to encompass the lifecycling +of extensions that are not just operators. + +OLM has been assisting in defining a lifecycle for these extensions in which they get installed, potentially causing other extensions to be installed as dependencies, with a limited set of +configuration customizations at runtime, an upgrade model following a path defined by the extension developer, and eventual decommission and removal. There is a dependency model in which extensions can +rely on each other for required services that are out of scope of the primary purpose of an extension, allowing each extension to focus on a specific purpose. OLM also prevents conflicting +extensions from running on the cluster, either with conflicting dependency constraints or conflicts in ownership of services provided via APIs. Since cluster extensions need to be supported +with an enterprise-grade product lifecycle, there has been a growing need for allowing operator authors to limit installation and upgrade of their extension by specifying additional environmental +constraints as dependencies, primarily to align with what was tested by the operator author's QE processes. In other words, there is an ever-growing ask for OLM to allow the author to enforce these +support limitations in the form of additional constraints specified by operator authors in their packaging for OLM. + +During their lifecycle on the cluster, OLM also manages the permissions and capabilities extensions have on the cluster as well as the permissions and access tenants on the cluster have to the +extensions. This is done using the Kubernetes RBAC system, in combination with tenant isolation using Kubernetes namespaces. While the interaction surface of the extensions is solely composed of +Kubernetes APIs the extensions define, there is an acute need to rethink the way tenant (i.e. consumers of extensions) isolation is achieved. The ask from OLM is to provide tenant isolation in +a more intuitive way than [is implemented in OLM v0](https://olm.operatorframework.io/docs/advanced-tasks/operator-scoping-with-operatorgroups/#docs). + +OLM also defines a packaging model in which catalogs of extensions, usually containing the entire version history of each extension, are made available to clusters for cluster users to +browse and select from. While these catalogs have so far been packaged and shipped as container images, there is a growing appetite to allow more ways of packaging and shipping these catalogs, +as well as simplifying the process of building these catalogs, which has so far been very costly. The effort to bring down the cost was kicked off in OLM v0 with the conversion of the underlying +datastore for catalog metadata to [File-based Catalogs](https://olm.operatorframework.io/docs/reference/file-based-catalogs/), with more effort being invested to slim down the process in v1. +Via new versions of extensions delivered with this packaging system, OLM is able to apply updates to existing running extensions on the cluster in a way where the integrity of the cluster is +maintained and constraints and dependencies are kept satisfied.
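+
+For a concrete sense of what that packaging looks like, a File-based Catalog is just declarative metadata describing packages, channels, and bundles. The sketch below is purely illustrative; the package name, channel, and image references are made-up placeholders, not content from this repository:
+
+```bash
+# Write a tiny file-based catalog with one package, one channel, and one bundle.
+# All names and image references below are illustrative placeholders.
+mkdir -p catalog/example-operator
+cat <<'EOF' > catalog/example-operator/catalog.yaml
+---
+schema: olm.package
+name: example-operator
+defaultChannel: stable
+---
+schema: olm.channel
+package: example-operator
+name: stable
+entries:
+  - name: example-operator.v0.1.0
+---
+schema: olm.bundle
+name: example-operator.v0.1.0
+package: example-operator
+image: quay.io/example/example-operator-bundle:v0.1.0
+properties:
+  - type: olm.package
+    value:
+      packageName: example-operator
+      version: 0.1.0
+EOF
+```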
+ +Finally, the scope of OLM's area of operation in v0 is the one cluster it is running on, with namespace-based handling of catalog access and extension API accessibility and discoverability. +Expansion of this scope is indirectly expected through the work of the [Kubernetes Control Plane (kcp) project](https://github.com/kcp-dev/kcp), which in its first incarnation will likely +use its own synchronization mechanism to get OLM-managed extensions deployed eventually on one or more physical clusters from a shared, virtual control plane called a “workspace”. +While this is an area under active development and subject to change, OLM will most likely need to become aware of kcp in a future state. In v1 of OLM, the scope of OLM will increase to span +multiple clusters following the kcp model, though likely many aspects of this will become transparent to OLM itself through the workspace abstraction that kcp provides. +In other words, what needs to change in OLM 1.0 is how all of the tasks mentioned above are carried out from the user's perspective, how much control users have in the process, and which +personas are involved. + + +For a more detailed writeup of the requirements for OLM v1, please read the [Product Requirement Documentation](/docs/olmv1_roadmap.md). + +# OLM v1 progress report + +The OLM v1 project is being tracked in the GitHub project https://github.com/orgs/operator-framework/projects/8/ diff --git a/docs/Releases/index.md b/docs/Releases/index.md new file mode 100644 index 000000000..41e20631d --- /dev/null +++ b/docs/Releases/index.md @@ -0,0 +1,7 @@ +--- +layout: default +title: Releases +nav_order: 5 +has_children: true +--- + diff --git a/docs/Releases/v0.2.0-release.md b/docs/Releases/v0.2.0-release.md new file mode 100644 index 000000000..eef275974 --- /dev/null +++ b/docs/Releases/v0.2.0-release.md @@ -0,0 +1,364 @@ +--- +layout: default +title: v0.2.0 +nav_order: 1 +parent: Releases +--- + +Hello again, world! The Operator Framework community is excited to announce the [v0.2.0](https://github.com/operator-framework/operator-controller/releases/tag/v0.2.0) release of operator-controller! + +With this release of operator-controller, you can install an operator from a catalog of operators. This might sound familiar to existing users of OLM v0; however, there are some key differences that are evident in the user experience of installing an operator from a catalog of operators, even at this early stage of OLM v1. + +Let us walk through an example of installing an operator from a catalog of operators, which will highlight the differences mentioned above. + +First, let's get a [Kind](https://kind.sigs.k8s.io) cluster fired up: + +```bash +$ kind create cluster +Creating cluster "kind" ... + ✓ Ensuring node image (kindest/node:v1.26.3) 🖼 + ✓ Preparing nodes 📦 + ✓ Writing configuration 📜 + ✓ Starting control-plane 🕹️ + ✓ Installing CNI 🔌 + ✓ Installing StorageClass 💾 +Set kubectl context to "kind-kind" +You can now use your cluster with: + +kubectl cluster-info --context kind-kind + +Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂 +``` + +Next, let's install the v0.2.0 release of operator-controller: + +```bash +$ curl -L -s https://github.com/operator-framework/operator-controller/releases/download/v0.2.0/install.sh | bash -s +. +.
+deployment.apps/operator-controller-controller-manager created +deployment.apps/operator-controller-controller-manager condition met +``` + +If we look into the cluster, we'll see that the installation process installed the various [components](components.md) of OLM v1: + +```bash +$ kubectl get namespaces +NAME STATUS AGE +catalogd-system Active 2m4s +cert-manager Active 3m28s +crdvalidator-system Active 3m1s +default Active 17m +kube-node-lease Active 17m +kube-public Active 17m +kube-system Active 17m +local-path-storage Active 16m +operator-controller-system Active 82s +rukpak-system Active 3m1s +``` + +Looks like all the [components](/docs/components.md) `catalogd-system`, `operator-controller-system`, and `rukpak-system` are available in the cluster! + +Let's now add a repository (catalog) of operators to the cluster, to make the operators shipped in the catalog available for installation on the cluster. We'll use the upstream community-operators catalog [quay.io/operatorhubio/catalog](https://quay.io/repository/operatorhubio/catalog?tab=tags&tag=latest): + +```bash +$ kubectl apply -f - < **_NOTE:_** +> ### Installing specific versions of an operator +> By default, the latest version of the operator is installed when a version is not specified in the `Operator` CR. We can confirm this (for now) by matching the image reference from the `installedBundleResource` field with the image reference for the latest version, which in argocd-operator's case is v0.6.0: +> +>```bash +$ kubectl get bundlemetadata operatorhubio-argocd-operator.v0.6.0 -o yaml | grep image + image: quay.io/operatorhubio/argocd-operator@sha256:1a9b3c8072f2d7f4d6528fa32905634d97b7b4c239ef9887e3fb821ff033fef6 + - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + - image: quay.io/argoprojlabs/argocd-operator@sha256:99aeec24cc406d06d18822347d9ac3ed053a702d8419191e4e681075fed7b9bb + - image: quay.io/operatorhubio/argocd-operator@sha256:1a9b3c8072f2d7f4d6528fa32905634d97b7b4c239ef9887e3fb821ff033fef6 +``` +> +>Looks like the last image reference is in fact the one that is also visible in the `installedBundleResource` field.
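+>
+>As a quicker check, the same field can be read directly with a JSONPath query. This one-liner is a small convenience sketch added here for illustration and is not part of the original walkthrough:
+>
+>```bash
+># Print only the installed bundle image reference from the Operator's status
+>$ kubectl get operator argocd-operator -o jsonpath='{.status.installedBundleResource}{"\n"}'
+>```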
+ >We can also pin the installation of an operator to a specific version, if we wish to install something other than the latest: > +>```bash +$ kubectl apply -f - < +>$ kubectl get operator argocd-operator -o yaml | grep status -A 4 +status: + conditions: + - lastTransitionTime: "2023-06-21T15:32:54Z" + message: resolved to "quay.io/operatorhubio/argocd-operator@sha256:5f4533de114e9301e836155190dc2c66a969f55085af03a4959a2fb0af74e1f4" + observedGeneration: 1 +-- + status: "True" + type: Resolved + - lastTransitionTime: "2023-06-21T15:33:03Z" + message: installed from "quay.io/operatorhubio/argocd-operator@sha256:5f4533de114e9301e836155190dc2c66a969f55085af03a4959a2fb0af74e1f4" + observedGeneration: 1 +-- + status: "True" + type: Installed + installedBundleResource: quay.io/operatorhubio/argocd-operator@sha256:5f4533de114e9301e836155190dc2c66a969f55085af03a4959a2fb0af74e1f4 + resolvedBundleResource: quay.io/operatorhubio/argocd-operator@sha256:5f4533de114e9301e836155190dc2c66a969f55085af03a4959a2fb0af74e1f4 +``` +> +>We can confirm that the v0.5.0 version was in fact the version that was installed, by again matching the image reference visible in the `installedBundleResource` field with the image reference for v0.5.0: +> +>```bash +$ kubectl get bundlemetadata operatorhubio-argocd-operator.v0.5.0 -o yaml | grep image + image: quay.io/operatorhubio/argocd-operator@sha256:5f4533de114e9301e836155190dc2c66a969f55085af03a4959a2fb0af74e1f4 + - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + - image: quay.io/argoprojlabs/argocd-operator@sha256:2f5c0d4567607266ccacb91d8c2e3b18c2afe0edaf55855dcb1a06b02173b520 + - image: quay.io/operatorhubio/argocd-operator@sha256:5f4533de114e9301e836155190dc2c66a969f55085af03a4959a2fb0af74e1f4 +``` +> In one of the upcoming releases, we plan on providing a more straightforward way of gauging which version of an operator has been installed. The discussion for that effort is being tracked in [this issue](https://github.com/operator-framework/operator-controller/issues/270). + + +Check out a live demo of the release here: + +[![Video](https://img.youtube.com/vi/RR88HeoCyfE/0.jpg)](https://www.youtube.com/watch?v=RR88HeoCyfE&t=2322s "Installing an operator from a catalog of operators") + diff --git a/docs/Tasks/adding-a-catalog.md b/docs/Tasks/adding-a-catalog.md new file mode 100644 index 000000000..fe14e8612 --- /dev/null +++ b/docs/Tasks/adding-a-catalog.md @@ -0,0 +1,85 @@ +--- +layout: default +title: Adding a catalog of operators to the cluster +nav_order: 1 +parent: Tasks +--- + +Operator authors have the mechanisms to offer their product as part of a curated catalog of operators that they can push updates to over-the-air (e.g. publish new versions, publish patched versions that fix CVEs, etc.). Cluster admins can sign up to receive these updates on clusters by adding the catalog to the cluster. When a catalog is added to a cluster, the Kubernetes extension packages (operators, or any other extension package) in that catalog become available on the cluster for installation and receiving updates. + +For example, [k8s-operatorhub/community-operators](https://github.com/k8s-operatorhub/community-operators) is a catalog of curated operators that are being developed by the community. The list of operators can be viewed on [OperatorHub.io](https://operatorhub.io). This catalog is distributed as an image [quay.io/operatorhubio/catalog](https://quay.io/repository/operatorhubio/catalog?tag=latest&tab=tags) for consumption on clusters.
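+
+If you want to see which versions of this catalog image are available before adding it to a cluster, you can list the image's tags from your workstation. This is an optional sanity check sketched here for illustration; it assumes `skopeo` is installed locally and is not part of the original task:
+
+```bash
+# List the published tags of the community catalog image (requires skopeo)
+$ skopeo list-tags docker://quay.io/operatorhubio/catalog
+```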
+ +To consume this catalog on the cluster, create a `Catalog` CR with the image specified in the `spec.source.image` field: + +```bash +$ kubectl apply -f - <