diff --git a/Makefile b/Makefile index 320b7ea357..7ad1e5eaae 100644 --- a/Makefile +++ b/Makefile @@ -777,6 +777,10 @@ deploy/route: @oc process -f ./templates/route-template.yml --local | oc apply -f - -n $(NAMESPACE) .PHONY: deploy/route +# This will create the redhat-pull-secret secret in the rhacs-vertical-pod-autoscaler namespace if it does not exist +deploy/redhat-pull-secret: + ./scripts/redhat-pull-secret.sh rhacs-vertical-pod-autoscaler + # When making changes to the gitops configuration for development purposes # situated here dev/env/manifests/fleet-manager/04-gitops-config.yaml, this # target will update the gitops configmap on the dev cluster. diff --git a/dev/config/gitops-config.yaml b/dev/config/gitops-config.yaml index 2f8d973e81..2c2f5064de 100644 --- a/dev/config/gitops-config.yaml +++ b/dev/config/gitops-config.yaml @@ -7,6 +7,18 @@ rhacsOperators: image: "quay.io/rhacs-eng/stackrox-operator:4.4.2" centralLabelSelector: "rhacs.redhat.com/version-selector=4.4.2" securedClusterReconcilerEnabled: false +verticalPodAutoscaling: + recommenders: + - image: registry.redhat.io/openshift4/ose-vertical-pod-autoscaler-rhel9@sha256:d268de83bd371a725c0d0dad8a48008afa982c10e2997b24ff6d139d90d964f6 + name: vpa-1 + imagePullSecrets: [{ name: redhat-pull-secret }] + recommendationMarginFraction: 0.30 + podRecommendationMinCpuMillicores: 10 + - image: registry.redhat.io/openshift4/ose-vertical-pod-autoscaler-rhel9@sha256:d268de83bd371a725c0d0dad8a48008afa982c10e2997b24ff6d139d90d964f6 + name: vpa-2 + imagePullSecrets: [{ name: redhat-pull-secret }] + recommendationMarginFraction: 0.30 + podRecommendationMinCpuMillicores: 20 tenantResources: default: | labels: @@ -22,6 +34,18 @@ tenantResources: verticalPodAutoscalers: central: enabled: true + updatePolicy: + updateMode: "Auto" + minReplicas: 1 + resourcePolicy: + containerPolicies: + - containerName: "*" + minAllowed: + cpu: "100m" + memory: "50Mi" + maxAllowed: + cpu: "1" + memory: "2Gi" centrals: overrides: - 
instanceIds: diff --git a/dev/env/scripts/bootstrap.sh b/dev/env/scripts/bootstrap.sh index 5c0219418c..96756fcaf5 100755 --- a/dev/env/scripts/bootstrap.sh +++ b/dev/env/scripts/bootstrap.sh @@ -49,7 +49,11 @@ else log "Skipping installation of Vertical Pod Autoscaler" fi -apply "${MANIFESTS_DIR}/monitoring" +# skip manifests if openshift cluster using is_openshift_cluster +if ! is_openshift_cluster "$CLUSTER_TYPE"; then + apply "${MANIFESTS_DIR}/monitoring" +fi + apply "${MANIFESTS_DIR}/addons" if is_local_cluster "$CLUSTER_TYPE"; then diff --git a/dev/env/scripts/up.sh b/dev/env/scripts/up.sh index d01c703849..c7715cc856 100755 --- a/dev/env/scripts/up.sh +++ b/dev/env/scripts/up.sh @@ -54,6 +54,8 @@ if ! is_openshift_cluster "$CLUSTER_TYPE"; then $KUBECTL -n "$ACSCS_NAMESPACE" create secret generic fleet-manager-tls 2> /dev/null || true $KUBECTL -n "$ACSCS_NAMESPACE" create secret generic fleet-manager-envoy-tls 2> /dev/null || true $KUBECTL -n "$ACSCS_NAMESPACE" create secret generic fleet-manager-active-tls 2> /dev/null || true + # Create the redhat-pull-secret in the rhacs-vertical-pod-autoscaler namespace + make -C "$GITROOT" deploy/redhat-pull-secret fi log "Deploying fleet-manager" diff --git a/fleetshard/pkg/central/charts/charts_test.go b/fleetshard/pkg/central/charts/charts_test.go index 36a476d50b..0e810345da 100644 --- a/fleetshard/pkg/central/charts/charts_test.go +++ b/fleetshard/pkg/central/charts/charts_test.go @@ -9,7 +9,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" - "helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/chartutil" "github.com/stretchr/testify/assert" @@ -41,10 +40,7 @@ func TestTenantResourcesChart(t *testing.T) { } func TestInstallOrUpdateChartCreateNew(t *testing.T) { - chartFiles, err := TraverseChart(testdata, "testdata/test-chart") - require.NoError(t, err) - chart, err := loader.LoadFiles(chartFiles) - require.NoError(t, err) + chart := 
mustGetChart(t, "test-chart") fakeClient := testutils.NewFakeClientBuilder(t).Build() ctx := context.Background() @@ -70,10 +66,7 @@ func TestInstallOrUpdateChartCreateNew(t *testing.T) { } func TestInstallOrUpdateChartUpdateExisting(t *testing.T) { - chartFiles, err := TraverseChart(testdata, "testdata/test-chart") - require.NoError(t, err) - chart, err := loader.LoadFiles(chartFiles) - require.NoError(t, err) + chart := mustGetChart(t, "test-chart") fakeClient := testutils.NewFakeClientBuilder(t, dummyDeployment).Build() ctx := context.Background() diff --git a/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/Chart.yaml b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/Chart.yaml new file mode 100644 index 0000000000..10da20c4ea --- /dev/null +++ b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: rhacs-vertical-pod-autoscaler +description: A Helm chart for Kubernetes +type: application +version: 0.0.0 +appVersion: "0.0.0" diff --git a/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/proxy-config.yaml b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/proxy-config.yaml new file mode 100644 index 0000000000..cca8114a72 --- /dev/null +++ b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/proxy-config.yaml @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: proxy-config + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "rhacs-vpa-recommender" + app.kubernetes.io/instance: {{ $.Release.Name | quote }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ $.Release.Service | quote }} + helm.sh/chart: "{{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_"}}" + meta.helm.sh/release-name: {{ $.Release.Name | quote }} + meta.helm.sh/release-namespace: {{ $.Release.Namespace | quote }} +data: + server.js: | + + /** 
+ * On openshift, prometheus has a kube-rbac-proxy that requires a token to access the metrics endpoint. + * VerticalPodAutoscaler recommenders don't support a token-based authentication. + * This proxy is a simple http server that forwards requests to the prometheus metrics endpoint with the token. + **/ + + const http = require('http'); + const https = require('https'); + const fs = require('fs'); + const os = require('os'); + + const TOKEN_PATH = process.env.TOKEN_PATH || '/var/run/secrets/kubernetes.io/serviceaccount/token' + const UPSTREAM_PROTOCOL = process.env.UPSTREAM_PROTOCOL || 'http' + const UPSTREAM_HOST = process.env.UPSTREAM_HOST + const UPSTREAM_PORT = process.env.UPSTREAM_PORT + const LISTEN_PORT = process.env.LISTEN_PORT || "9000" + + function onRequest(req, res) { + const secret = fs.readFileSync(TOKEN_PATH, 'utf8'); + const options = { + hostname: UPSTREAM_HOST, + port: UPSTREAM_PORT, + path: req.url, + method: req.method, + protocol: UPSTREAM_PROTOCOL + ':', + headers: { + ...req.headers, + 'authorization': 'Bearer ' + secret, + 'host': UPSTREAM_HOST + }, + changeOrigin: true + }; + const fn = options.protocol === 'https:' ? 
https : http; + const proxy = fn.request(options, function (r) { + res.writeHead(r.statusCode, r.headers); + r.pipe(res, {end: true}); + }); + req.pipe(proxy, {end: true}); + } + + http.createServer(onRequest).listen(LISTEN_PORT, '0.0.0.0'); + console.log('Proxying on port 9000') diff --git a/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/recommender-deployment.yaml b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/recommender-deployment.yaml new file mode 100644 index 0000000000..55d6a9d58d --- /dev/null +++ b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/recommender-deployment.yaml @@ -0,0 +1,272 @@ +{{/*volumeMounts: +- name: service-ca-bundle + mountPath: /etc/pki/ca-trust/extracted/pem/ + +volumes: +- name: service-ca-bundle + configMap: + name: service-ca-bundle + items: + - key: ca-bundle.crt*/}} + +{{- range .Values.recommenders }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: "rhacs-vpa-recommender" + app.kubernetes.io/instance: {{ $.Release.Name | quote }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ $.Release.Service | quote }} + helm.sh/chart: "{{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_"}}" + meta.helm.sh/release-name: {{ $.Release.Name | quote }} + meta.helm.sh/release-namespace: {{ $.Release.Namespace | quote }} + name: {{ .name | quote }} + namespace: {{ $.Release.Namespace | quote }} +spec: + replicas: 1 + selector: + matchLabels: + app: vpa-recommender + vertical-pod-autoscaler: {{ .name | quote }} + template: + metadata: + labels: + app: vpa-recommender + vertical-pod-autoscaler: {{ .name | quote }} + spec: + {{- if .nodeSelector }} + {{ toYaml .nodeSelector | nindent 6 }} + {{ end }} + serviceAccountName: rhacs-vpa-recommender + terminationGracePeriodSeconds: 30 + {{- if .tolerations }} + {{ toYaml .tolerations | nindent 6 }} + {{ end }} + {{- if 
.priorityClassName }} + {{ toYaml .priorityClassName | nindent 6 }} + {{ end }} + {{- if .imagePullSecrets }} + imagePullSecrets: {{ toYaml .imagePullSecrets | nindent 8 }} + {{ end }} + volumes: + - name: service-ca-bundle + configMap: + optional: true + name: openshift-service-ca.crt + containers: + - name: recommender + image: {{ .image | quote }} + imagePullPolicy: Always + volumeMounts: + - name: service-ca-bundle + mountPath: /etc/pki/ca-trust/extracted/pem/openshift-service-ca.crt + subPath: service-ca.crt + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: [ ALL ] + {{- if .resources }} + resources: {{ toYaml .resources | nindent 12 }} + {{ end }} + command: + - recommender + args: + - --recommender-name={{ .name }} + - --logtostderr + {{- if .logLevel }} + - --v={{ .logLevel }} + {{ end }} + {{- if .recommendationMarginFraction }} + - --recommendation-margin-fraction={{ .recommendationMarginFraction }} + {{ end }} + {{- if .podRecommendationMinCpuMillicores }} + - --pod-recommendation-min-cpu-millicores={{ .podRecommendationMinCpuMillicores }} + {{ end }} + {{- if .podRecommendationMinMemoryMb }} + - --pod-recommendation-min-memory-mb={{ .podRecommendationMinMemoryMb }} + {{ end }} + {{- if .targetCpuPercentile }} + - --target-cpu-percentile={{ .targetCpuPercentile }} + {{ end }} + {{- if .recommendationLowerBoundCpuPercentile }} + - --recommendation-lower-bound-cpu-percentile={{ .recommendationLowerBoundCpuPercentile }} + {{ end }} + {{- if .recommendationUpperBoundCpuPercentile }} + - --recommendation-upper-bound-cpu-percentile={{ .recommendationUpperBoundCpuPercentile }} + {{ end }} + {{- if .targetMemoryPercentile }} + - --target-memory-percentile={{ .targetMemoryPercentile }} + {{ end }} + {{- if .recommendationLowerBoundMemoryPercentile }} + - --recommendation-lower-bound-memory-percentile={{ .recommendationLowerBoundMemoryPercentile }} + {{ end }} + {{- if 
.recommendationUpperBoundMemoryPercentile }} + - --recommendation-upper-bound-memory-percentile={{ .recommendationUpperBoundMemoryPercentile }} + {{ end }} + {{- if .checkpointsTimeout }} + - --checkpoints-timeout={{ .checkpointsTimeout }} + {{ end }} + {{- if .minCheckpoints }} + - --min-checkpoints={{ .minCheckpoints }} + {{ end }} + {{- if .memorySaver }} + - --memory-saver + {{ end }} + {{- if .recommenderInterval }} + - --recommender-interval={{ .recommenderInterval }} + {{ end }} + {{- if .checkpointsGcInterval }} + - --checkpoints-gc-interval={{ .checkpointsGcInterval }} + {{ end }} + {{- /* If we're using the proxy, point to the sidecar */}} + {{- if and .prometheusAddress .useProxy }} + - --prometheus-address=http://localhost:9000 + {{- else if .prometheusAddress }} + - --prometheus-address={{ .prometheusAddress }} + {{ end }} + {{- if .prometheusCadvisorJobName }} + - --prometheus-cadvisor-job-name={{ .prometheusCadvisorJobName }} + {{ end }} + {{- if .address }} + - --address={{ .address }} + {{ end }} + {{- if .kubeconfig }} + - --kubeconfig={{ .kubeconfig }} + {{ end }} + {{- if .kubeApiQps }} + - --kube-api-qps={{ .kubeApiQps }} + {{ end }} + {{- if .kubeApiBurst }} + - --kube-api-burst={{ .kubeApiBurst }} + {{ end }} + {{- if .storage }} + - --storage={{ .storage }} + {{ end }} + {{- if .historyLength }} + - --history-length={{ .historyLength }} + {{ end }} + {{- if .historyResolution }} + - --history-resolution={{ .historyResolution }} + {{ end }} + {{- if .prometheusQueryTimeout }} + - --prometheus-query-timeout={{ .prometheusQueryTimeout }} + {{ end }} + {{- if .podLabelPrefix }} + - --pod-label-prefix={{ .podLabelPrefix }} + {{ end }} + {{- if .metricForPodLabels }} + - --metric-for-pod-labels={{ .metricForPodLabels }} + {{ end }} + {{- if .podNamespaceLabel }} + - --pod-namespace-label={{ .podNamespaceLabel }} + {{ end }} + {{- if .podNameLabel }} + - --pod-name-label={{ .podNameLabel }} + {{ end }} + {{- if .containerNamespaceLabel }} + -
--container-namespace-label={{ .containerNamespaceLabel }} + {{ end }} + {{- if .containerPodNameLabel }} + - --container-pod-name-label={{ .containerPodNameLabel }} + {{ end }} + {{- if .containerNameLabel }} + - --container-name-label={{ .containerNameLabel }} + {{ end }} + {{- if .vpaObjectNamespace }} + - --vpa-object-namespace={{ .vpaObjectNamespace }} + {{ end }} + {{- if .memoryAggregationInterval }} + - --memory-aggregation-interval={{ .memoryAggregationInterval }} + {{ end }} + {{- if .memoryAggregationIntervalCount }} + - --memory-aggregation-interval-count={{ .memoryAggregationIntervalCount }} + {{ end }} + {{- if .memoryHistogramDecayHalfLife }} + - --memory-histogram-decay-half-life={{ .memoryHistogramDecayHalfLife }} + {{ end }} + {{- if .cpuHistogramDecayHalfLife }} + - --cpu-histogram-decay-half-life={{ .cpuHistogramDecayHalfLife }} + {{ end }} + {{- if .cpuIntegerPostProcessorEnabled }} + - --cpu-integer-post-processor-enabled={{ .cpuIntegerPostProcessorEnabled }} + {{ end }} + {{- if .useExternalMetrics }} + - --use-external-metrics={{ .useExternalMetrics }} + {{ end }} + {{- if .externalMetricsCpuMetric }} + - --external-metrics-cpu-metric={{ .externalMetricsCpuMetric }} + {{ end }} + {{- if .externalMetricsMemoryMetric }} + - --external-metrics-memory-metric={{ .externalMetricsMemoryMetric }} + {{ end }} + {{- if .oomBumpUpRatio }} + - --oom-bump-up-ratio={{ .oomBumpUpRatio }} + {{ end }} + {{- if .oomMinBumpUpBytes }} + - --oom-min-bump-up-bytes={{ .oomMinBumpUpBytes }} + {{ end }} + env: + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + {{- if and .prometheusAddress .useProxy }} + {{- $promUrl := urlParse .prometheusAddress }} + {{- $promProtocol := index $promUrl "scheme" }} + {{- $promHostPort := split ":" (index $promUrl "host") }} + {{- $promHost := $promHostPort._0 }} + {{- $promPort := $promHostPort._1 }} + {{- $_ := required "prometheusAddress must be fully defined ://host:port" 
$promProtocol }} + {{- $_ := required "prometheusAddress must be fully defined ://host:port" $promHost }} + {{- $_ := required "prometheusAddress must be fully defined ://host:port" $promPort }} + - name: proxy + image: {{ .proxyImage | default "registry.access.redhat.com/ubi8/nodejs-18:latest" | quote }} + imagePullPolicy: Always + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: [ ALL ] + env: + - name: UPSTREAM_HOST + value: {{ $promHost | quote }} + - name: UPSTREAM_PORT + value: {{ $promPort | quote }} + - name: UPSTREAM_PROTOCOL + value: {{ $promProtocol | quote }} + - name: LISTEN_PORT + value: "9000" + - name: TOKEN_PATH + value: "/var/run/secrets/kubernetes.io/serviceaccount/token" + - name: NODE_EXTRA_CA_CERTS + value: /service-ca-certs/openshift-service-ca.crt + command: [ node ] + args: [ '/app/server.js' ] + volumeMounts: + - mountPath: /app/server.js + name: proxy-config + subPath: server.js + - mountPath: /service-ca-certs/openshift-service-ca.crt + name: service-ca-bundle + subPath: service-ca.crt + {{- end }} + {{- if and .prometheusAddress .useProxy }} + volumes: + - configMap: + defaultMode: 420 + name: openshift-service-ca.crt + optional: true + name: service-ca-bundle + - name: proxy-config + configMap: + name: proxy-config + {{ end }} +--- +{{ end }} diff --git a/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/recommender-rbac.yaml b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/recommender-rbac.yaml new file mode 100644 index 0000000000..6d5ea7ece5 --- /dev/null +++ b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/templates/recommender-rbac.yaml @@ -0,0 +1,105 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rhacs-vpa-recommender + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "rhacs-vpa-recommender" + app.kubernetes.io/instance: {{ $.Release.Name 
| quote }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ $.Release.Service | quote }} + helm.sh/chart: "{{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_"}}" + meta.helm.sh/release-name: {{ $.Release.Name | quote }} + meta.helm.sh/release-namespace: {{ $.Release.Namespace | quote }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rhacs-vpa-recommender + labels: + app.kubernetes.io/name: "rhacs-vpa-recommender" + app.kubernetes.io/instance: {{ $.Release.Name | quote }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ $.Release.Service | quote }} + helm.sh/chart: "{{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_"}}" + meta.helm.sh/release-name: {{ $.Release.Name | quote }} + meta.helm.sh/release-namespace: {{ $.Release.Namespace | quote }} +rules: + - apiGroups: ["autoscaling.k8s.io"] + resources: ["verticalpodautoscalers"] + verbs: ["get", "list", "watch", "patch"] + + - apiGroups: ["autoscaling.k8s.io"] + resources: ["verticalpodautoscalers/status"] + verbs: ["get", "list", "watch", "patch"] + + - apiGroups: ["autoscaling.k8s.io"] + resources: ["verticalpodautoscalercheckpoints"] + verbs: ["get", "list", "watch", "patch", "create"] + + - apiGroups: ["apps.openshift.io"] + resources: ["scale", "deploymentconfigs"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "deployments", "daemonsets"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["*"] + resources: ["scale"] + verbs: ["get", "watch"] + + - apiGroups: [""] + resources: ["replicationcontrollers", "pods", "nodes", "limitranges"] + verbs: ["get", "list", "watch"] + + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create"] + + - apiGroups: ["metrics.k8s.io"] + resources: ["pods"] + verbs: ["get", "list"] + + 
- apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rhacs-vpa-recommender + labels: + app.kubernetes.io/name: "rhacs-vpa-recommender" + app.kubernetes.io/instance: {{ $.Release.Name | quote }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ $.Release.Service | quote }} + helm.sh/chart: "{{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_"}}" + meta.helm.sh/release-name: {{ $.Release.Name | quote }} + meta.helm.sh/release-namespace: {{ $.Release.Namespace | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rhacs-vpa-recommender +subjects: + - kind: ServiceAccount + name: rhacs-vpa-recommender + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rhacs-vpa-recommender-cluster-monitoring-view +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-monitoring-view +subjects: + - kind: ServiceAccount + name: rhacs-vpa-recommender + namespace: rhacs-vertical-pod-autoscaler +--- diff --git a/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/values.yaml b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/values.yaml new file mode 100644 index 0000000000..4105bd8488 --- /dev/null +++ b/fleetshard/pkg/central/charts/data/rhacs-vertical-pod-autoscaler/values.yaml @@ -0,0 +1 @@ +recommenders: [] diff --git a/fleetshard/pkg/central/charts/generic.go b/fleetshard/pkg/central/charts/generic.go new file mode 100644 index 0000000000..9dc9addaf1 --- /dev/null +++ b/fleetshard/pkg/central/charts/generic.go @@ -0,0 +1,354 @@ +package charts + +import ( + "context" + "errors" + "fmt" + + "github.com/golang/glog" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + v1 "k8s.io/api/core/v1" + k8serrors 
"k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +const ( + labelManagedBy = "app.kubernetes.io/managed-by" + labelHelmReleaseName = "meta.helm.sh/release-name" + labelHelmReleaseNamespace = "meta.helm.sh/release-namespace" + labelHelmChart = "helm.sh/chart" + annotationAppliedConfiguration = "rhacs.redhat.com/applied-configuration" +) + +// HelmReconcilerParams contains the parameters required to reconcile a Helm release. +type HelmReconcilerParams struct { + ReleaseName string + Namespace string + ManagerName string + Chart *chart.Chart + Values chartutil.Values + Client ctrlClient.Client + RestMapper meta.RESTMapper + AllowedGVKs []schema.GroupVersionKind + CreateNamespace bool +} + +func validateParams(p HelmReconcilerParams) error { + if p.ReleaseName == "" { + return fmt.Errorf("ReleaseName cannot be empty") + } + if p.Namespace == "" { + return fmt.Errorf("Namespace cannot be empty") + } + if p.ManagerName == "" { + return fmt.Errorf("ManagerName cannot be empty") + } + if p.Chart == nil { + return fmt.Errorf("Chart cannot be nil") + } + if p.Client == nil { + return fmt.Errorf("Client cannot be nil") + } + if p.RestMapper == nil { + return fmt.Errorf("RestMapper cannot be nil") + } + if len(p.AllowedGVKs) == 0 { + return fmt.Errorf("AllowedGVKs cannot be empty") + } + return nil +} + +// Reconcile reconciles a Helm release by ensuring that the objects in the Helm Chart are created, updated or garbage-collected in the cluster. +// This is a generic reconciliation method that can be used to reconcile any Helm release programmatically.
+// This routine does not create a "helm release secret", but rather will reconcile objects based on the GVKs +// provided in HelmReconcilerParams.AllowedGVKs. It uses ownership labels to track ownership of objects, and will fail +// to update or delete objects that do not have those labels. +func Reconcile(ctx context.Context, p HelmReconcilerParams) error { + + // sanity checks + if err := validateParams(p); err != nil { + return err + } + + if p.CreateNamespace { + if err := ensureNamespaceExists(ctx, p.Client, p.Namespace); err != nil { + return err + } + } + + // Creating a map of allowed GVKs for faster lookup + allowedGvkMap := make(map[schema.GroupVersionKind]struct{}) + for _, gvk := range p.AllowedGVKs { + allowedGvkMap[gvk] = struct{}{} + } + + // Render the Helm chart + renderedObjs, err := RenderToObjects(p.ReleaseName, p.Namespace, p.Chart, p.Values) + if err != nil { + return fmt.Errorf("failed to render objects from chart: %w", err) + } + + // Grouping the rendered objects by GVK + renderedObjsByGVK := make(map[schema.GroupVersionKind][]*unstructured.Unstructured) + for _, renderedObj := range renderedObjs { + gvk := renderedObj.GroupVersionKind() + // Fail if the rendered object GVK is not in the allowed GVKs + if _, ok := allowedGvkMap[gvk]; !ok { + return fmt.Errorf("object %s has unexpected GVK %s", renderedObj.GetName(), gvk.String()) + } + renderedObjsByGVK[gvk] = append(renderedObjsByGVK[gvk], renderedObj) + } + + ownershipLabels := getOwnershipLabels(p.Chart, p.ReleaseName, p.Namespace, p.ManagerName) + + // Reconcile each allowedGVK separately + for allowedGVK := range allowedGvkMap { + renderedObjsForGvk := renderedObjsByGVK[allowedGVK] + if err := reconcileGvk(ctx, p, allowedGVK, renderedObjsForGvk, ownershipLabels); err != nil { + return err + } + } + + return nil + +} + +// ensureNamespaceExists ensures that the namespace with the given name exists in the cluster. 
+func ensureNamespaceExists(ctx context.Context, cli ctrlClient.Client, name string) error { + ns := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + var existing v1.Namespace + if err := cli.Get(ctx, ctrlClient.ObjectKeyFromObject(ns), &existing); err != nil { + if !k8serrors.IsNotFound(err) { + return fmt.Errorf("failed to get namespace %s: %w", name, err) + } + } else { + if existing.DeletionTimestamp != nil { + return fmt.Errorf("namespace %s is being deleted", name) + } + return nil + } + + if err := cli.Create(ctx, ns); err != nil { + if !k8serrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create namespace %s: %w", name, err) + } + } + return nil +} + +// reconcileGvk will reconcile objects with the given GroupVersionKind. +func reconcileGvk(ctx context.Context, params HelmReconcilerParams, gvk schema.GroupVersionKind, wantObjs []*unstructured.Unstructured, ownershipLabels map[string]string) error { + + restMapping, err := params.RestMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return fmt.Errorf("failed to get rest mapping for %s: %w", gvk.String(), err) + } + + // Checks if the GVK is cluster-scoped or namespaced-scoped + isNamespacedGVK := restMapping.Scope.Name() == meta.RESTScopeNameNamespace + + existingObjs := &unstructured.UnstructuredList{} + + { + // List existing objects of the given GVK + + var listOptions []ctrlClient.ListOption + listOptions = append(listOptions, ctrlClient.MatchingLabels{ + labelManagedBy: params.ManagerName, + labelHelmReleaseNamespace: params.Namespace, + labelHelmReleaseName: params.ReleaseName, + + // Do not include the labelHelmChart, because it includes the chart version. + // For example, "helm.sh/chart": "my-chart-0.0.0" + // If the chart version changes, we still assume that the objects are managed by the release. 
+ }) + + // If the GVK is namespaced, we list objects in the namespace + if isNamespacedGVK { + listOptions = append(listOptions, ctrlClient.InNamespace(params.Namespace)) + } + + existingObjs.SetGroupVersionKind(gvk) + if err := params.Client.List(ctx, existingObjs, listOptions...); err != nil { + return fmt.Errorf("failed to list existing objects of kind %s: %w", gvk.String(), err) + } + } + + // Objects we want + wantNames := sets.NewString() + wantByName := make(map[string]*unstructured.Unstructured) + for _, obj := range wantObjs { + obj := obj + wantNames.Insert(obj.GetName()) + wantByName[obj.GetName()] = obj + } + + // Objects we have + existingNames := sets.NewString() + existingByName := make(map[string]*unstructured.Unstructured) + for _, existingObj := range existingObjs.Items { + existingObj := existingObj + existingNames.Insert(existingObj.GetName()) + existingByName[existingObj.GetName()] = &existingObj + } + + // Objects to delete + namesToDelete := existingNames.Difference(wantNames) + + // Delete phase + for _, nameToDelete := range namesToDelete.List() { + objToDelete := existingByName[nameToDelete] + + glog.Infof("deleting object %q of type %v", nameToDelete, gvk) + + // Do not delete object that are not managed by us + if err := checkOwnership(objToDelete, params.ManagerName, params.ReleaseName, params.Namespace); err != nil { + return fmt.Errorf("cannot delete object %q of type %v: %w", nameToDelete, gvk, err) + } + + // Do not delete object that is already being deleted + if objToDelete.GetDeletionTimestamp() != nil { + continue + } + + if err := params.Client.Delete(ctx, objToDelete); err != nil { + if !k8serrors.IsNotFound(err) { + return fmt.Errorf("failed to delete object %s: %w", nameToDelete, err) + } + } + } + + // Create / Update + for _, wantObj := range wantObjs { + objectName := wantObj.GetName() + + applyLabelsToObject(wantObj, ownershipLabels) + + if isNamespacedGVK { + wantObj.SetNamespace(params.Namespace) + } + + wantManifest, 
err := yaml.Marshal(wantObj.Object) + if err != nil { + return fmt.Errorf("failed to marshal object %q of type %v: %w", objectName, gvk, err) + } + + { + // Apply the applied-configuration annotation to the object + annotations := wantObj.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[annotationAppliedConfiguration] = string(wantManifest) + wantObj.SetAnnotations(annotations) + } + + if existingObject, alreadyExists := existingByName[objectName]; alreadyExists { + + // Do not update object that are not managed by us + if err := checkOwnership(existingObject, params.ManagerName, params.ReleaseName, params.Namespace); err != nil { + return fmt.Errorf("cannot update object %q of type %v: %w", objectName, gvk, err) + } + + // Do not update object that is being deleted + if existingObject.GetDeletionTimestamp() != nil { + return fmt.Errorf("cannot update object %q of type %v because it is being deleted", objectName, gvk) + } + + if existingObject.GetAnnotations() != nil && existingObject.GetAnnotations()[annotationAppliedConfiguration] == string(wantManifest) { + continue // The object is already up-to-date + } + + if err := params.Client.Update(ctx, wantObj); err != nil { + return fmt.Errorf("failed to update object %q of type %v: %w", objectName, gvk, err) + } + + } else { + // The object doesn't exist, create it + glog.Infof("creating object %q of type %v", objectName, gvk) + + if err := params.Client.Create(ctx, wantObj); err != nil { + if k8serrors.IsAlreadyExists(err) { + return fmt.Errorf("cannot create object %q of type %v because it already exists and is not managed by %q or is not part of release %q", objectName, gvk, params.ManagerName, params.ReleaseName) + } else { + return fmt.Errorf("failed to create object %s: %w", objectName, err) + } + } + } + } + + return nil +} + +// getOwnershipLabels returns the labels that should be applied to objects created by the Helm release. 
+// The presence of those labels on an object means that the object is owned by the Helm release. +func getOwnershipLabels(chart *chart.Chart, releaseName, releaseNamespace, managerName string) map[string]string { + result := make(map[string]string) + result[labelHelmChart] = fmt.Sprintf("%s-%s", chart.Metadata.Name, chart.Metadata.Version) + result[labelHelmReleaseNamespace] = releaseNamespace + result[labelHelmReleaseName] = releaseName + result[labelManagedBy] = managerName + return result +} + +// checkOwnership checks that a given object is managed by the given Helm release. +func checkOwnership(obj *unstructured.Unstructured, managerName, releaseName, releaseNamespace string) error { + + labels := obj.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + var errs []error + if err := requireValue(labels, labelManagedBy, managerName); err != nil { + errs = append(errs, fmt.Errorf("label validation error: %s", err)) + } + if err := requireValue(labels, labelHelmReleaseName, releaseName); err != nil { + errs = append(errs, fmt.Errorf("label validation error: %s", err)) + } + if err := requireValue(labels, labelHelmReleaseNamespace, releaseNamespace); err != nil { + errs = append(errs, fmt.Errorf("label validation error: %s", err)) + } + + if len(errs) > 0 { + return fmt.Errorf("invalid ownership metadata: %w", errors.Join(errs...)) + } + + return nil + +} + +// requireValue checks that a given key in a map has a specific value. 
+func requireValue(meta map[string]string, k, v string) error { + actual, ok := meta[k] + if !ok { + return fmt.Errorf("missing key %q: must be set to %q", k, v) + } + if actual != v { + return fmt.Errorf("key %q must be set to %q: current value is %q", k, v, actual) + } + return nil +} + +// applyLabelsToObject applies the given labels to the given object +func applyLabelsToObject(obj *unstructured.Unstructured, labels map[string]string) { + existing := obj.GetLabels() + if existing == nil { + existing = make(map[string]string) + } + for k, v := range labels { + existing[k] = v + } + obj.SetLabels(existing) +} diff --git a/fleetshard/pkg/central/charts/generic_test.go b/fleetshard/pkg/central/charts/generic_test.go new file mode 100644 index 0000000000..e8b34ead56 --- /dev/null +++ b/fleetshard/pkg/central/charts/generic_test.go @@ -0,0 +1,295 @@ +package charts + +import ( + "context" + "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "helm.sh/helm/v3/pkg/chartutil" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "testing" + "time" +) + +type fakeRESTMapper struct { + meta.RESTMapper + scopeForGvk map[schema.GroupVersionKind]meta.RESTScope +} + +func (f *fakeRESTMapper) setMappingForGvk(gvk schema.GroupVersionKind, mapping *meta.RESTMapping) { + f.scopeForGvk[gvk] = mapping.Scope +} + +func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + scope, ok := f.scopeForGvk[schema.GroupVersionKind{Group: gk.Group, Version: versions[0], Kind: gk.Kind}] + if !ok { + return nil, fmt.Errorf("no mapping found for %s", gk.String()) + } + return &meta.RESTMapping{Scope: scope}, nil +} + +var rm = &fakeRESTMapper{scopeForGvk: 
map[schema.GroupVersionKind]meta.RESTScope{ + {Group: "apps", Version: "v1", Kind: "Deployment"}: meta.RESTScopeNamespace, + {Group: "", Version: "v1", Kind: "ServiceAccount"}: meta.RESTScopeNamespace, + {Group: "", Version: "v1", Kind: "ConfigMap"}: meta.RESTScopeNamespace, + {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}: meta.RESTScopeRoot, +}} + +func getParams(t *testing.T, cli client.Client) HelmReconcilerParams { + chart := mustGetChart(t, "test-chart-2") + return HelmReconcilerParams{ + ReleaseName: "my-release", + Namespace: "my-namespace", + ManagerName: "test", + Chart: chart, + Values: chartutil.Values{}, + Client: cli, + RestMapper: rm, + AllowedGVKs: []schema.GroupVersionKind{ + {Group: "apps", Version: "v1", Kind: "Deployment"}, + {Group: "", Version: "v1", Kind: "ServiceAccount"}, + {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}, + }, + } +} + +func TestReconcile_ShouldCreateNamespacedResources(t *testing.T) { + cli := fake.NewFakeClient() + + params := getParams(t, cli) + params.CreateNamespace = true + err := Reconcile(context.Background(), params) + require.NoError(t, err) + + var sa v1.ServiceAccount + err = cli.Get(context.Background(), client.ObjectKey{Namespace: "my-namespace", Name: "dummy"}, &sa) + require.NoError(t, err) + +} + +func TestReconcile_ShouldDeleteUnwantedNamespacedResources(t *testing.T) { + cli := fake.NewFakeClient() + + params := getParams(t, cli) + params.CreateNamespace = true + params.Values["enabled"] = true + + err := Reconcile(context.Background(), params) + require.NoError(t, err) + + var deployment appsv1.Deployment + err = cli.Get(context.Background(), client.ObjectKey{Namespace: params.Namespace, Name: "dummy"}, &deployment) + require.NoError(t, err) + + params.Values["enabled"] = false + err = Reconcile(context.Background(), params) + require.NoError(t, err) + + err = cli.Get(context.Background(), client.ObjectKey{Namespace: params.Namespace, Name: "dummy"}, 
&deployment) + require.Error(t, err) + require.Nil(t, client.IgnoreNotFound(err)) + +} + +func TestReconcile_ShouldDeleteUnwantedClusterResources(t *testing.T) { + cli := fake.NewFakeClient() + + params := getParams(t, cli) + params.CreateNamespace = true + params.Values["enabled"] = true + + err := Reconcile(context.Background(), params) + require.NoError(t, err) + + var clusterRole rbacv1.ClusterRole + err = cli.Get(context.Background(), client.ObjectKey{Name: "dummy"}, &clusterRole) + require.NoError(t, err) + + params.Values["enabled"] = false + err = Reconcile(context.Background(), params) + require.NoError(t, err) + + err = cli.Get(context.Background(), client.ObjectKey{Name: "dummy"}, &clusterRole) + require.Error(t, err) + require.Nil(t, client.IgnoreNotFound(err)) + +} + +func TestReconcile_ShouldThrowIfUnregisteredGVK(t *testing.T) { + // The allowed GVK is not present in the params. + // The test-Chart-2 has a "Role" resource that is created + // when .Values.forbidden = true + + cli := fake.NewFakeClient() + + params := getParams(t, cli) + params.Values["forbidden"] = true + + err := Reconcile(context.Background(), params) + require.Error(t, err) + +} + +func TestReconcile_ShouldNotCreateNamespaceByDefault(t *testing.T) { + // With CreateNamespace left at its default (false), + // Reconcile should succeed without creating + // the target namespace itself. + + cli := fake.NewFakeClient() + + params := getParams(t, cli) + err := Reconcile(context.Background(), params) + require.NoError(t, err) + + var ns v1.Namespace + err = cli.Get(context.Background(), client.ObjectKey{Name: params.Namespace}, &ns) + require.Error(t, err) + +} + +func TestReconcile_ShouldCreateNamespace(t *testing.T) { + // The allowed GVK is not present in the params.
+ // The test-Chart-2 has a "Role" resource that is created + // when .Values.forbidden = true + + cli := fake.NewFakeClient() + + params := getParams(t, cli) + params.CreateNamespace = true + err := Reconcile(context.Background(), params) + require.NoError(t, err) + + var ns v1.Namespace + err = cli.Get(context.Background(), client.ObjectKey{Name: params.Namespace}, &ns) + require.NoError(t, err) + +} + +func TestReconcile_ShouldFailIfNamespaceDeleting(t *testing.T) { + // Reconcile should fail when the target namespace + // already exists but is currently being deleted + // (it has a non-nil deletion timestamp). + + cli := fake.NewFakeClient(&v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-namespace", + DeletionTimestamp: &metav1.Time{ + Time: metav1.Now().Add(-1 * time.Hour), + }, + Finalizers: []string{"kubernetes"}, + }, + }) + + params := getParams(t, cli) + params.CreateNamespace = true + require.Error(t, Reconcile(context.Background(), params)) + +} + +func TestReconcile_ShouldApplyOwnershipLabels(t *testing.T) { + cli := fake.NewFakeClient() + + params := getParams(t, cli) + params.CreateNamespace = true + err := Reconcile(context.Background(), params) + require.NoError(t, err) + + var sa v1.ServiceAccount + err = cli.Get(context.Background(), client.ObjectKey{Namespace: params.Namespace, Name: "dummy"}, &sa) + require.NoError(t, err) + assert.Equal(t, params.ReleaseName, sa.Labels[labelHelmReleaseName]) + assert.Equal(t, "test-resource-0.0.0", sa.Labels[labelHelmChart]) + assert.Equal(t, params.Namespace, sa.Labels[labelHelmReleaseNamespace]) + assert.Equal(t, params.ManagerName, sa.Labels[labelManagedBy]) +} + +func TestReconcile_ShouldFailIfManagedResourceExist(t *testing.T) { + cli := fake.NewFakeClient(&v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Namespace: "my-namespace", + Labels: map[string]string{ + "helm.sh/release": "other", + }, + }, + }) + + params := getParams(t, cli) + require.Error(t,
Reconcile(context.Background(), params)) +} + +func TestReconcile_ShouldFailIfParamsInvalid(t *testing.T) { + cli := fake.NewFakeClient() + params := getParams(t, cli) + params.Client = nil + require.Error(t, Reconcile(context.Background(), params)) +} + +func TestValidateParams(t *testing.T) { + + cli := fake.NewFakeClient() + + tests := []struct { + name string + params func() HelmReconcilerParams + }{ + { + name: "ReleaseName cannot be empty", + params: func() HelmReconcilerParams { + p := getParams(t, cli) + p.ReleaseName = "" + return p + }, + }, { + name: "Namespace cannot be empty", + params: func() HelmReconcilerParams { + p := getParams(t, cli) + p.Namespace = "" + return p + }, + }, { + name: "ManagerName cannot be empty", + params: func() HelmReconcilerParams { + p := getParams(t, cli) + p.ManagerName = "" + return p + }, + }, { + name: "Chart cannot be nil", + params: func() HelmReconcilerParams { + p := getParams(t, cli) + p.Chart = nil + return p + }, + }, { + name: "Client cannot be nil", + params: func() HelmReconcilerParams { + p := getParams(t, nil) + return p + }, + }, + { + name: "AllowedGVKs cannot be empty", + params: func() HelmReconcilerParams { + p := getParams(t, cli) + p.AllowedGVKs = nil + return p + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateParams(tt.params()) + require.Error(t, err) + assert.Contains(t, err.Error(), tt.name) + }) + } +} diff --git a/fleetshard/pkg/central/charts/testdata/test-chart-2/Chart.yaml b/fleetshard/pkg/central/charts/testdata/test-chart-2/Chart.yaml new file mode 100644 index 0000000000..8e011aaf65 --- /dev/null +++ b/fleetshard/pkg/central/charts/testdata/test-chart-2/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +name: test-resource +description: Testing Helm Chart +type: application +version: 0.0.0 +appVersion: 0.0.0 diff --git a/fleetshard/pkg/central/charts/testdata/test-chart-2/templates/dummy.yaml 
b/fleetshard/pkg/central/charts/testdata/test-chart-2/templates/dummy.yaml new file mode 100644 index 0000000000..8e18d41326 --- /dev/null +++ b/fleetshard/pkg/central/charts/testdata/test-chart-2/templates/dummy.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dummy +--- +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dummy + labels: + foo: {{ .Values.foo }} +spec: + replicas: 1 +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dummy +rules: [] +--- +{{- end }} +{{- if .Values.forbidden }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: dummy +--- +{{- end }} diff --git a/fleetshard/pkg/central/charts/testdata/test-chart-2/values.yaml b/fleetshard/pkg/central/charts/testdata/test-chart-2/values.yaml new file mode 100644 index 0000000000..b6ea483149 --- /dev/null +++ b/fleetshard/pkg/central/charts/testdata/test-chart-2/values.yaml @@ -0,0 +1 @@ +foo: "" diff --git a/fleetshard/pkg/central/charts/utils_test.go b/fleetshard/pkg/central/charts/utils_test.go new file mode 100644 index 0000000000..b073a9323d --- /dev/null +++ b/fleetshard/pkg/central/charts/utils_test.go @@ -0,0 +1,18 @@ +package charts + +import ( + "fmt" + "github.com/stretchr/testify/require" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "testing" +) + +func mustGetChart(t *testing.T, name string) *chart.Chart { + t.Helper() + chartFiles, err := TraverseChart(testdata, fmt.Sprintf("testdata/%s", name)) + require.NoError(t, err) + chart, err := loader.LoadFiles(chartFiles) + require.NoError(t, err) + return chart +} diff --git a/fleetshard/pkg/runtime/runtime.go b/fleetshard/pkg/runtime/runtime.go index 9cd3379fcf..5b19897eae 100644 --- a/fleetshard/pkg/runtime/runtime.go +++ b/fleetshard/pkg/runtime/runtime.go @@ -61,6 +61,7 @@ type Runtime struct { secretCipher cipher.Cipher encryptionKeyGenerator cipher.KeyGenerator addonService cluster.AddonService 
+ vpaReconciler *vpaReconciler } // NewRuntime creates a new runtime @@ -124,6 +125,7 @@ func NewRuntime(ctx context.Context, config *config.Config, k8sClient ctrlClient secretCipher: secretCipher, // pragma: allowlist secret encryptionKeyGenerator: encryptionKeyGen, addonService: addonService, + vpaReconciler: newVPAReconciler(k8sClient, k8sClient.RESTMapper()), }, nil } @@ -160,14 +162,17 @@ func (r *Runtime) Start() error { } if features.TargetedOperatorUpgrades.Enabled() { - err := r.upgradeOperator(list) - if err != nil { + if err := r.upgradeOperator(list); err != nil { err = errors.Wrapf(err, "Upgrading operator") glog.Error(err) return 0, err } } + if err := r.vpaReconciler.reconcile(ctx, list.VerticalPodAutoscaling); err != nil { + glog.Errorf("failed to reconcile verticalPodAutoscaling: %v", err) + } + // Start for each Central its own reconciler which can be triggered by sending a central to the receive channel. reconciledCentralCountCache = int32(len(list.Items)) logger.InfoChangedInt32(&reconciledCentralCountCache, "Received central count changed: received %d centrals", reconciledCentralCountCache) diff --git a/fleetshard/pkg/runtime/vpa.go b/fleetshard/pkg/runtime/vpa.go new file mode 100644 index 0000000000..58285f0688 --- /dev/null +++ b/fleetshard/pkg/runtime/vpa.go @@ -0,0 +1,100 @@ +package runtime + +import ( + "context" + "encoding/json" + "github.com/stackrox/acs-fleet-manager/fleetshard/pkg/central/charts" + "github.com/stackrox/acs-fleet-manager/internal/dinosaur/pkg/api/private" + "helm.sh/helm/v3/pkg/chart" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// newVPAReconciler creates a new VPA reconciler. +func newVPAReconciler(cli ctrlClient.Client, restMapper meta.RESTMapper) *vpaReconciler { + return &vpaReconciler{ + cli: cli, + restMapper: restMapper, + chart: vpaChart, + } +} + +// vpaReconciler is the reconciler for the VPA chart. 
+type vpaReconciler struct { + cli ctrlClient.Client + restMapper meta.RESTMapper + chart *chart.Chart +} + +// reconcile runs the reconciliation of the VPA chart. +func (r *vpaReconciler) reconcile(ctx context.Context, config private.VerticalPodAutoscaling) error { + params, err := r.getParamsForConfig(config) + if err != nil { + return err + } + return charts.Reconcile(ctx, params) +} + +// getParamsForConfig returns the parameters for the Helm reconciler for the VPA chart. +func (r *vpaReconciler) getParamsForConfig(config private.VerticalPodAutoscaling) (charts.HelmReconcilerParams, error) { + + jsonBytes, err := json.Marshal(config) + if err != nil { + return charts.HelmReconcilerParams{}, err + } + values := make(map[string]interface{}) + if err := json.Unmarshal(jsonBytes, &values); err != nil { + return charts.HelmReconcilerParams{}, err + } + + return charts.HelmReconcilerParams{ + ReleaseName: "rhacs-vpa", + Namespace: "rhacs-vertical-pod-autoscaler", + ManagerName: "fleetshard", + Chart: r.chart, + Values: values, + Client: r.cli, + RestMapper: r.restMapper, + CreateNamespace: true, + AllowedGVKs: []schema.GroupVersionKind{ + { + Kind: "Deployment", + Group: "apps", + Version: "v1", + }, + { + Kind: "ServiceAccount", + Group: "", + Version: "v1", + }, + { + Kind: "ConfigMap", + Group: "", + Version: "v1", + }, + { + Kind: "ClusterRole", + Group: "rbac.authorization.k8s.io", + Version: "v1", + }, + { + Kind: "ClusterRoleBinding", + Group: "rbac.authorization.k8s.io", + Version: "v1", + }, + }, + }, nil +} + +// vpaChart is the Helm chart for the VPA configuration. +var vpaChart *chart.Chart + +// init initializes the VPA chart. 
+func init() { + var err error + vpaChart, err = charts.GetChart("rhacs-vertical-pod-autoscaler", nil) + if err != nil { + panic(err) + } +} diff --git a/fleetshard/pkg/runtime/vpa_test.go b/fleetshard/pkg/runtime/vpa_test.go new file mode 100644 index 0000000000..d268fa9a9a --- /dev/null +++ b/fleetshard/pkg/runtime/vpa_test.go @@ -0,0 +1,101 @@ +package runtime + +import ( + "context" + "fmt" + "github.com/stackrox/acs-fleet-manager/internal/dinosaur/pkg/api/private" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "testing" +) + +type fakeRESTMapper struct { + meta.RESTMapper + scopeForGvk map[schema.GroupVersionKind]meta.RESTScope +} + +func (f *fakeRESTMapper) setMappingForGvk(gvk schema.GroupVersionKind, mapping *meta.RESTMapping) { + f.scopeForGvk[gvk] = mapping.Scope +} + +func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + scope, ok := f.scopeForGvk[schema.GroupVersionKind{Group: gk.Group, Version: versions[0], Kind: gk.Kind}] + if !ok { + return nil, fmt.Errorf("no mapping found for %s", gk.String()) + } + return &meta.RESTMapping{Scope: scope}, nil +} + +var fakeRestMapper meta.RESTMapper = &fakeRESTMapper{scopeForGvk: map[schema.GroupVersionKind]meta.RESTScope{ + {Group: "apps", Version: "v1", Kind: "Deployment"}: meta.RESTScopeNamespace, + {Group: "", Version: "v1", Kind: "ServiceAccount"}: meta.RESTScopeNamespace, + {Group: "", Version: "v1", Kind: "ConfigMap"}: meta.RESTScopeNamespace, + {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}: meta.RESTScopeRoot, + {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"}: 
meta.RESTScopeRoot, +}} + +func Test_vpaReconciler_Reconcile(t *testing.T) { + cli := fake.NewFakeClient() + v := newVPAReconciler(cli, fakeRestMapper) + err := v.reconcile(context.Background(), private.VerticalPodAutoscaling{ + Recommenders: []private.VpaRecommenderConfig{ + { + Name: "recommender-1", + Image: "image-1", + Resources: private.ResourceRequirements{ + Requests: map[string]string{ + "cpu": "100m", + "memory": "100Mi", + }, + Limits: map[string]string{ + "cpu": "100m", + "memory": "100Mi", + }, + }, + RecommendationMarginFraction: 0.3, + CpuHistogramDecayHalfLife: "1h", + }, + }, + }) + + require.NoError(t, err) + + var deployments appsv1.DeploymentList + err = cli.List(context.Background(), &deployments, client.InNamespace("rhacs-vertical-pod-autoscaler")) + require.NoError(t, err) + assert.Len(t, deployments.Items, 1) + assert.Equal(t, "recommender-1", deployments.Items[0].Name) + require.Len(t, deployments.Items[0].Spec.Template.Spec.Containers, 1) + assert.Equal(t, "image-1", deployments.Items[0].Spec.Template.Spec.Containers[0].Image) + + hasArg := func(value string) { + assert.Contains(t, deployments.Items[0].Spec.Template.Spec.Containers[0].Args, value) + } + + hasArg("--recommendation-margin-fraction=0.3") + hasArg("--cpu-histogram-decay-half-life=1h") + + // check resources + assert.True(t, deployments.Items[0].Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(resource.MustParse("100m"))) + + var sa v1.ServiceAccount + err = cli.Get(context.Background(), client.ObjectKey{Namespace: "rhacs-vertical-pod-autoscaler", Name: "rhacs-vpa-recommender"}, &sa) + require.NoError(t, err) + + var clusterRole rbacv1.ClusterRole + err = cli.Get(context.Background(), client.ObjectKey{Name: "rhacs-vpa-recommender"}, &clusterRole) + require.NoError(t, err) + + var clusterRoleBinding rbacv1.ClusterRoleBinding + err = cli.Get(context.Background(), client.ObjectKey{Name: "rhacs-vpa-recommender"}, &clusterRoleBinding) + require.NoError(t, 
err) + +} diff --git a/internal/dinosaur/pkg/api/private/api/openapi.yaml b/internal/dinosaur/pkg/api/private/api/openapi.yaml index 87301baf16..1b87a0371b 100644 --- a/internal/dinosaur/pkg/api/private/api/openapi.yaml +++ b/internal/dinosaur/pkg/api/private/api/openapi.yaml @@ -332,6 +332,184 @@ components: - $ref: '#/components/schemas/ListReference' - $ref: '#/components/schemas/ManagedCentralList_allOf' description: A list of ManagedCentral + VerticalPodAutoscaling: + properties: + recommenders: + items: + $ref: '#/components/schemas/VPARecommenderConfig' + type: array + type: object + LocalObjectReference: + properties: + name: + type: string + type: object + ResourceList: + additionalProperties: + type: string + type: object + ResourceRequirements: + properties: + requests: + additionalProperties: + type: string + type: object + limits: + additionalProperties: + type: string + type: object + type: object + Toleration: + properties: + key: + type: string + operator: + enum: + - Exists + - Equal + type: string + value: + type: string + effect: + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + tolerationSeconds: + format: int64 + nullable: true + type: integer + type: object + PodNodeSelector: + additionalProperties: + type: string + type: object + VPARecommenderConfig: + properties: + name: + type: string + image: + type: string + imagePullSecrets: + items: + $ref: '#/components/schemas/LocalObjectReference' + type: array + resources: + $ref: '#/components/schemas/ResourceRequirements' + recommendationMarginFraction: + format: float + type: number + podRecommendationMinCpuMillicores: + format: float + type: number + podRecommendationMinMemoryMb: + format: float + type: number + targetCpuPercentile: + format: float + type: number + recommendationLowerBoundCpuPercentile: + format: float + type: number + recommendationUpperBoundCpuPercentile: + format: float + type: number + targetMemoryPercentile: + format: float + type: number + 
recommendationLowerBoundMemoryPercentile: + format: float + type: number + recommendationUpperBoundMemoryPercentile: + format: float + type: number + checkpointsTimeout: + type: string + minCheckpoints: + type: integer + memorySaver: + type: boolean + recommenderInterval: + type: string + checkpointsGcInterval: + type: string + prometheusAddress: + type: string + prometheusCadvisorJobName: + type: string + address: + type: string + kubeconfig: + type: string + kubeApiQps: + format: float + type: number + kubeApiBurst: + type: integer + storage: + type: string + historyLength: + type: string + historyResolution: + type: string + prometheusQueryTimeout: + type: string + podLabelPrefix: + type: string + metricForPodLabels: + type: string + podNamespaceLabel: + type: string + podNameLabel: + type: string + containerNamespaceLabel: + type: string + containerPodNameLabel: + type: string + containerNameLabel: + type: string + vpaObjectNamespace: + type: string + memoryAggregationInterval: + type: string + memoryAggregationIntervalCount: + type: integer + memoryHistogramDecayHalfLife: + type: string + cpuHistogramDecayHalfLife: + type: string + cpuIntegerPostProcessorEnabled: + type: boolean + useExternalMetrics: + type: boolean + externalMetricsCpuMetric: + type: string + externalMetricsMemoryMetric: + type: string + oomBumpUpRatio: + format: float64 + type: number + oomMinBumpUpBytes: + format: float64 + type: number + tolerations: + items: + $ref: '#/components/schemas/Toleration' + type: array + nodeSelector: + additionalProperties: + type: string + type: object + useProxy: + type: boolean + proxyImage: + type: string + logLevel: + type: number + required: + - name + type: object RHACSOperatorConfigs: properties: CrdUrls: @@ -571,6 +749,8 @@ components: type: array rhacs_operators: $ref: '#/components/schemas/RHACSOperatorConfigs' + verticalPodAutoscaling: + $ref: '#/components/schemas/VerticalPodAutoscaling' DataPlaneClusterUpdateStatusRequest_addons: example: 
packageImage: packageImage diff --git a/internal/dinosaur/pkg/api/private/model_local_object_reference.go b/internal/dinosaur/pkg/api/private/model_local_object_reference.go new file mode 100644 index 0000000000..777baa4267 --- /dev/null +++ b/internal/dinosaur/pkg/api/private/model_local_object_reference.go @@ -0,0 +1,16 @@ +/* + * Red Hat Advanced Cluster Security Service Fleet Manager + * + * Red Hat Advanced Cluster Security (RHACS) Service Fleet Manager APIs that are used by internal services e.g fleetshard operators. + * + * API version: 1.4.0 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech). DO NOT EDIT. +package private + +// LocalObjectReference struct for LocalObjectReference +type LocalObjectReference struct { + Name string `json:"name,omitempty"` +} diff --git a/internal/dinosaur/pkg/api/private/model_managed_central_list.go b/internal/dinosaur/pkg/api/private/model_managed_central_list.go index 22c9ea3e67..2493d52c17 100644 --- a/internal/dinosaur/pkg/api/private/model_managed_central_list.go +++ b/internal/dinosaur/pkg/api/private/model_managed_central_list.go @@ -12,7 +12,8 @@ package private // ManagedCentralList A list of ManagedCentral type ManagedCentralList struct { - Kind string `json:"kind"` - Items []ManagedCentral `json:"items"` - RhacsOperators RhacsOperatorConfigs `json:"rhacs_operators,omitempty"` + Kind string `json:"kind"` + Items []ManagedCentral `json:"items"` + RhacsOperators RhacsOperatorConfigs `json:"rhacs_operators,omitempty"` + VerticalPodAutoscaling VerticalPodAutoscaling `json:"verticalPodAutoscaling,omitempty"` } diff --git a/internal/dinosaur/pkg/api/private/model_resource_requirements.go b/internal/dinosaur/pkg/api/private/model_resource_requirements.go new file mode 100644 index 0000000000..29e48be8ff --- /dev/null +++ b/internal/dinosaur/pkg/api/private/model_resource_requirements.go @@ -0,0 +1,17 @@ +/* + * Red Hat 
Advanced Cluster Security Service Fleet Manager + * + * Red Hat Advanced Cluster Security (RHACS) Service Fleet Manager APIs that are used by internal services e.g fleetshard operators. + * + * API version: 1.4.0 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech). DO NOT EDIT. +package private + +// ResourceRequirements struct for ResourceRequirements +type ResourceRequirements struct { + Requests map[string]string `json:"requests,omitempty"` + Limits map[string]string `json:"limits,omitempty"` +} diff --git a/internal/dinosaur/pkg/api/private/model_toleration.go b/internal/dinosaur/pkg/api/private/model_toleration.go new file mode 100644 index 0000000000..5feaa31fdc --- /dev/null +++ b/internal/dinosaur/pkg/api/private/model_toleration.go @@ -0,0 +1,20 @@ +/* + * Red Hat Advanced Cluster Security Service Fleet Manager + * + * Red Hat Advanced Cluster Security (RHACS) Service Fleet Manager APIs that are used by internal services e.g fleetshard operators. + * + * API version: 1.4.0 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech). DO NOT EDIT. 
+package private + +// Toleration struct for Toleration +type Toleration struct { + Key string `json:"key,omitempty"` + Operator string `json:"operator,omitempty"` + Value string `json:"value,omitempty"` + Effect string `json:"effect,omitempty"` + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` +} diff --git a/internal/dinosaur/pkg/api/private/model_vertical_pod_autoscaling.go b/internal/dinosaur/pkg/api/private/model_vertical_pod_autoscaling.go new file mode 100644 index 0000000000..b64a6936d8 --- /dev/null +++ b/internal/dinosaur/pkg/api/private/model_vertical_pod_autoscaling.go @@ -0,0 +1,16 @@ +/* + * Red Hat Advanced Cluster Security Service Fleet Manager + * + * Red Hat Advanced Cluster Security (RHACS) Service Fleet Manager APIs that are used by internal services e.g fleetshard operators. + * + * API version: 1.4.0 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech). DO NOT EDIT. +package private + +// VerticalPodAutoscaling struct for VerticalPodAutoscaling +type VerticalPodAutoscaling struct { + Recommenders []VpaRecommenderConfig `json:"recommenders,omitempty"` +} diff --git a/internal/dinosaur/pkg/api/private/model_vpa_recommender_config.go b/internal/dinosaur/pkg/api/private/model_vpa_recommender_config.go new file mode 100644 index 0000000000..b54419b363 --- /dev/null +++ b/internal/dinosaur/pkg/api/private/model_vpa_recommender_config.go @@ -0,0 +1,66 @@ +/* + * Red Hat Advanced Cluster Security Service Fleet Manager + * + * Red Hat Advanced Cluster Security (RHACS) Service Fleet Manager APIs that are used by internal services e.g fleetshard operators. + * + * API version: 1.4.0 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech). DO NOT EDIT. 
+package private + +// VpaRecommenderConfig struct for VpaRecommenderConfig +type VpaRecommenderConfig struct { + Name string `json:"name"` + Image string `json:"image,omitempty"` + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + Resources ResourceRequirements `json:"resources,omitempty"` + RecommendationMarginFraction float32 `json:"recommendationMarginFraction,omitempty"` + PodRecommendationMinCpuMillicores float32 `json:"podRecommendationMinCpuMillicores,omitempty"` + PodRecommendationMinMemoryMb float32 `json:"podRecommendationMinMemoryMb,omitempty"` + TargetCpuPercentile float32 `json:"targetCpuPercentile,omitempty"` + RecommendationLowerBoundCpuPercentile float32 `json:"recommendationLowerBoundCpuPercentile,omitempty"` + RecommendationUpperBoundCpuPercentile float32 `json:"recommendationUpperBoundCpuPercentile,omitempty"` + TargetMemoryPercentile float32 `json:"targetMemoryPercentile,omitempty"` + RecommendationLowerBoundMemoryPercentile float32 `json:"recommendationLowerBoundMemoryPercentile,omitempty"` + RecommendationUpperBoundMemoryPercentile float32 `json:"recommendationUpperBoundMemoryPercentile,omitempty"` + CheckpointsTimeout string `json:"checkpointsTimeout,omitempty"` + MinCheckpoints int32 `json:"minCheckpoints,omitempty"` + MemorySaver bool `json:"memorySaver,omitempty"` + RecommenderInterval string `json:"recommenderInterval,omitempty"` + CheckpointsGcInterval string `json:"checkpointsGcInterval,omitempty"` + PrometheusAddress string `json:"prometheusAddress,omitempty"` + PrometheusCadvisorJobName string `json:"prometheusCadvisorJobName,omitempty"` + Address string `json:"address,omitempty"` + Kubeconfig string `json:"kubeconfig,omitempty"` + KubeApiQps float32 `json:"kubeApiQps,omitempty"` + KubeApiBurst int32 `json:"kubeApiBurst,omitempty"` + Storage string `json:"storage,omitempty"` + HistoryLength string `json:"historyLength,omitempty"` + HistoryResolution string `json:"historyResolution,omitempty"` + 
PrometheusQueryTimeout string `json:"prometheusQueryTimeout,omitempty"` + PodLabelPrefix string `json:"podLabelPrefix,omitempty"` + MetricForPodLabels string `json:"metricForPodLabels,omitempty"` + PodNamespaceLabel string `json:"podNamespaceLabel,omitempty"` + PodNameLabel string `json:"podNameLabel,omitempty"` + ContainerNamespaceLabel string `json:"containerNamespaceLabel,omitempty"` + ContainerPodNameLabel string `json:"containerPodNameLabel,omitempty"` + ContainerNameLabel string `json:"containerNameLabel,omitempty"` + VpaObjectNamespace string `json:"vpaObjectNamespace,omitempty"` + MemoryAggregationInterval string `json:"memoryAggregationInterval,omitempty"` + MemoryAggregationIntervalCount int32 `json:"memoryAggregationIntervalCount,omitempty"` + MemoryHistogramDecayHalfLife string `json:"memoryHistogramDecayHalfLife,omitempty"` + CpuHistogramDecayHalfLife string `json:"cpuHistogramDecayHalfLife,omitempty"` + CpuIntegerPostProcessorEnabled bool `json:"cpuIntegerPostProcessorEnabled,omitempty"` + UseExternalMetrics bool `json:"useExternalMetrics,omitempty"` + ExternalMetricsCpuMetric string `json:"externalMetricsCpuMetric,omitempty"` + ExternalMetricsMemoryMetric string `json:"externalMetricsMemoryMetric,omitempty"` + OomBumpUpRatio float32 `json:"oomBumpUpRatio,omitempty"` + OomMinBumpUpBytes float32 `json:"oomMinBumpUpBytes,omitempty"` + Tolerations []Toleration `json:"tolerations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + UseProxy bool `json:"useProxy,omitempty"` + ProxyImage string `json:"proxyImage,omitempty"` + LogLevel float32 `json:"logLevel,omitempty"` +} diff --git a/internal/dinosaur/pkg/gitops/config.go b/internal/dinosaur/pkg/gitops/config.go index 1adf2d82e0..1cb69a60a4 100644 --- a/internal/dinosaur/pkg/gitops/config.go +++ b/internal/dinosaur/pkg/gitops/config.go @@ -3,6 +3,7 @@ package gitops import ( "fmt" + "github.com/stackrox/acs-fleet-manager/internal/dinosaur/pkg/api/private" 
"github.com/stackrox/acs-fleet-manager/fleetshard/pkg/central/operator" "k8s.io/apimachinery/pkg/util/validation/field" @@ -10,10 +11,11 @@ import ( // Config represents the gitops configuration type Config struct { - TenantResources TenantResourceConfig `json:"tenantResources"` - Centrals CentralsConfig `json:"centrals"` - RHACSOperators operator.OperatorConfigs `json:"rhacsOperators"` - DataPlaneClusters []DataPlaneClusterConfig `json:"dataPlaneClusters"` + TenantResources TenantResourceConfig `json:"tenantResources"` + Centrals CentralsConfig `json:"centrals"` + RHACSOperators operator.OperatorConfigs `json:"rhacsOperators"` + DataPlaneClusters []DataPlaneClusterConfig `json:"dataPlaneClusters"` + VerticalPodAutoscaling private.VerticalPodAutoscaling `json:"verticalPodAutoscaling"` } // AuthProviderAddition represents tenant's additional auth provider gitops configuration @@ -114,6 +116,7 @@ func ValidateConfig(config Config) field.ErrorList { errs = append(errs, validateTenantResourcesConfig(field.NewPath("tenantResources"), config.TenantResources)...) errs = append(errs, operator.Validate(field.NewPath("rhacsOperators"), config.RHACSOperators)...) errs = append(errs, validateDataPlaneClusterConfigs(field.NewPath("dataPlaneClusters"), config.DataPlaneClusters)...) + errs = append(errs, validateVpaConfig(field.NewPath("verticalPodAutoscaling"), &config.VerticalPodAutoscaling)...) 
return errs } diff --git a/internal/dinosaur/pkg/gitops/vpa.go b/internal/dinosaur/pkg/gitops/vpa.go new file mode 100644 index 0000000000..edddb27b84 --- /dev/null +++ b/internal/dinosaur/pkg/gitops/vpa.go @@ -0,0 +1,189 @@ +package gitops + +import ( + "github.com/prometheus/common/model" + "github.com/stackrox/acs-fleet-manager/internal/dinosaur/pkg/api/private" + "k8s.io/apimachinery/pkg/api/resource" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/sets" + validation2 "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "strings" + "time" +) + +func validateVpaConfig(path *field.Path, vpaConfig *private.VerticalPodAutoscaling) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateVpaRecommenders(path.Child("recommenders"), vpaConfig.Recommenders)...) + return allErrs +} + +func validateVpaRecommenders(path *field.Path, recommenders []private.VpaRecommenderConfig) field.ErrorList { + allErrs := field.ErrorList{} + seenNames := sets.NewString() + for i, recommender := range recommenders { + recommenderPath := path.Index(i) + if seenNames.Has(recommender.Name) { + allErrs = append(allErrs, field.Duplicate(recommenderPath.Child("name"), recommender.Name)) + } + seenNames.Insert(recommender.Name) + allErrs = append(allErrs, validateVpaRecommenderConfig(recommenderPath, &recommender)...) 
+ } + return allErrs +} + +func validateVpaRecommenderConfig(path *field.Path, recommender *private.VpaRecommenderConfig) field.ErrorList { + allErrs := field.ErrorList{} + if recommender.Name == "" { + allErrs = append(allErrs, field.Required(path.Child("name"), "name must be specified")) + } else { + if errs := apimachineryvalidation.NameIsDNSSubdomain(recommender.Name, false); len(errs) > 0 { + allErrs = append(allErrs, field.Invalid(path.Child("name"), recommender.Name, "invalid name: "+errs[0])) + } + } + if recommender.Image == "" { + allErrs = append(allErrs, field.Required(path.Child("image"), "image must be specified")) + } + if recommender.RecommendationMarginFraction < 0 || recommender.RecommendationMarginFraction > 1 { + allErrs = append(allErrs, field.Invalid(path.Child("recommendationMarginFraction"), recommender.RecommendationMarginFraction, "must be between 0 and 1")) + } + if recommender.PodRecommendationMinCpuMillicores < 0 { + allErrs = append(allErrs, field.Invalid(path.Child("podRecommendationMinCpuMillicores"), recommender.PodRecommendationMinCpuMillicores, "must be non-negative")) + } + if recommender.PodRecommendationMinMemoryMb < 0 { + allErrs = append(allErrs, field.Invalid(path.Child("podRecommendationMinMemoryMb"), recommender.PodRecommendationMinMemoryMb, "must be non-negative")) + } + if recommender.TargetMemoryPercentile < 0 || recommender.TargetMemoryPercentile > 1 { + allErrs = append(allErrs, field.Invalid(path.Child("targetMemoryPercentile"), recommender.TargetMemoryPercentile, "must be between 0 and 1")) + } + if recommender.TargetCpuPercentile < 0 || recommender.TargetCpuPercentile > 1 { + allErrs = append(allErrs, field.Invalid(path.Child("targetCpuPercentile"), recommender.TargetCpuPercentile, "must be between 0 and 1")) + } + if recommender.RecommendationLowerBoundMemoryPercentile < 0 || recommender.RecommendationLowerBoundMemoryPercentile > 1 { + allErrs = append(allErrs, 
field.Invalid(path.Child("recommendationLowerBoundMemoryPercentile"), recommender.RecommendationLowerBoundMemoryPercentile, "must be between 0 and 1")) + } + if recommender.RecommendationUpperBoundMemoryPercentile < 0 || recommender.RecommendationUpperBoundMemoryPercentile > 1 { + allErrs = append(allErrs, field.Invalid(path.Child("recommendationUpperBoundMemoryPercentile"), recommender.RecommendationUpperBoundMemoryPercentile, "must be between 0 and 1")) + } + if recommender.RecommendationLowerBoundCpuPercentile < 0 || recommender.RecommendationLowerBoundCpuPercentile > 1 { + allErrs = append(allErrs, field.Invalid(path.Child("recommendationLowerBoundCpuPercentile"), recommender.RecommendationLowerBoundCpuPercentile, "must be between 0 and 1")) + } + if recommender.RecommendationUpperBoundCpuPercentile < 0 || recommender.RecommendationUpperBoundCpuPercentile > 1 { + allErrs = append(allErrs, field.Invalid(path.Child("recommendationUpperBoundCpuPercentile"), recommender.RecommendationUpperBoundCpuPercentile, "must be between 0 and 1")) + } + if !isValidPromDuration(recommender.HistoryLength) { + allErrs = append(allErrs, field.Invalid(path.Child("historyLength"), recommender.HistoryLength, "must be a valid duration")) + } + if !isValidPromDuration(recommender.HistoryResolution) { + allErrs = append(allErrs, field.Invalid(path.Child("historyResolution"), recommender.HistoryResolution, "must be a valid duration")) + } + if !isValidDuration(recommender.RecommenderInterval) { + allErrs = append(allErrs, field.Invalid(path.Child("recommenderInterval"), recommender.RecommenderInterval, "must be a valid duration")) + } + if !isValidDuration(recommender.CheckpointsTimeout) { + allErrs = append(allErrs, field.Invalid(path.Child("checkpointsTimeout"), recommender.CheckpointsTimeout, "must be a valid duration")) + } + if !isValidDuration(recommender.CheckpointsGcInterval) { + allErrs = append(allErrs, field.Invalid(path.Child("checkpointsGcInterval"), 
recommender.CheckpointsGcInterval, "must be a valid duration")) + } + if !isValidDuration(recommender.MemoryAggregationInterval) { + allErrs = append(allErrs, field.Invalid(path.Child("memoryAggregationInterval"), recommender.MemoryAggregationInterval, "must be a valid duration")) + } + if !isValidDuration(recommender.MemoryHistogramDecayHalfLife) { + allErrs = append(allErrs, field.Invalid(path.Child("memoryHistogramDecayHalfLife"), recommender.MemoryHistogramDecayHalfLife, "must be a valid duration")) + } + if !isValidDuration(recommender.CpuHistogramDecayHalfLife) { + allErrs = append(allErrs, field.Invalid(path.Child("cpuHistogramDecayHalfLife"), recommender.CpuHistogramDecayHalfLife, "must be a valid duration")) + } + + allErrs = append(allErrs, validateResourceRequirements(path.Child("resources"), recommender.Resources)...) + allErrs = append(allErrs, validateNodeSelector(path.Child("nodeSelector"), recommender.NodeSelector)...) + allErrs = append(allErrs, validateTolerations(path.Child("tolerations"), recommender.Tolerations)...) + + return allErrs +} + +func validateResourceRequirements(path *field.Path, r private.ResourceRequirements) (errs field.ErrorList) { + if r.Requests != nil { + errs = append(errs, validateResourceList(path.Child("requests"), r.Requests)...) + } + if r.Limits != nil { + errs = append(errs, validateResourceList(path.Child("limits"), r.Limits)...) 
+ } + return errs +} + +func validateResourceList(path *field.Path, r map[string]string) (errs field.ErrorList) { + for k, v := range r { + if k != "cpu" && k != "memory" { + errs = append(errs, field.NotSupported(path.Key(k), k, []string{"cpu", "memory"})) + continue + } + _, err := resource.ParseQuantity(v) + if err != nil { + errs = append(errs, field.Invalid(path.Key(k), v, err.Error())) + } + } + return errs +} + +func validateTolerations(path *field.Path, tolerations []private.Toleration) (errs field.ErrorList) { + for i, toleration := range tolerations { + tolerationPath := path.Index(i) + errs = append(errs, validateToleration(tolerationPath, toleration)...) + } + return errs +} + +// validateToleration validates a toleration +// plucked from https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/core/validation/validation.go#L3939 +func validateToleration(path *field.Path, toleration private.Toleration) (errs field.ErrorList) { + if len(toleration.Key) > 0 { + errs = append(errs, validation.ValidateLabelName(toleration.Key, path.Child("key"))...) 
+ } + if len(toleration.Key) == 0 && toleration.Operator != "Exists" { + errs = append(errs, field.Invalid(path.Child("operator"), toleration.Operator, + "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) + } + if toleration.TolerationSeconds != nil && toleration.Effect != "NoExecute" { + errs = append(errs, field.Invalid(path.Child("effect"), toleration.Effect, + "effect must be 'NoExecute' when `tolerationSeconds` is set")) + } + // validate toleration operator and value + switch toleration.Operator { + // empty operator means Equal + case "Equal", "": + if errMessages := validation2.IsValidLabelValue(toleration.Value); len(errMessages) != 0 { + errs = append(errs, field.Invalid(path.Child("operator"), toleration.Value, strings.Join(errMessages, ";"))) + } + case "Exists": + if len(toleration.Value) > 0 { + errs = append(errs, field.Invalid(path.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) + } + default: + validValues := []string{"Equal", "Exists"} + errs = append(errs, field.NotSupported(path.Child("operator"), toleration.Operator, validValues)) + } + return errs +} + +func validateNodeSelector(path *field.Path, nodeSelector map[string]string) (errs field.ErrorList) { + return validation.ValidateLabels(nodeSelector, path) +} + +func isValidDuration(d string) bool { + if len(d) == 0 { + return true + } + _, err := time.ParseDuration(d) + return err == nil +} + +func isValidPromDuration(d string) bool { + if len(d) == 0 { + return true + } + _, err := model.ParseDuration(d) + return err == nil +} diff --git a/internal/dinosaur/pkg/gitops/vpa_test.go b/internal/dinosaur/pkg/gitops/vpa_test.go new file mode 100644 index 0000000000..21e759d1ad --- /dev/null +++ b/internal/dinosaur/pkg/gitops/vpa_test.go @@ -0,0 +1,317 @@ +package gitops + +import ( + "github.com/stackrox/acs-fleet-manager/internal/dinosaur/pkg/api/private" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/validation/field" + "testing" +) + +func TestValidateRecommenders_duplicateNames(t *testing.T) { + + recommenders := []private.VpaRecommenderConfig{ + { + Name: "foo", + Image: "bla", + }, + { + Name: "foo", + Image: "bla", + }, + } + + errs := validateVpaRecommenders(field.NewPath("recommenders"), recommenders) + + require.Len(t, errs, 1) + assert.Equal(t, errs[0].Type, field.ErrorTypeDuplicate) + +} + +func TestValidateRecommender(t *testing.T) { + + noError := func(t *testing.T, errs field.ErrorList) { + assert.Empty(t, errs) + } + + hasError := func(f, message string) func(t *testing.T, errs field.ErrorList) { + return func(t *testing.T, errs field.ErrorList) { + require.Len(t, errs, 1) + assert.Equal(t, f, errs[0].Field) + assert.Contains(t, errs[0].Detail, message) + } + } + + tests := []struct { + name string + recommender private.VpaRecommenderConfig + assert func(t *testing.T, errs field.ErrorList) + }{ + { + name: "minimal", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + }, + assert: noError, + }, + { + name: "full", + recommender: private.VpaRecommenderConfig{ + Name: "vpa", + Image: "image", + ImagePullSecrets: []private.LocalObjectReference{ + { + Name: "secret", + }, + }, + Resources: private.ResourceRequirements{ + Requests: map[string]string{ + "cpu": "100m", + "memory": "100Mi", + }, + Limits: map[string]string{ + "cpu": "100m", + "memory": "100Mi", + }, + }, + RecommendationMarginFraction: 0.1, + PodRecommendationMinCpuMillicores: 100, + PodRecommendationMinMemoryMb: 0.1, + TargetCpuPercentile: 0.1, + RecommendationLowerBoundCpuPercentile: 0.1, + RecommendationUpperBoundCpuPercentile: 0.1, + TargetMemoryPercentile: 0.1, + RecommendationLowerBoundMemoryPercentile: 0.1, + RecommendationUpperBoundMemoryPercentile: 0.1, + CheckpointsTimeout: "1h", + MinCheckpoints: 10, + MemorySaver: true, + RecommenderInterval: "1h", + CheckpointsGcInterval: "1h", + 
PrometheusAddress: "address", + PrometheusCadvisorJobName: "job", + Address: "address", + Kubeconfig: "abc", + KubeApiQps: 10, + KubeApiBurst: 10, + Storage: "storage", + HistoryLength: "1h", + HistoryResolution: "1h", + PrometheusQueryTimeout: "1h", + PodLabelPrefix: "prefix", + MetricForPodLabels: "metric", + PodNamespaceLabel: "label", + PodNameLabel: "label", + ContainerNamespaceLabel: "label", + ContainerPodNameLabel: "label", + ContainerNameLabel: "label", + VpaObjectNamespace: "namespace", + MemoryAggregationInterval: "1h", + MemoryAggregationIntervalCount: 10, + MemoryHistogramDecayHalfLife: "1h", + CpuHistogramDecayHalfLife: "1h", + CpuIntegerPostProcessorEnabled: true, + }, + assert: noError, + }, + { + name: "recommendationMarginFraction", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + RecommendationMarginFraction: 1.1, + }, + assert: hasError("recommender.recommendationMarginFraction", "must be between 0 and 1"), + }, + { + name: "podRecommendationMinCpuMillicores", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + PodRecommendationMinCpuMillicores: -1, + }, + assert: hasError("recommender.podRecommendationMinCpuMillicores", "must be non-negative"), + }, + { + name: "podRecommendationMinMemoryMb", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + PodRecommendationMinMemoryMb: -1, + }, + assert: hasError("recommender.podRecommendationMinMemoryMb", "must be non-negative"), + }, + { + name: "targetCpuPercentile", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + TargetCpuPercentile: 1.1, + }, + assert: hasError("recommender.targetCpuPercentile", "must be between 0 and 1"), + }, + { + name: "targetMemoryPercentile", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + TargetMemoryPercentile: 1.1, + }, + assert: hasError("recommender.targetMemoryPercentile", "must be between 0 and 1"), + }, + { + name: 
"recommendationLowerBoundMemoryPercentile", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + RecommendationLowerBoundMemoryPercentile: 1.1, + }, + assert: hasError("recommender.recommendationLowerBoundMemoryPercentile", "must be between 0 and 1"), + }, + { + name: "recommendationUpperBoundMemoryPercentile", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + RecommendationUpperBoundMemoryPercentile: 1.1, + }, + assert: hasError("recommender.recommendationUpperBoundMemoryPercentile", "must be between 0 and 1"), + }, + { + name: "recommendationLowerBoundCpuPercentile", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + RecommendationLowerBoundCpuPercentile: 1.1, + }, + assert: hasError("recommender.recommendationLowerBoundCpuPercentile", "must be between 0 and 1"), + }, + { + name: "recommendationUpperBoundCpuPercentile", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + RecommendationUpperBoundCpuPercentile: 1.1, + }, + assert: hasError("recommender.recommendationUpperBoundCpuPercentile", "must be between 0 and 1"), + }, + { + name: "missingName", + recommender: private.VpaRecommenderConfig{ + Image: "bla", + }, + assert: hasError("recommender.name", "name must be specified"), + }, + { + name: "missingImage", + recommender: private.VpaRecommenderConfig{ + Name: "bla", + }, + assert: hasError("recommender.image", "image must be specified"), + }, + { + name: "historyLength", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + HistoryLength: "1", + }, + assert: hasError("recommender.historyLength", "must be a valid duration"), + }, + { + name: "historyResolution", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + HistoryResolution: "1", + }, + assert: hasError("recommender.historyResolution", "must be a valid duration"), + }, + { + name: "recommenderInterval", + recommender: private.VpaRecommenderConfig{ + 
Name: "foo", + Image: "bla", + RecommenderInterval: "1", + }, + assert: hasError("recommender.recommenderInterval", "must be a valid duration"), + }, + { + name: "checkpointsTimeout", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + CheckpointsTimeout: "1", + }, + assert: hasError("recommender.checkpointsTimeout", "must be a valid duration"), + }, + { + name: "checkpointsGcInterval", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + CheckpointsGcInterval: "1", + }, + assert: hasError("recommender.checkpointsGcInterval", "must be a valid duration"), + }, + { + name: "memoryAggregationInterval", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + MemoryAggregationInterval: "1", + }, + assert: hasError("recommender.memoryAggregationInterval", "must be a valid duration"), + }, + { + name: "memoryHistogramDecayHalfLife", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + MemoryHistogramDecayHalfLife: "1", + }, + assert: hasError("recommender.memoryHistogramDecayHalfLife", "must be a valid duration"), + }, + { + name: "cpuHistogramDecayHalfLife", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + CpuHistogramDecayHalfLife: "1", + }, + assert: hasError("recommender.cpuHistogramDecayHalfLife", "must be a valid duration"), + }, + { + name: "bad name", + recommender: private.VpaRecommenderConfig{ + Name: "foo/bar", + Image: "bla", + }, + assert: hasError("recommender.name", "invalid name"), + }, + { + name: "bad resources", + recommender: private.VpaRecommenderConfig{ + Name: "foo", + Image: "bla", + Resources: private.ResourceRequirements{ + Requests: map[string]string{ + "cpu": "100m", + "bla": "100Mi", + }, + }, + }, + assert: hasError("recommender.resources.requests[bla]", `supported values: "cpu", "memory"`), + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + errs := 
validateVpaRecommenderConfig(field.NewPath("recommender"), &tt.recommender) + tt.assert(t, errs) + }) + } + +} diff --git a/internal/dinosaur/pkg/handlers/data_plane_dinosaur.go b/internal/dinosaur/pkg/handlers/data_plane_dinosaur.go index 2141d3c178..50c0e562d6 100644 --- a/internal/dinosaur/pkg/handlers/data_plane_dinosaur.go +++ b/internal/dinosaur/pkg/handlers/data_plane_dinosaur.go @@ -72,14 +72,17 @@ func (h *dataPlaneDinosaurHandler) GetAll(w http.ResponseWriter, r *http.Request Items: []private.ManagedCentral{}, } + gitopsConfig, gitopsConfigErr := h.gitopsConfigProvider.Get() + if gitopsConfigErr != nil { + return nil, errors.GeneralError("failed to get GitOps configuration: %v", gitopsConfigErr) + } + if features.TargetedOperatorUpgrades.Enabled() { - gitopsConfig, err := h.gitopsConfigProvider.Get() - if err != nil { - return nil, errors.GeneralError("failed to get GitOps configuration: %v", err) - } managedDinosaurList.RhacsOperators = gitopsConfig.RHACSOperators.ToAPIResponse() } + managedDinosaurList.VerticalPodAutoscaling = gitopsConfig.VerticalPodAutoscaling + managedCentrals, presentErr := h.presenter.PresentManagedCentrals(r.Context(), centralRequests) if presentErr != nil { return nil, errors.GeneralError("failed to convert central request to managed central: %v", presentErr) diff --git a/openapi/fleet-manager-private.yaml b/openapi/fleet-manager-private.yaml index 38f686b2c8..558487fc87 100644 --- a/openapi/fleet-manager-private.yaml +++ b/openapi/fleet-manager-private.yaml @@ -58,7 +58,7 @@ paths: # This is deliberate to hide the endpoints for unauthorised users description: Auth token is not valid. security: - - Bearer: [] + - Bearer: [ ] operationId: updateAgentClusterStatus summary: Update the status of an agent cluster @@ -98,7 +98,7 @@ paths: # This is deliberate to hide the endpoints for unauthorised users description: Auth token is not valid. 
security: - - Bearer: [] + - Bearer: [ ] operationId: updateCentralClusterStatus summary: Update the status of Centrals on an agent cluster @@ -135,7 +135,7 @@ paths: # This is deliberate to hide the endpoints for unauthorised users description: Auth token is not valid. security: - - Bearer: [] + - Bearer: [ ] operationId: getCentrals summary: Get the list of ManagedCentrals for the specified agent cluster @@ -172,7 +172,7 @@ paths: # This is deliberate to hide the endpoints for unauthorised users description: Auth token is not valid. security: - - Bearer: [] + - Bearer: [ ] operationId: getCentral summary: Get the ManagedaCentral for the specified agent cluster and centralId @@ -209,7 +209,7 @@ paths: # This is deliberate to hide the endpoints for unauthorised users description: Auth token is not valid. security: - - Bearer: [] + - Bearer: [ ] operationId: getDataPlaneClusterAgentConfig summary: Get the data plane cluster agent configuration @@ -395,13 +395,193 @@ components: - $ref: "#/components/schemas/ManagedCentral" rhacs_operators: $ref: "#/components/schemas/RHACSOperatorConfigs" + verticalPodAutoscaling: + $ref: "#/components/schemas/VerticalPodAutoscaling" + + VerticalPodAutoscaling: + type: object + properties: + recommenders: + type: array + items: + $ref: "#/components/schemas/VPARecommenderConfig" + + LocalObjectReference: + type: object + properties: + name: + type: string + + ResourceList: + type: object + additionalProperties: + type: string + + ResourceRequirements: + type: object + properties: + requests: + $ref: "#/components/schemas/ResourceList" + limits: + $ref: "#/components/schemas/ResourceList" + + Toleration: + type: object + properties: + key: + type: string + operator: + type: string + enum: + - Exists + - Equal + value: + type: string + effect: + type: string + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + tolerationSeconds: + nullable: true + type: integer + format: int64 + + PodNodeSelector: + additionalProperties: + 
type: string + + VPARecommenderConfig: + type: object + required: + - name + properties: + name: + type: string + image: + type: string + imagePullSecrets: + type: array + items: + $ref: "#/components/schemas/LocalObjectReference" + resources: + $ref: "#/components/schemas/ResourceRequirements" + recommendationMarginFraction: + type: number + format: float + podRecommendationMinCpuMillicores: + type: number + format: float + podRecommendationMinMemoryMb: + type: number + format: float + targetCpuPercentile: + type: number + format: float + recommendationLowerBoundCpuPercentile: + type: number + format: float + recommendationUpperBoundCpuPercentile: + type: number + format: float + targetMemoryPercentile: + type: number + format: float + recommendationLowerBoundMemoryPercentile: + type: number + format: float + recommendationUpperBoundMemoryPercentile: + type: number + format: float + checkpointsTimeout: + type: string + minCheckpoints: + type: integer + memorySaver: + type: boolean + recommenderInterval: + type: string + checkpointsGcInterval: + type: string + prometheusAddress: + type: string + prometheusCadvisorJobName: + type: string + address: + type: string + kubeconfig: + type: string + kubeApiQps: + type: number + format: float + kubeApiBurst: + type: integer + storage: + type: string + historyLength: + type: string + historyResolution: + type: string + prometheusQueryTimeout: + type: string + podLabelPrefix: + type: string + metricForPodLabels: + type: string + podNamespaceLabel: + type: string + podNameLabel: + type: string + containerNamespaceLabel: + type: string + containerPodNameLabel: + type: string + containerNameLabel: + type: string + vpaObjectNamespace: + type: string + memoryAggregationInterval: + type: string + memoryAggregationIntervalCount: + type: integer + memoryHistogramDecayHalfLife: + type: string + cpuHistogramDecayHalfLife: + type: string + cpuIntegerPostProcessorEnabled: + type: boolean + useExternalMetrics: + type: boolean + 
+        externalMetricsCpuMetric: + type: string + externalMetricsMemoryMetric: + type: string + oomBumpUpRatio: + type: number + format: float + oomMinBumpUpBytes: + type: number + format: float + tolerations: + type: array + items: + $ref: "#/components/schemas/Toleration" + nodeSelector: + $ref: "#/components/schemas/PodNodeSelector" + useProxy: + type: boolean + proxyImage: + type: string + logLevel: + type: number RHACSOperatorConfigs: properties: CrdUrls: type: array items: - type: string + type: string RHACSOperatorConfigs: type: array items: diff --git a/scripts/delete-central.sh b/scripts/delete-central.sh new file mode 100755 index 0000000000..65f4264c7b --- /dev/null +++ b/scripts/delete-central.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -eo pipefail +id=${1} + +if [ -z "$id" ]; then + echo "Usage: $0 " + exit 1 +fi + +echo "Deleting central $id" + + +# shellcheck disable=SC1001 +curl -X DELETE -H "Authorization: Bearer $(ocm token)" \ + "http://127.0.0.1:8000/api/rhacs/v1/centrals/${id}?async=true" diff --git a/scripts/dev-with-openshift-ci.sh b/scripts/dev-with-openshift-ci.sh new file mode 100755 index 0000000000..f97db7d111 --- /dev/null +++ b/scripts/dev-with-openshift-ci.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash + +# This script is to enable developers to work with local openshift builds +# It works well with OSD on AWS image flavor + +# What it does +# - It sets up a BuildConfig and an ImageStream for fleet-manager and fleetshard-operator +# - It sets up the annotations on the deployments to use those images +# - It creates a build + +# This assumes that make deploy/bootstrap and make deploy/dev have been run + +CUR_BRANCH=$(git rev-parse --abbrev-ref HEAD) + +# Set up the BuildConfig and ImageStream +oc apply -f - <
+ +set -eou pipefail + +echo "Creating Red Hat pull secret in namespaces" "$@" + +token=$(ocm token) +pull_secret=$(curl -X POST https://api.openshift.com/api/accounts_mgmt/v1/access_token \ + --header "Content-Type:application/json" \ + --header "Authorization: Bearer ${token}") + +for namespace in "$@"; do + + # Create namespace if it does not exist + oc get namespace "$namespace" || oc create namespace "$namespace" + + # Wait for namespace to be created + trial=0 + while [ "$(oc get namespace "$namespace" -o jsonpath='{.status.phase}')" != "Active" ]; do + echo "Waiting for namespace $namespace to be created" + trial=$((trial + 1)) + if [ "$trial" -gt 10 ]; then + echo "Timeout waiting for namespace $namespace to be created" + exit 1 + fi + sleep 5 + done + + echo "Creating RedHat Pull secret in namespace $namespace" + cat <