From e698d1308b81720f773dbcc025cc327f31f62145 Mon Sep 17 00:00:00 2001
From: Ahmad Malik Ibrahim
Date: Thu, 1 Aug 2024 11:47:31 -0700
Subject: [PATCH] fix: set oci.ImageOptions.Ref to the correct value to ensure plugin charts are installed (#359)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Description

In validatorctl, when configuring a custom private registry, pulling the plugin charts fails with this error:

```
2024-07-31 18:34:22 ERROR   Plugin failed to install
  └ validator-plugin-network: failed to pull chart: failed to fetch image from registry: GET https://toolbox.palette-adv.spectrocloud.com/v2/ahmad/charts/validator-plugin-network/validator-plugin-network/manifests/0.0.21: NOT_FOUND: repository ahmad/charts/validator-plugin-network/validator-plugin-network not found
```

As shown above, an extra `/validator-plugin-network` segment is appended to the repository path because the OCI ref was built from both `opts.Repo` and `opts.Chart`. This PR drops the duplicate chart segment from `oci.ImageOptions.Ref` so the ref resolves correctly. (A short standalone sketch of the before/after ref construction is included after the diff for reference.)

This PR is a prerequisite for https://github.com/validator-labs/validatorctl/pull/115.

## Test Notes

Tested by:

1. Running `helm install validator chart/validator/ -f chart/validator/values.yaml`.
2. Running `./bin/validator install` and configuring a plugin using the default registry (Quay).
3. Running `./bin/validator install` and configuring a plugin using a custom private registry (Harbor).

Everything runs correctly with this change. Prior to this change, test 3 was failing.
---
 build                                             | 2 +-
 chart/validator/README.md                         | 2 +-
 chart/validator/values.yaml                       | 8 ++++----
 hauler-manifest.yaml                              | 8 ++++----
 internal/controller/validatorconfig_controller.go | 2 +-
 5 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/build b/build
index 683974c2..e14cf18d 160000
--- a/build
+++ b/build
@@ -1 +1 @@
-Subproject commit 683974c2e0e6c2bd18cd6867d4033f2e2cd99c42
+Subproject commit e14cf18d15269794a35476045774bda7a85b8d34
diff --git a/chart/validator/README.md b/chart/validator/README.md
index c05273fd..d8d02c5e 100644
--- a/chart/validator/README.md
+++ b/chart/validator/README.md
@@ -52,7 +52,7 @@ The following table lists the configurable parameters of the Validator chart and
 | `pluginSecrets.oci.auth` | Don't forget to delete these square brackets if you're specifying credentials here! | `[]` |
 | `pluginSecrets.oci.pubKeys` | Don't forget to delete these square brackets if you're specifying public keys here!
| `[]` | | `helmConfig.registry` | | `"https://validator-labs.github.io"` | -| `plugins` | | `[{"chart": {"name": "validator-plugin-azure", "repository": "validator-plugin-azure", "version": "v0.0.14"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-azure\n tag: v0.0.14\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: {}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Leave secret undefined for implicit auth (e.g., WorkloadIdentity credentials)\n secret: {}\n # Specify the name of a secret in your cluster that contains Azure credentials.\n # E.g.: https://github.com/validator-labs/validator/blob/main/chart/validator/templates/plugin-secret-azure.yaml\n # secretName: azure-creds\n\n # Override the service account used by Azure validator (optional, could be used for WorkloadIdentityCredentials on AKS)\n # WARNING: the chosen service account must include all RBAC privileges found in templates/manager-rbac.yaml\n serviceAccountName: \"\""}, {"chart": {"name": "validator-plugin-oci", "repository": "validator-plugin-oci", "version": "v0.0.12"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-oci\n tag: v0.0.12\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-kubescape", "repository": "validator-plugin-kubescape", "version": "v0.0.4"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n 
allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-kubescape\n tag: v0.0.4\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: {}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-aws", "repository": "validator-plugin-aws", "version": "v0.1.2"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-aws\n tag: v0.1.2\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Leave secret undefined for implicit auth (node instance IAM role, IAM roles for Service Accounts, etc.)\n secret: {}\n # Specify the name of a secret in your cluster that contains AWS credentials.\n # E.g.: https://github.com/validator-labs/validator/blob/main/chart/validator/templates/plugin-secret-aws.yaml\n # secretName: aws-creds\n\n # Override the service account used by AWS validator (optional, could be used for IAM roles for Service Accounts)\n # WARNING: the chosen service account must have the same RBAC privileges as seen in templates/manager-rbac.yaml\n serviceAccountName: \"\""}, {"chart": {"name": "validator-plugin-network", "repository": "validator-plugin-network", "version": "v0.0.20"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: true\n capabilities:\n add:\n 
- NET_RAW\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-network\n tag: v0.0.20\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-maas", "repository": "validator-plugin-maas", "version": "v0.0.5"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-maas\n tag: v0.0.5\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-vsphere", "repository": "validator-plugin-vsphere", "version": "v0.0.28"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=127.0.0.1:8080\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-vsphere\n tag: v0.0.28\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Specify the name of a secret in your cluster that contains vSphere credentials.\n # E.g.: https://github.com/validator-labs/validator/blob/main/chart/validator/templates/plugin-secret-vsphere.yaml\n secretName: vsphere-credentials"}]` | +| `plugins` | | `[{"chart": {"name": "validator-plugin-azure", "repository": "validator-plugin-azure", "version": "v0.0.14"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n 
- ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-azure\n tag: v0.0.14\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: {}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Leave secret undefined for implicit auth (e.g., WorkloadIdentity credentials)\n secret: {}\n # Specify the name of a secret in your cluster that contains Azure credentials.\n # E.g.: https://github.com/validator-labs/validator/blob/main/chart/validator/templates/plugin-secret-azure.yaml\n # secretName: azure-creds\n\n # Override the service account used by Azure validator (optional, could be used for WorkloadIdentityCredentials on AKS)\n # WARNING: the chosen service account must include all RBAC privileges found in templates/manager-rbac.yaml\n serviceAccountName: \"\""}, {"chart": {"name": "validator-plugin-oci", "repository": "validator-plugin-oci", "version": "v0.0.12"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-oci\n tag: v0.0.12\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-kubescape", "repository": "validator-plugin-kubescape", "version": "v0.0.4"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-kubescape\n tag: v0.0.4\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: 
{}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-aws", "repository": "validator-plugin-aws", "version": "v0.1.2"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-aws\n tag: v0.1.2\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Leave secret undefined for implicit auth (node instance IAM role, IAM roles for Service Accounts, etc.)\n secret: {}\n # Specify the name of a secret in your cluster that contains AWS credentials.\n # E.g.: https://github.com/validator-labs/validator/blob/main/chart/validator/templates/plugin-secret-aws.yaml\n # secretName: aws-creds\n\n # Override the service account used by AWS validator (optional, could be used for IAM roles for Service Accounts)\n # WARNING: the chosen service account must have the same RBAC privileges as seen in templates/manager-rbac.yaml\n serviceAccountName: \"\""}, {"chart": {"name": "validator-plugin-network", "repository": "validator-plugin-network", "version": "v0.0.21"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: true\n capabilities:\n add:\n - NET_RAW\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-network\n tag: v0.0.21\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-maas", "repository": "validator-plugin-maas", "version": "v0.0.6"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n 
repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-maas\n tag: v0.0.6\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-vsphere", "repository": "validator-plugin-vsphere", "version": "v0.0.28"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=127.0.0.1:8080\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-vsphere\n tag: v0.0.28\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Specify the name of a secret in your cluster that contains vSphere credentials.\n # E.g.: https://github.com/validator-labs/validator/blob/main/chart/validator/templates/plugin-secret-vsphere.yaml\n secretName: vsphere-credentials"}]` | diff --git a/chart/validator/values.yaml b/chart/validator/values.yaml index 4ce3ab13..e7716dda 100644 --- a/chart/validator/values.yaml +++ b/chart/validator/values.yaml @@ -457,7 +457,7 @@ plugins: - chart: name: validator-plugin-network repository: validator-plugin-network - version: v0.0.20 + version: v0.0.21 values: |- controllerManager: kubeRbacProxy: @@ -494,7 +494,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-network - tag: v0.0.20 + tag: v0.0.21 resources: limits: cpu: 500m @@ -516,7 +516,7 @@ plugins: - chart: name: validator-plugin-maas repository: validator-plugin-maas - version: v0.0.5 + version: v0.0.6 values: |- controllerManager: kubeRbacProxy: @@ -551,7 +551,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-maas - tag: v0.0.5 + tag: v0.0.6 resources: limits: cpu: 500m diff --git a/hauler-manifest.yaml b/hauler-manifest.yaml index ac33f793..c5300925 100644 --- a/hauler-manifest.yaml +++ b/hauler-manifest.yaml @@ -8,8 +8,8 @@ spec: - name: quay.io/validator-labs/validator-plugin-aws:v0.1.2 - name: quay.io/validator-labs/validator-plugin-azure:v0.0.14 - name: quay.io/validator-labs/validator-plugin-kubescape:v0.0.4 - - name: quay.io/validator-labs/validator-plugin-maas:v0.0.5 - - name: quay.io/validator-labs/validator-plugin-network:v0.0.20 + - name: quay.io/validator-labs/validator-plugin-maas:v0.0.6 + - name: quay.io/validator-labs/validator-plugin-network:v0.0.21 - name: 
quay.io/validator-labs/validator-plugin-oci:v0.0.12
     - name: quay.io/validator-labs/validator-plugin-vsphere:v0.0.28
     - name: quay.io/validator-labs/validator-certs-init:1.0.0
@@ -39,10 +39,10 @@ spec:
       version: 0.0.4
     - name: validator-plugin-maas
       repoURL: https://validator-labs.github.io/validator-plugin-maas
-      version: 0.0.5
+      version: 0.0.6
     - name: validator-plugin-network
       repoURL: https://validator-labs.github.io/validator-plugin-network
-      version: 0.0.20
+      version: 0.0.21
     - name: validator-plugin-oci
       repoURL: https://validator-labs.github.io/validator-plugin-oci
       version: 0.0.12
diff --git a/internal/controller/validatorconfig_controller.go b/internal/controller/validatorconfig_controller.go
index e9ee8d94..d3a86eb3 100644
--- a/internal/controller/validatorconfig_controller.go
+++ b/internal/controller/validatorconfig_controller.go
@@ -202,7 +202,7 @@ func (r *ValidatorConfigReconciler) redeployIfNeeded(ctx context.Context, vc *v1
 			continue
 		}
 		ociOpts := oci.ImageOptions{
-			Ref:     fmt.Sprintf("%s/%s/%s:%s", strings.TrimPrefix(opts.Registry, oci.Scheme), opts.Repo, opts.Chart, opts.Version),
+			Ref:     fmt.Sprintf("%s/%s:%s", strings.TrimPrefix(opts.Registry, oci.Scheme), opts.Repo, opts.Version),
 			OutDir:  opts.Path,
 			OutFile: opts.Chart,
 		}
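
For reference, here is a minimal standalone sketch of the ref construction before and after this change, using the registry, repository, and version seen in the error log above. The `ociScheme` constant and the example values are assumptions for illustration only; the real logic lives in `redeployIfNeeded` in `internal/controller/validatorconfig_controller.go`.

```go
package main

import (
	"fmt"
	"strings"
)

// ociScheme stands in for oci.Scheme from the validator codebase;
// "oci://" is an assumption made for this sketch.
const ociScheme = "oci://"

func main() {
	// Example values drawn from the error log above.
	registry := "oci://toolbox.palette-adv.spectrocloud.com"
	repo := "ahmad/charts/validator-plugin-network" // repo already ends with the chart name
	chart := "validator-plugin-network"
	version := "0.0.21"

	host := strings.TrimPrefix(registry, ociScheme)

	// Before: opts.Chart is appended even though opts.Repo already names the chart,
	// duplicating the final path segment and producing a nonexistent repository.
	before := fmt.Sprintf("%s/%s/%s:%s", host, repo, chart, version)

	// After: only the version tag is appended to the repository path.
	after := fmt.Sprintf("%s/%s:%s", host, repo, version)

	fmt.Println(before) // .../ahmad/charts/validator-plugin-network/validator-plugin-network:0.0.21 (NOT_FOUND)
	fmt.Println(after)  // .../ahmad/charts/validator-plugin-network:0.0.21
}
```

Assuming the chart is actually published at that path, the corrected ref can be checked independently with something like `helm pull oci://toolbox.palette-adv.spectrocloud.com/ahmad/charts/validator-plugin-network --version 0.0.21`.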