From 7308f69759b48d469f59fdb500a458a560617f0a Mon Sep 17 00:00:00 2001 From: Karl Isenberg Date: Thu, 15 Jun 2023 18:44:40 -0700 Subject: [PATCH 1/5] feat: VPA for reconciler - Configure VPA for reconciler deployment using annotation on RootSync/RepoSync: `configsync.gke.io/reconciler-autoscaling-strategy: Auto` - Auto - evict and recreate pods to apply recommended resource values, as needed. - Recommend - monitor and record recommended resource values for each reconciler, but don't automatically apply them. - Disabled - Do not apply any VPA config, and delete it if it exists with the same name as the reconciler. - VPA disabled by default (opt-in for preview and testing) - When VPA is enabled, set smaller resource requests/limits for smaller footprint on initial install. Adding limits helps hasten VPA adjustments by causing OOMKills, instead of waiting for the VPA to evict the pod. - Move regular (non-VPA) defaults out of a ConfigMap and into the reconciler-manager code, next to the new VPA resource defaults. This should make them easier to keep in sync. - test: Install VPA on kind when --vpa is specified - test: Enable the VPA addon in GKE when creating clusters when --vpa is specified - test: Rewrite some e2e tests to handle resource defaults - test: Log reconciler pod resources on test failure to help debug VPA. --- e2e/flags.go | 4 + e2e/nomostest/autoscaling.go | 79 + e2e/nomostest/clean.go | 8 + e2e/nomostest/client.go | 4 + e2e/nomostest/clusters/gke.go | 3 + e2e/nomostest/config_sync.go | 51 +- e2e/nomostest/new.go | 116 +- e2e/nomostest/nt.go | 135 ++ e2e/nomostest/ntopts/multi_repo.go | 20 + e2e/nomostest/ntopts/test_type.go | 8 + e2e/nomostest/testpredicates/predicates.go | 163 +- e2e/testcases/helm_sync_test.go | 24 +- .../override_resource_limits_test.go | 367 ++-- e2e/testcases/reconciler_manager_test.go | 647 +++--- e2e/testcases/stress_test.go | 110 +- e2e/testdata/metrics-server/components.yaml | 263 +++ e2e/testdata/metrics-server/update.sh | 35 + .../vertical-pod-autoscaler/components.yaml | 1752 +++++++++++++++++ .../vertical-pod-autoscaler/update.sh | 53 + go.mod | 1 + go.sum | 2 + .../reconciler-manager-configmap.yaml | 31 - pkg/core/scheme.go | 9 + pkg/kinds/kinds.go | 6 + pkg/metadata/annotations.go | 25 + pkg/metadata/metadata.go | 16 +- .../controllers/garbage_collector.go | 19 + .../controllers/reconciler_base.go | 247 ++- .../controllers/reconciler_base_test.go | 13 +- .../reconciler_container_resources.go | 208 ++ .../controllers/reposync_controller.go | 75 +- .../controllers/reposync_controller_test.go | 117 +- .../controllers/rootsync_controller.go | 68 +- .../rootsync_controller_manager_test.go | 8 +- .../controllers/rootsync_controller_test.go | 295 +-- pkg/util/autopilot.go | 8 + .../vertical-pod-autoscaler/LICENSE | 202 ++ .../pkg/apis/autoscaling.k8s.io/v1/doc.go | 22 + .../apis/autoscaling.k8s.io/v1/register.go | 58 + .../pkg/apis/autoscaling.k8s.io/v1/types.go | 393 ++++ .../v1/zz_generated.deepcopy.go | 479 +++++ vendor/modules.txt | 3 + 42 files changed, 5205 insertions(+), 942 deletions(-) create mode 100644 e2e/nomostest/autoscaling.go create mode 100644 e2e/testdata/metrics-server/components.yaml create mode 100755 e2e/testdata/metrics-server/update.sh create mode 100644 e2e/testdata/vertical-pod-autoscaler/components.yaml create mode 100755 e2e/testdata/vertical-pod-autoscaler/update.sh create mode 100644 pkg/reconcilermanager/controllers/reconciler_container_resources.go create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/LICENSE 
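Example (illustrative sketch, not part of the patch; the RootSync name and namespace below are the Config Sync defaults, and spec fields are omitted) showing the annotation described in the commit message above:

  apiVersion: configsync.gke.io/v1beta1
  kind: RootSync
  metadata:
    name: root-sync
    namespace: config-management-system
    annotations:
      configsync.gke.io/reconciler-autoscaling-strategy: Auto

Per the notes above, Disabled (also the effective default when the annotation is unset) applies no VPA config and deletes any existing VPA with the same name as the reconciler.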
create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/doc.go create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/register.go create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/zz_generated.deepcopy.go diff --git a/e2e/flags.go b/e2e/flags.go index ff6a4ebb29..77fd63d719 100644 --- a/e2e/flags.go +++ b/e2e/flags.go @@ -65,6 +65,10 @@ var Stress = flag.Bool("stress", false, var KCC = flag.Bool("kcc", false, "If true, run kcc tests.") +// VPA enables running the e2e tests for vertical pod autoscaling. +var VPA = flag.Bool("vpa", true, + "If true, run VPA tests.") + // GceNode enables running the e2e tests for 'gcenode' auth type var GceNode = flag.Bool("gcenode", false, "If true, run test with 'gcenode' auth type.") diff --git a/e2e/nomostest/autoscaling.go b/e2e/nomostest/autoscaling.go new file mode 100644 index 0000000000..812f55141a --- /dev/null +++ b/e2e/nomostest/autoscaling.go @@ -0,0 +1,79 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nomostest + +import ( + "kpt.dev/configsync/pkg/core" + "kpt.dev/configsync/pkg/metadata" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// EnableReconcilerAutoscaling enables reconciler autoscaling on a +// RootSync or RepoSync. The object is annotated locally, but not applied. +// Returns true if a change was made, false if already enabled. +func EnableReconcilerAutoscaling(rs client.Object) bool { + return SetReconcilerAutoscalingStrategy(rs, metadata.ReconcilerAutoscalingStrategyAuto) +} + +// DisableReconcilerAutoscaling disables reconciler autoscaling on a +// RootSync or RepoSync. The annotation is removed locally, but not applied. +// Returns true if a change was made, false if already disabled. +func DisableReconcilerAutoscaling(rs client.Object) bool { + return RemoveReconcilerAutoscalingStrategy(rs) +} + +// IsReconcilerAutoscalingEnabled returns true if reconciler-autoscaling-strategy +// annotation is set to Auto. +func IsReconcilerAutoscalingEnabled(rs client.Object) bool { + return HasReconcilerAutoscalingStrategy(rs, metadata.ReconcilerAutoscalingStrategyAuto) +} + +// HasReconcilerAutoscalingStrategy returns true if reconciler-autoscaling-strategy +// annotation is set to the specified policy. Returns false if not set. +func HasReconcilerAutoscalingStrategy(obj client.Object, policy metadata.ReconcilerAutoscalingStrategy) bool { + annotations := obj.GetAnnotations() + // don't panic if nil + if len(annotations) == 0 { + return false + } + foundPolicy, found := annotations[metadata.ReconcilerAutoscalingStrategyAnnotationKey] + return found && foundPolicy == string(policy) +} + +// SetReconcilerAutoscalingStrategy sets the value of the reconciler-autoscaling-strategy +// annotation locally (does not apply). 
Returns true if the object was modified. +func SetReconcilerAutoscalingStrategy(obj client.Object, policy metadata.ReconcilerAutoscalingStrategy) bool { + if HasReconcilerAutoscalingStrategy(obj, policy) { + return false + } + core.SetAnnotation(obj, metadata.ReconcilerAutoscalingStrategyAnnotationKey, string(policy)) + return true +} + +// RemoveReconcilerAutoscalingStrategy removes the reconciler-autoscaling-strategy +// annotation locally (does not apply). Returns true if the object was modified. +func RemoveReconcilerAutoscalingStrategy(obj client.Object) bool { + annotations := obj.GetAnnotations() + // don't panic if nil + if len(annotations) == 0 { + return false + } + if _, found := annotations[metadata.ReconcilerAutoscalingStrategyAnnotationKey]; !found { + return false + } + delete(annotations, metadata.ReconcilerAutoscalingStrategyAnnotationKey) + obj.SetAnnotations(annotations) + return true +} diff --git a/e2e/nomostest/clean.go b/e2e/nomostest/clean.go index bca1f4ee6b..7f900e7a82 100644 --- a/e2e/nomostest/clean.go +++ b/e2e/nomostest/clean.go @@ -111,6 +111,14 @@ func Clean(nt *NT) error { if err := deleteResourceGroupController(nt); err != nil { return err } + if *e2e.VPA && *e2e.TestCluster == e2e.Kind { + if err := nt.uninstallVerticalPrivateAutoscaler(); err != nil { + return err + } + if err := nt.uninstallMetricsServer(); err != nil { + return err + } + } // Reset any modified system namespaces. if err := resetSystemNamespaces(nt); err != nil { return err diff --git a/e2e/nomostest/client.go b/e2e/nomostest/client.go index a3e3f3f94f..f025aeaa5e 100644 --- a/e2e/nomostest/client.go +++ b/e2e/nomostest/client.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" admissionv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -30,6 +31,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" + autoscalingv1vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "kpt.dev/configsync/e2e" "kpt.dev/configsync/e2e/nomostest/clusters" @@ -82,6 +84,7 @@ func newScheme(t testing.NTB) *runtime.Scheme { apiextensionsv1.SchemeBuilder, appsv1.SchemeBuilder, corev1.SchemeBuilder, + batchv1.SchemeBuilder, configmanagementv1.SchemeBuilder, configsyncv1alpha1.SchemeBuilder, configsyncv1beta1.SchemeBuilder, @@ -91,6 +94,7 @@ func newScheme(t testing.NTB) *runtime.Scheme { rbacv1beta1.SchemeBuilder, resourcegroupv1alpha1.SchemeBuilder.SchemeBuilder, apiregistrationv1.SchemeBuilder, + autoscalingv1vpa.SchemeBuilder, } for _, b := range builders { err := b.AddToScheme(s) diff --git a/e2e/nomostest/clusters/gke.go b/e2e/nomostest/clusters/gke.go index cf38fd55ef..bc10bc94b1 100644 --- a/e2e/nomostest/clusters/gke.go +++ b/e2e/nomostest/clusters/gke.go @@ -242,6 +242,9 @@ func createGKECluster(t testing.NTB, name string) error { if len(addons) > 0 { args = append(args, "--addons", strings.Join(addons, ",")) } + if *e2e.VPA { + args = append(args, "--enable-vertical-pod-autoscaling") + } } if len(scopes) > 0 { args = append(args, "--scopes", strings.Join(scopes, ",")) diff --git a/e2e/nomostest/config_sync.go b/e2e/nomostest/config_sync.go index 2b44bca74a..841893e981 100644 --- 
a/e2e/nomostest/config_sync.go +++ b/e2e/nomostest/config_sync.go @@ -279,9 +279,12 @@ func parseManifestDir(dirPath string) ([]client.Object, error) { if err != nil { return nil, err } - paths := make([]cmpath.Absolute, len(files)) - for i, f := range files { - paths[i] = readPath.Join(cmpath.RelativeSlash(f.Name())) + paths := make([]cmpath.Absolute, 0, len(files)) + for _, f := range files { + switch filepath.Ext(f.Name()) { + case ".yaml", ".yml", ".json": + paths = append(paths, readPath.Join(cmpath.RelativeSlash(f.Name()))) + } } // Read the manifests cached in the tmpdir. r := reader.File{} @@ -666,6 +669,9 @@ func RootSyncObjectV1Alpha1(name, repoURL string, sourceFormat filesystem.Source // Enable automatic deletion of managed objects by default. // This helps ensure that test artifacts are cleaned up. EnableDeletionPropagation(rs) + // Enable autoscaling by default. + // This helps validate VPA works for all test cases. + EnableReconcilerAutoscaling(rs) return rs } @@ -684,6 +690,11 @@ func RootSyncObjectV1Alpha1FromRootRepo(nt *NT, name string) *v1alpha1.RootSync } else if rs.Spec.Override != nil { rs.Spec.Override.ReconcileTimeout = nil } + if nt.DefaultReconcilerAutoscalingStrategy != nil { + SetReconcilerAutoscalingStrategy(rs, *nt.DefaultReconcilerAutoscalingStrategy) + } else { + DisableReconcilerAutoscaling(rs) + } return rs } @@ -704,6 +715,9 @@ func RootSyncObjectV1Beta1(name, repoURL string, sourceFormat filesystem.SourceF // Enable automatic deletion of managed objects by default. // This helps ensure that test artifacts are cleaned up. EnableDeletionPropagation(rs) + // Enable autoscaling by default. + // This helps validate VPA works for all test cases. + EnableReconcilerAutoscaling(rs) return rs } @@ -722,6 +736,11 @@ func RootSyncObjectV1Beta1FromRootRepo(nt *NT, name string) *v1beta1.RootSync { } else if rs.Spec.Override != nil { rs.Spec.Override.ReconcileTimeout = nil } + if nt.DefaultReconcilerAutoscalingStrategy != nil { + SetReconcilerAutoscalingStrategy(rs, *nt.DefaultReconcilerAutoscalingStrategy) + } else { + DisableReconcilerAutoscaling(rs) + } return rs } @@ -740,6 +759,11 @@ func RootSyncObjectV1Beta1FromOtherRootRepo(nt *NT, syncName, repoName string) * } else if rs.Spec.Override != nil { rs.Spec.Override.ReconcileTimeout = nil } + if nt.DefaultReconcilerAutoscalingStrategy != nil { + SetReconcilerAutoscalingStrategy(rs, *nt.DefaultReconcilerAutoscalingStrategy) + } else { + DisableReconcilerAutoscaling(rs) + } return rs } @@ -765,6 +789,9 @@ func RepoSyncObjectV1Alpha1(nn types.NamespacedName, repoURL string) *v1alpha1.R // Enable automatic deletion of managed objects by default. // This helps ensure that test artifacts are cleaned up. EnableDeletionPropagation(rs) + // Enable autoscaling by default. + // This helps validate VPA works for all test cases. + EnableReconcilerAutoscaling(rs) return rs } @@ -783,6 +810,11 @@ func RepoSyncObjectV1Alpha1FromNonRootRepo(nt *NT, nn types.NamespacedName) *v1a } else if rs.Spec.Override != nil { rs.Spec.Override.ReconcileTimeout = nil } + if nt.DefaultReconcilerAutoscalingStrategy != nil { + SetReconcilerAutoscalingStrategy(rs, *nt.DefaultReconcilerAutoscalingStrategy) + } else { + DisableReconcilerAutoscaling(rs) + } // Enable automatic deletion of managed objects by default. // This helps ensure that test artifacts are cleaned up. 
EnableDeletionPropagation(rs) @@ -811,6 +843,9 @@ func RepoSyncObjectV1Beta1(nn types.NamespacedName, repoURL string, sourceFormat // Enable automatic deletion of managed objects by default. // This helps ensure that test artifacts are cleaned up. EnableDeletionPropagation(rs) + // Enable autoscaling by default. + // This helps validate VPA works for all test cases. + EnableReconcilerAutoscaling(rs) return rs } @@ -829,6 +864,11 @@ func RepoSyncObjectV1Beta1FromNonRootRepo(nt *NT, nn types.NamespacedName) *v1be } else if rs.Spec.Override != nil { rs.Spec.Override.ReconcileTimeout = nil } + if nt.DefaultReconcilerAutoscalingStrategy != nil { + SetReconcilerAutoscalingStrategy(rs, *nt.DefaultReconcilerAutoscalingStrategy) + } else { + DisableReconcilerAutoscaling(rs) + } // Add dependencies to ensure managed objects can be deleted. if err := SetRepoSyncDependencies(nt, rs); err != nil { nt.T.Fatal(err) @@ -851,6 +891,11 @@ func RepoSyncObjectV1Beta1FromOtherRootRepo(nt *NT, nn types.NamespacedName, rep } else if rs.Spec.Override != nil { rs.Spec.Override.ReconcileTimeout = nil } + if nt.DefaultReconcilerAutoscalingStrategy != nil { + SetReconcilerAutoscalingStrategy(rs, *nt.DefaultReconcilerAutoscalingStrategy) + } else { + DisableReconcilerAutoscaling(rs) + } // Add dependencies to ensure managed objects can be deleted. if err := SetRepoSyncDependencies(nt, rs); err != nil { nt.T.Fatal(err) diff --git a/e2e/nomostest/new.go b/e2e/nomostest/new.go index 353f2735f9..95251ba844 100644 --- a/e2e/nomostest/new.go +++ b/e2e/nomostest/new.go @@ -34,6 +34,7 @@ import ( "kpt.dev/configsync/pkg/api/configmanagement" "kpt.dev/configsync/pkg/api/configsync" "kpt.dev/configsync/pkg/importer/filesystem" + "kpt.dev/configsync/pkg/metadata" "kpt.dev/configsync/pkg/metrics" "kpt.dev/configsync/pkg/testing/fake" ) @@ -63,6 +64,11 @@ func newOptStruct(testName, tmpDir string, t nomostesting.NTB, ntOptions ...ntop MultiRepo: ntopts.MultiRepo{ NamespaceRepos: make(map[types.NamespacedName]ntopts.RepoOpts), RootRepos: map[string]ntopts.RepoOpts{configsync.RootSyncName: {}}, + // Enable autoscaling by default in tests to improve test coverage + // for this preview feature. To override, use + // WithReconcilerAutoscalingStrategy or + // WithoutReconcilerAutoscalingStrategy. + ReconcilerAutoscalingStrategy: pointerToStrategy(metadata.ReconcilerAutoscalingStrategyAuto), // Default to 1m to keep tests fast. // To override, use WithReconcileTimeout. 
ReconcileTimeout: pointer.Duration(1 * time.Minute), @@ -89,6 +95,10 @@ func newOptStruct(testName, tmpDir string, t nomostesting.NTB, ntOptions ...ntop t.Skip("Test skipped since it is a KCC test") } + if !*e2e.VPA && optsStruct.VPATest { + t.Skip("Test skipped since it is a VPA test") + } + if !*e2e.GceNode && optsStruct.GCENodeTest { t.Skip("Test skipped since it is a test for GCENode auth type, which requires a GKE cluster without workload identity") } @@ -147,32 +157,33 @@ func SharedTestEnv(t nomostesting.NTB, opts *ntopts.New) *NT { sharedNt.Logger.SetNTBForTest(t) nt := &NT{ - Context: sharedNt.Context, - T: t, - Logger: sharedNt.Logger, - Shell: sharedNt.Shell, - ClusterName: sharedNt.ClusterName, - TmpDir: opts.TmpDir, - Config: opts.RESTConfig, - repoSyncPermissions: opts.RepoSyncPermissions, - KubeClient: sharedNt.KubeClient, - Watcher: sharedNt.Watcher, - WatchClient: sharedNt.WatchClient, - IsGKEAutopilot: sharedNt.IsGKEAutopilot, - DefaultWaitTimeout: sharedNt.DefaultWaitTimeout, - DefaultReconcileTimeout: opts.ReconcileTimeout, - kubeconfigPath: sharedNt.kubeconfigPath, - ReconcilerPollingPeriod: sharedNt.ReconcilerPollingPeriod, - HydrationPollingPeriod: sharedNt.HydrationPollingPeriod, - RootRepos: sharedNt.RootRepos, - NonRootRepos: sharedNt.NonRootRepos, - MetricsExpectations: sharedNt.MetricsExpectations, - gitPrivateKeyPath: sharedNt.gitPrivateKeyPath, - caCertPath: sharedNt.caCertPath, - Scheme: sharedNt.Scheme, - RemoteRepositories: sharedNt.RemoteRepositories, - WebhookDisabled: sharedNt.WebhookDisabled, - GitProvider: sharedNt.GitProvider, + Context: sharedNt.Context, + T: t, + Logger: sharedNt.Logger, + Shell: sharedNt.Shell, + ClusterName: sharedNt.ClusterName, + TmpDir: opts.TmpDir, + Config: opts.RESTConfig, + repoSyncPermissions: opts.RepoSyncPermissions, + KubeClient: sharedNt.KubeClient, + Watcher: sharedNt.Watcher, + WatchClient: sharedNt.WatchClient, + IsGKEAutopilot: sharedNt.IsGKEAutopilot, + DefaultWaitTimeout: sharedNt.DefaultWaitTimeout, + DefaultReconcileTimeout: opts.ReconcileTimeout, + DefaultReconcilerAutoscalingStrategy: opts.ReconcilerAutoscalingStrategy, + kubeconfigPath: sharedNt.kubeconfigPath, + ReconcilerPollingPeriod: sharedNt.ReconcilerPollingPeriod, + HydrationPollingPeriod: sharedNt.HydrationPollingPeriod, + RootRepos: sharedNt.RootRepos, + NonRootRepos: sharedNt.NonRootRepos, + MetricsExpectations: sharedNt.MetricsExpectations, + gitPrivateKeyPath: sharedNt.gitPrivateKeyPath, + caCertPath: sharedNt.caCertPath, + Scheme: sharedNt.Scheme, + RemoteRepositories: sharedNt.RemoteRepositories, + WebhookDisabled: sharedNt.WebhookDisabled, + GitProvider: sharedNt.GitProvider, } if opts.SkipConfigSyncInstall { @@ -234,23 +245,24 @@ func FreshTestEnv(t nomostesting.NTB, opts *ntopts.New) *NT { webhookDisabled := false nt := &NT{ - Context: ctx, - T: t, - Logger: logger, - Shell: shell, - ClusterName: opts.ClusterName, - TmpDir: opts.TmpDir, - Config: opts.RESTConfig, - repoSyncPermissions: opts.RepoSyncPermissions, - DefaultReconcileTimeout: opts.ReconcileTimeout, - kubeconfigPath: opts.KubeconfigPath, - RootRepos: make(map[string]*gitproviders.Repository), - NonRootRepos: make(map[types.NamespacedName]*gitproviders.Repository), - MetricsExpectations: testmetrics.NewSyncSetExpectations(t, scheme), - Scheme: scheme, - RemoteRepositories: make(map[types.NamespacedName]*gitproviders.Repository), - WebhookDisabled: &webhookDisabled, - GitProvider: gitproviders.NewGitProvider(t, *e2e.GitProvider, logger), + Context: ctx, + T: t, + Logger: logger, + 
Shell: shell, + ClusterName: opts.ClusterName, + TmpDir: opts.TmpDir, + Config: opts.RESTConfig, + repoSyncPermissions: opts.RepoSyncPermissions, + DefaultReconcileTimeout: opts.ReconcileTimeout, + DefaultReconcilerAutoscalingStrategy: opts.ReconcilerAutoscalingStrategy, + kubeconfigPath: opts.KubeconfigPath, + RootRepos: make(map[string]*gitproviders.Repository), + NonRootRepos: make(map[types.NamespacedName]*gitproviders.Repository), + MetricsExpectations: testmetrics.NewSyncSetExpectations(t, scheme), + Scheme: scheme, + RemoteRepositories: make(map[types.NamespacedName]*gitproviders.Repository), + WebhookDisabled: &webhookDisabled, + GitProvider: gitproviders.NewGitProvider(t, *e2e.GitProvider, logger), } // TODO: Try speeding up the reconciler and hydration polling. @@ -322,6 +334,19 @@ func FreshTestEnv(t nomostesting.NTB, opts *ntopts.New) *NT { nt.setupConfigConnector() } + // Install VPA on Kind, if requested. + // GKE standard clusters should use the VPA addon. + // GKE autopilot comes with VPA included. + // VPA depends on metrics-server. + if *e2e.VPA && *e2e.TestCluster == e2e.Kind { + if err := nt.installMetricsServer(); err != nil { + nt.T.Fatal(err) + } + if err := nt.installVerticalPrivateAutoscaler(); err != nil { + nt.T.Fatal(err) + } + } + t.Cleanup(func() { DeleteRemoteRepos(nt) }) @@ -487,3 +512,8 @@ func TestDir(t nomostesting.NTB) string { t.Logf("created temporary directory %q", tmpDir) return tmpDir } + +// pointerToStrategy returns a pointer to a ReconcilerAutoscalingStrategy. +func pointerToStrategy(strategy metadata.ReconcilerAutoscalingStrategy) *metadata.ReconcilerAutoscalingStrategy { + return &strategy +} diff --git a/e2e/nomostest/nt.go b/e2e/nomostest/nt.go index 512840915a..4594059619 100644 --- a/e2e/nomostest/nt.go +++ b/e2e/nomostest/nt.go @@ -39,8 +39,11 @@ import ( "kpt.dev/configsync/e2e/nomostest/testshell" "kpt.dev/configsync/e2e/nomostest/testwatcher" "kpt.dev/configsync/pkg/core" + "kpt.dev/configsync/pkg/kinds" + "kpt.dev/configsync/pkg/metadata" "kpt.dev/configsync/pkg/testing/fake" "kpt.dev/configsync/pkg/util" + "kpt.dev/configsync/pkg/util/log" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" @@ -122,6 +125,10 @@ type NT struct { // for object reconciliation. DefaultReconcileTimeout *time.Duration + // DefaultReconcilerAutoscalingStrategy is the default strategy to apply to + // new RootSyncs/RepoSyncs. + DefaultReconcilerAutoscalingStrategy *metadata.ReconcilerAutoscalingStrategy + // RootRepos is the root repositories the cluster is syncing to. // The key is the RootSync name and the value points to the corresponding Repository object. // Each test case was set up with a default RootSync (`root-sync`) installed. @@ -459,6 +466,21 @@ func (nt *NT) PodLogs(namespace, deployment, container string, previousPodLog bo nt.T.Logf("%s\n%s", cmd, out) } +// LogDeploymentPodResources logs the resources of the deployment's pod's containers +func (nt *NT) LogDeploymentPodResources(namespace, deployment string) { + nt.T.Helper() + pod, err := nt.KubeClient.GetDeploymentPod( + deployment, namespace, 30*time.Second) + if err != nil { + nt.T.Error(err) + return + } + nt.T.Logf("Deployment %s/%s pod container resources:", namespace, deployment) + for _, container := range pod.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } +} + // printTestLogs prints test logs and pods information for debugging. 
func (nt *NT) printTestLogs() { nt.T.Log("[CLEANUP] Printing test logs for current container instances") @@ -487,11 +509,13 @@ func (nt *NT) testLogs(previousPodLog bool) { nt.PodLogs(configmanagement.ControllerNamespace, core.RootReconcilerName(name), reconcilermanager.Reconciler, previousPodLog) //nt.PodLogs(configmanagement.ControllerNamespace, reconcilermanager.NsReconcilerName(ns), reconcilermanager.GitSync, previousPodLog) + nt.LogDeploymentPodResources(configmanagement.ControllerNamespace, core.RootReconcilerName(name)) } for nn := range nt.NonRootRepos { nt.PodLogs(configmanagement.ControllerNamespace, core.NsReconcilerName(nn.Namespace, nn.Name), reconcilermanager.Reconciler, previousPodLog) //nt.PodLogs(configmanagement.ControllerNamespace, reconcilermanager.NsReconcilerName(ns), reconcilermanager.GitSync, previousPodLog) + nt.LogDeploymentPodResources(configmanagement.ControllerNamespace, core.NsReconcilerName(nn.Namespace, nn.Name)) } } @@ -824,6 +848,95 @@ func (nt *NT) setupConfigConnector() { } } +// installMetricsServer applies the metrics-server package. +func (nt *NT) installMetricsServer() error { + nt.T.Log("[SETUP] Installing Metrics Server") + sourcePath := filepath.Join(".", "..", "testdata", "metrics-server") + absPath, err := filepath.Abs(sourcePath) + if err != nil { + return err + } + objs, err := parseManifestDir(absPath) + if err != nil { + return err + } + objs, err = convertToTypedObjects(nt, objs) + if err != nil { + return err + } + return ApplyObjectsAndWait(nt, objs...) +} + +// uninstallMetricsServer deletes the metrics-server package. +func (nt *NT) uninstallMetricsServer() error { + nt.T.Log("[SETUP] Uninstalling Metrics Server") + sourcePath := filepath.Join(".", "..", "testdata", "metrics-server") + absPath, err := filepath.Abs(sourcePath) + if err != nil { + return err + } + objs, err := parseManifestDir(absPath) + if err != nil { + return err + } + objs, err = convertToTypedObjects(nt, objs) + if err != nil { + return err + } + // Delete in reverse order + reverseClientObjectList(objs) + return DeleteObjectsAndWait(nt, objs...) +} + +// installVerticalPrivateAutoscaler applies the vertical-pod-autoscaler package. +func (nt *NT) installVerticalPrivateAutoscaler() error { + nt.T.Log("[SETUP] Installing Vertical Pod Autoscaler") + sourcePath := filepath.Join(".", "..", "testdata", "vertical-pod-autoscaler") + absPath, err := filepath.Abs(sourcePath) + if err != nil { + return err + } + objs, err := parseManifestDir(absPath) + if err != nil { + return err + } + objs, err = convertToTypedObjects(nt, objs) + if err != nil { + return err + } + return ApplyObjectsAndWait(nt, objs...) +} + +// uninstallVerticalPrivateAutoscaler deletes the vertical-pod-autoscaler package. +func (nt *NT) uninstallVerticalPrivateAutoscaler() error { + nt.T.Log("[SETUP] Uninstalling Vertical Pod Autoscaler") + sourcePath := filepath.Join(".", "..", "testdata", "vertical-pod-autoscaler") + absPath, err := filepath.Abs(sourcePath) + if err != nil { + return err + } + objs, err := parseManifestDir(absPath) + if err != nil { + return err + } + objs, err = convertToTypedObjects(nt, objs) + if err != nil { + return err + } + // Delete in reverse order + reverseClientObjectList(objs) + return DeleteObjectsAndWait(nt, objs...) 
+} + +func reverseClientObjectList(input []client.Object) { + if len(input) == 0 { + return + } + for i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 { + input[i], input[j] = input[j], input[i] + } +} + // SupportV1Beta1CRDAndRBAC checks if v1beta1 CRD and RBAC resources are supported // in the current testing cluster. // v1beta1 APIs for CRD and RBAC resources are deprecated in K8s 1.22. @@ -956,3 +1069,25 @@ func cloneCloudSourceRepo(nt *NT, repo string) (string, error) { } return cloneDir, nil } + +// ApplyObjectsAndWait applies zero or more objects in serial and waits for reconciliation in parallel. +func ApplyObjectsAndWait(nt *NT, objs ...client.Object) error { + tg := taskgroup.New() + for _, obj := range objs { + nn := client.ObjectKeyFromObject(obj) + gvk, err := kinds.Lookup(obj, nt.Scheme) + if err != nil { + return err + } + nt.T.Logf("[SETUP] applying %s object %s ...", gvk.Kind, nn) + if err := nt.KubeClient.Apply(obj); err != nil { + return errors.Wrapf(err, "unable to apply %s object %s", + gvk.Kind, nn) + } + tg.Go(func() error { + nt.T.Logf("[SETUP] Waiting for apply of %s object %s to reconcile...", gvk.Kind, nn) + return nt.Watcher.WatchForCurrentStatus(gvk, nn.Name, nn.Namespace) + }) + } + return tg.Wait() +} diff --git a/e2e/nomostest/ntopts/multi_repo.go b/e2e/nomostest/ntopts/multi_repo.go index 0b0cdd6b8a..0b5317e21f 100644 --- a/e2e/nomostest/ntopts/multi_repo.go +++ b/e2e/nomostest/ntopts/multi_repo.go @@ -19,6 +19,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/types" + "kpt.dev/configsync/pkg/metadata" ) // RepoOpts defines options for a Repository. @@ -51,6 +52,10 @@ type MultiRepo struct { // RepoSyncPermissions will grant a list of PolicyRules to NS reconcilers RepoSyncPermissions []rbacv1.PolicyRule + + // ReconcilerAutoscalingStrategy sets the reconciler-autoscaling-strategy + // annotation on each R*Sync. + ReconcilerAutoscalingStrategy *metadata.ReconcilerAutoscalingStrategy } // NamespaceRepo tells the test case that a Namespace Repo should be configured @@ -121,3 +126,18 @@ func RepoSyncPermissions(policy ...rbacv1.PolicyRule) Opt { opt.RepoSyncPermissions = append(opt.RepoSyncPermissions, policy...) } } + +// WithReconcilerAutoscalingStrategy specifies the ReconcilerAutoscalingStrategy +// to use on all RootSyncs and RepoSyncs by default. +func WithReconcilerAutoscalingStrategy(strategy metadata.ReconcilerAutoscalingStrategy) func(opt *New) { + return func(opt *New) { + strategyCopy := strategy + opt.ReconcilerAutoscalingStrategy = &strategyCopy + } +} + +// WithoutReconcilerAutoscalingStrategy removes the ReconcilerAutoscalingStrategy +// on all RootSyncs and RepoSyncs by default. +func WithoutReconcilerAutoscalingStrategy(opt *New) { + opt.ReconcilerAutoscalingStrategy = nil +} diff --git a/e2e/nomostest/ntopts/test_type.go b/e2e/nomostest/ntopts/test_type.go index d9e396d6fb..696208c355 100644 --- a/e2e/nomostest/ntopts/test_type.go +++ b/e2e/nomostest/ntopts/test_type.go @@ -25,6 +25,9 @@ type TestType struct { // KCCTest specifies the test is for KCC resources. KCCTest bool + // VPATest specifies the test requires VPA to be installed. + VPATest bool + // GCENodeTest specifies the test is for verifying the gcenode auth type. // It requires a GKE cluster with workload identity disabled. GCENodeTest bool @@ -45,6 +48,11 @@ func KCCTest(opt *New) { opt.KCCTest = true } +// VPATest specifies the test requires VPA to be installed. 
+func VPATest(opt *New) { + opt.VPATest = true +} + // GCENodeTest specifies the test is for verifying the gcenode auth type. func GCENodeTest(opt *New) { opt.GCENodeTest = true diff --git a/e2e/nomostest/testpredicates/predicates.go b/e2e/nomostest/testpredicates/predicates.go index b7b4f4de27..703ff70f2b 100644 --- a/e2e/nomostest/testpredicates/predicates.go +++ b/e2e/nomostest/testpredicates/predicates.go @@ -26,9 +26,9 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "kpt.dev/configsync/e2e/nomostest/retry" "kpt.dev/configsync/e2e/nomostest/testkubeclient" "kpt.dev/configsync/e2e/nomostest/testutils" @@ -250,36 +250,98 @@ func HasExactlyImage(containerName, expectImageName, expectImageTag, expectImage } } -// HasCorrectResourceRequestsLimits verify a root/namespace reconciler container has the correct resource requests and limits. -func HasCorrectResourceRequestsLimits(containerName string, cpuRequest, cpuLimit, memoryRequest, memoryLimit resource.Quantity) Predicate { +// DeploymentContainerResourcesEqual verifies a reconciler deployment container +// has the expected resource requests and limits. +func DeploymentContainerResourcesEqual(expectedSpec v1beta1.ContainerResourcesSpec) Predicate { return func(o client.Object) error { if o == nil { return ErrObjectNotFound } + if uObj, ok := o.(*unstructured.Unstructured); ok { + rObj, err := kinds.ToTypedObject(uObj, core.Scheme) + if err != nil { + return err + } + o, err = kinds.ObjectAsClientObject(rObj) + if err != nil { + return err + } + } dep, ok := o.(*appsv1.Deployment) if !ok { return WrongTypeErr(o, &appsv1.Deployment{}) } - container := ContainerByName(dep, containerName) - if container == nil { - return fmt.Errorf("expected container not found: %s", containerName) - } - if !equality.Semantic.DeepEqual(container.Resources.Requests[corev1.ResourceCPU], cpuRequest) { - return errors.Errorf("The CPU request of the %q container should be %v, got %v", container.Name, cpuRequest, container.Resources.Requests[corev1.ResourceCPU]) + container := ContainerByName(dep, expectedSpec.ContainerName) + return validateContainerResources(container, expectedSpec) + } +} + +// DeploymentContainerResourcesAllEqual verifies all reconciler deployment +// containers have the expected resource requests and limits. 
+func DeploymentContainerResourcesAllEqual(expectedByName map[string]v1beta1.ContainerResourcesSpec) Predicate { + return func(o client.Object) error { + if o == nil { + return ErrObjectNotFound } - if !equality.Semantic.DeepEqual(container.Resources.Limits[corev1.ResourceCPU], cpuLimit) { - return errors.Errorf("The CPU limit of the %q container should be %v, got %v", container.Name, cpuLimit, container.Resources.Limits[corev1.ResourceCPU]) + if uObj, ok := o.(*unstructured.Unstructured); ok { + rObj, err := kinds.ToTypedObject(uObj, core.Scheme) + if err != nil { + return err + } + o, err = kinds.ObjectAsClientObject(rObj) + if err != nil { + return err + } } - if !equality.Semantic.DeepEqual(container.Resources.Requests[corev1.ResourceMemory], memoryRequest) { - return errors.Errorf("The memory request of the %q container should be %v, got %v", container.Name, memoryRequest, container.Resources.Requests[corev1.ResourceMemory]) + d, ok := o.(*appsv1.Deployment) + if !ok { + return WrongTypeErr(d, &appsv1.Deployment{}) } - if !equality.Semantic.DeepEqual(container.Resources.Limits[corev1.ResourceMemory], memoryLimit) { - return errors.Errorf("The memory limit of the %q container should be %v, got %v", container.Name, memoryLimit, container.Resources.Limits[corev1.ResourceMemory]) + for _, container := range d.Spec.Template.Spec.Containers { + expectedSpec, ok := expectedByName[container.Name] + if !ok { + return fmt.Errorf("found unexpected container: %q", + container.Name) + } + if err := validateContainerResources(&container, expectedSpec); err != nil { + return err + } } return nil } } +func validateContainerResources(container *corev1.Container, expectedSpec v1beta1.ContainerResourcesSpec) error { + if container == nil { + return fmt.Errorf("expected container not found: %s", expectedSpec.ContainerName) + } + expected := expectedSpec.CPURequest + found := container.Resources.Requests.Cpu() + if found.Cmp(expected) != 0 { + return fmt.Errorf("expected CPU request of the %q container: %s, got: %s", + container.Name, &expected, found) + } + expected = expectedSpec.MemoryRequest + found = container.Resources.Requests.Memory() + if found.Cmp(expected) != 0 { + return fmt.Errorf("expected Memory request of the %q container: %s, got: %s", + container.Name, &expected, found) + } + expected = expectedSpec.CPULimit + found = container.Resources.Limits.Cpu() + if found.Cmp(expected) != 0 { + return fmt.Errorf("expected CPU limit of the %q container: %s, got: %s", + container.Name, &expected, found) + } + expected = expectedSpec.MemoryLimit + found = container.Resources.Limits.Memory() + if found.Cmp(expected) != 0 { + return fmt.Errorf("expected Memory limit of the %q container: %s, got: %s", + container.Name, &expected, found) + } + return nil +} + // NotPendingDeletion ensures o is not pending deletion. // // Check this when the object could be scheduled for deletion, to avoid flaky @@ -643,6 +705,20 @@ func GenerationNotEquals(generation int64) Predicate { } } +// UIDNotEquals checks that the object's UID is NOT the specified value. +func UIDNotEquals(invalidUID types.UID) Predicate { + return func(obj client.Object) error { + if obj == nil { + return ErrObjectNotFound + } + foundUID := obj.GetUID() + if foundUID == invalidUID { + return fmt.Errorf("expected UID to not equal %v, but found %v", invalidUID, foundUID) + } + return nil + } +} + // StatusEquals checks that the object's computed status matches the specified // status. 
func StatusEquals(scheme *runtime.Scheme, expected status.Status) Predicate { @@ -1035,3 +1111,60 @@ func validateRootSyncCondition(actual *v1beta1.RootSyncCondition, expected *v1be } return nil } + +// ReconcilerAutoscalingStrategyEquals checks that the object's reconciler +// autoscaling strategy matches the specified strategy. +func ReconcilerAutoscalingStrategyEquals(expected metadata.ReconcilerAutoscalingStrategy) Predicate { + return func(obj client.Object) error { + if obj == nil { + return ErrObjectNotFound + } + found := core.GetAnnotation(obj, metadata.ReconcilerAutoscalingStrategyAnnotationKey) + + if found != string(expected) { + return errors.Errorf("expected %s to have reconciler-autoscaling-strategy %q, but got %q", + kinds.ObjectSummary(obj), expected, found) + } + return nil + } +} + +// MissingReconcilerAutoscalingStrategy checks that the object's reconciler +// autoscaling strategy is not specified. +func MissingReconcilerAutoscalingStrategy() Predicate { + return func(obj client.Object) error { + if obj == nil { + return ErrObjectNotFound + } + annotations := obj.GetAnnotations() + if len(annotations) == 0 { + return nil + } + _, found := annotations[metadata.ReconcilerAutoscalingStrategyAnnotationKey] + if found { + return errors.Errorf("expected %s to not have a specified reconciler-autoscaling-strategy, but got %v", + kinds.ObjectSummary(obj), found) + } + return nil + } +} + +// RootSyncSpecOverrideEquals checks that the RootSync's spec.override matches +// the specified OverrideSpec. +func RootSyncSpecOverrideEquals(expected *v1beta1.OverrideSpec) Predicate { + return func(obj client.Object) error { + if obj == nil { + return ErrObjectNotFound + } + rs, ok := obj.(*v1beta1.RootSync) + if !ok { + return WrongTypeErr(obj, &v1beta1.RootSync{}) + } + found := rs.Spec.Override + if !equality.Semantic.DeepEqual(found, expected) { + return errors.Errorf("expected %s to have spec.override: %s, but got %s", + kinds.ObjectSummary(obj), log.AsJSON(expected), log.AsJSON(found)) + } + return nil + } +} diff --git a/e2e/testcases/helm_sync_test.go b/e2e/testcases/helm_sync_test.go index 7c2528f057..00b7d8de7d 100644 --- a/e2e/testcases/helm_sync_test.go +++ b/e2e/testcases/helm_sync_test.go @@ -83,11 +83,13 @@ func TestPublicHelm(t *testing.T) { } if err := nt.Validate("my-wordpress", "wordpress", &appsv1.Deployment{}, testpredicates.DeploymentContainerPullPolicyEquals("wordpress", "Always"), - testpredicates.HasCorrectResourceRequestsLimits("wordpress", - resource.MustParse(expectedCPURequest), - resource.MustParse(expectedCPULimit), - resource.MustParse(expectedMemoryRequest), - resource.MustParse(expectedMemoryLimit)), + testpredicates.DeploymentContainerResourcesEqual(v1beta1.ContainerResourcesSpec{ + ContainerName: "wordpress", + CPURequest: resource.MustParse(expectedCPURequest), + CPULimit: resource.MustParse(expectedCPULimit), + MemoryRequest: resource.MustParse(expectedMemoryRequest), + MemoryLimit: resource.MustParse(expectedMemoryLimit), + }), testpredicates.HasExactlyImage("wordpress", "bitnami/wordpress", "", "sha256:362cb642db481ebf6f14eb0244fbfb17d531a84ecfe099cd3bba6810db56694e"), testpredicates.DeploymentHasEnvVar("wordpress", "WORDPRESS_USERNAME", "test-user"), testpredicates.DeploymentHasEnvVar("wordpress", "WORDPRESS_EMAIL", "test-user@example.com"), @@ -276,11 +278,13 @@ service: } if err := nt.Validate("my-wordpress", "wordpress", &appsv1.Deployment{}, testpredicates.DeploymentContainerPullPolicyEquals("wordpress", "Always"), - 
testpredicates.HasCorrectResourceRequestsLimits("wordpress", - resource.MustParse(expectedCPURequest), - resource.MustParse(expectedCPULimit), - resource.MustParse(expectedMemoryRequest), - resource.MustParse(expectedMemoryLimit)), + testpredicates.DeploymentContainerResourcesEqual(v1beta1.ContainerResourcesSpec{ + ContainerName: "wordpress", + CPURequest: resource.MustParse(expectedCPURequest), + CPULimit: resource.MustParse(expectedCPULimit), + MemoryRequest: resource.MustParse(expectedMemoryRequest), + MemoryLimit: resource.MustParse(expectedMemoryLimit), + }), testpredicates.HasExactlyImage("wordpress", "bitnami/wordpress", "", "sha256:362cb642db481ebf6f14eb0244fbfb17d531a84ecfe099cd3bba6810db56694e"), testpredicates.DeploymentHasEnvVar("wordpress", "WORDPRESS_USERNAME", "test-user-1"), testpredicates.DeploymentHasEnvVar("wordpress", "WORDPRESS_EMAIL", "test-user@example.com"), diff --git a/e2e/testcases/override_resource_limits_test.go b/e2e/testcases/override_resource_limits_test.go index 411424ca29..a28b39fc03 100644 --- a/e2e/testcases/override_resource_limits_test.go +++ b/e2e/testcases/override_resource_limits_test.go @@ -15,14 +15,13 @@ package e2e import ( - "io/ioutil" "testing" "time" + "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/types" + "kpt.dev/configsync/e2e" "kpt.dev/configsync/e2e/nomostest" "kpt.dev/configsync/e2e/nomostest/ntopts" nomostesting "kpt.dev/configsync/e2e/nomostest/testing" @@ -35,84 +34,43 @@ import ( "kpt.dev/configsync/pkg/core" "kpt.dev/configsync/pkg/kinds" "kpt.dev/configsync/pkg/reconcilermanager" + "kpt.dev/configsync/pkg/reconcilermanager/controllers" "kpt.dev/configsync/pkg/testing/fake" - "sigs.k8s.io/yaml" ) -func defaultResourceRequestsLimits(nt *nomostest.NT) (reconcilerRequests, reconcilerLimits, gitSyncRequests, gitSyncLimits corev1.ResourceList) { - path := "../../manifests/templates/reconciler-manager-configmap.yaml" - contents, err := ioutil.ReadFile(path) - if err != nil { - nt.T.Fatalf("Failed to read file (%s): %v", path, err) - } - - var cm corev1.ConfigMap - if err := yaml.Unmarshal(contents, &cm); err != nil { - nt.T.Fatalf("Failed to parse the ConfigMap object in %s: %v", path, err) - } - - key := "deployment.yaml" - deployContents, ok := cm.Data[key] - if !ok { - nt.T.Fatalf("The `data` field of the ConfigMap object in %s does not include the %q key", path, key) - } - - var deploy appsv1.Deployment - if err := yaml.Unmarshal([]byte(deployContents), &deploy); err != nil { - nt.T.Fatalf("Failed to parse the Deployment object in the `data` field of the ConfigMap object in %s: %v", path, err) - } - - for _, container := range deploy.Spec.Template.Spec.Containers { - if container.Name == reconcilermanager.Reconciler || container.Name == reconcilermanager.GitSync { - if container.Resources.Requests.Cpu().IsZero() || container.Resources.Requests.Memory().IsZero() { - nt.T.Fatalf("The %s container in %s should define CPU/memory requests", container.Name, path) - } - } - if container.Name == reconcilermanager.Reconciler { - reconcilerRequests = container.Resources.Requests - reconcilerLimits = container.Resources.Limits - } - if container.Name == reconcilermanager.GitSync { - gitSyncRequests = container.Resources.Requests - gitSyncLimits = container.Resources.Limits - } - } - return -} - func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { nt := nomostest.New(t, nomostesting.OverrideAPI, 
ntopts.SkipAutopilotCluster, + // Disable autoscaling to make resources predictable + ntopts.WithoutReconcilerAutoscalingStrategy, ntopts.NamespaceRepo(backendNamespace, configsync.RepoSyncName), ntopts.NamespaceRepo(frontendNamespace, configsync.RepoSyncName)) - if err := nt.WatchForAllSyncs(); err != nil { - nt.T.Fatal(err) - } - rootReconcilerNN := types.NamespacedName{ - Name: nomostest.DefaultRootReconcilerName, - Namespace: v1.NSConfigManagementSystem, - } - backendReconcilerNN := types.NamespacedName{ - Name: core.NsReconcilerName(backendNamespace, configsync.RepoSyncName), - Namespace: v1.NSConfigManagementSystem, - } - frontendReconcilerNN := types.NamespacedName{ - Name: core.NsReconcilerName(frontendNamespace, configsync.RepoSyncName), - Namespace: v1.NSConfigManagementSystem, + rootSyncNN := nomostest.RootSyncNN(configsync.RootSyncName) + rootReconcilerNN := core.RootReconcilerObjectKey(rootSyncNN.Name) + backendReconcilerNN := core.NsReconcilerObjectKey(backendNamespace, configsync.RepoSyncName) + frontendReconcilerNN := core.NsReconcilerObjectKey(frontendNamespace, configsync.RepoSyncName) + + // Get RootSync + rootSyncObj := &v1alpha1.RootSync{} + err := nt.Validate(rootSyncNN.Name, rootSyncNN.Namespace, rootSyncObj) + if err != nil { + nt.T.Fatal(err) } // Get the default CPU/memory requests and limits of the reconciler container and the git-sync container - reconcilerResourceRequests, reconcilerResourceLimits, gitSyncResourceRequests, gitSyncResourceLimits := defaultResourceRequestsLimits(nt) - defaultReconcilerCPURequest, defaultReconcilerMemRequest := reconcilerResourceRequests[corev1.ResourceCPU], reconcilerResourceRequests[corev1.ResourceMemory] - defaultReconcilerCPULimits, defaultReconcilerMemLimits := reconcilerResourceLimits[corev1.ResourceCPU], reconcilerResourceLimits[corev1.ResourceMemory] - defaultGitSyncCPURequest, defaultGitSyncMemRequest := gitSyncResourceRequests[corev1.ResourceCPU], gitSyncResourceRequests[corev1.ResourceMemory] - defaultGitSyncCPULimits, defaultGitSyncMemLimits := gitSyncResourceLimits[corev1.ResourceCPU], gitSyncResourceLimits[corev1.ResourceMemory] + var defaultResources map[string]v1beta1.ContainerResourcesSpec + if *e2e.VPA && nomostest.IsReconcilerAutoscalingEnabled(rootSyncObj) { + defaultResources = controllers.ReconcilerContainerResourceAutoscaleDefaults() + } else { + defaultResources = controllers.ReconcilerContainerResourceDefaults() + } // Verify root-reconciler uses the default resource requests and limits rootReconcilerDeployment := &appsv1.Deployment{} - err := nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, rootReconcilerDeployment, - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + err = nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, rootReconcilerDeployment, + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -120,8 +78,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify ns-reconciler-backend uses the default resource requests and limits 
nsReconcilerBackendDeployment := &appsv1.Deployment{} err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, nsReconcilerBackendDeployment, - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -129,8 +88,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify ns-reconciler-frontend uses the default resource requests and limits nsReconcilerFrontendDeployment := &appsv1.Deployment{} err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, nsReconcilerFrontendDeployment, - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -151,15 +111,24 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { nt.MustMergePatch(rootSync, `{"spec": {"override": {"resources": [{"containerName": "reconciler", "cpuRequest": "500m", "cpuLimit": "800m", "memoryRequest": "400Mi", "memoryLimit": "411Mi"}]}}}`) rootReconcilerDeploymentGeneration++ + updatedRootReconcilerResources := v1beta1.ContainerResourcesSpec{ + ContainerName: reconcilermanager.Reconciler, + CPURequest: resource.MustParse("500m"), + CPULimit: resource.MustParse("800m"), + MemoryRequest: resource.MustParse("400Mi"), + MemoryLimit: resource.MustParse("411Mi"), + } + // Verify the reconciler container of root-reconciler uses the new resource request and limits, and the git-sync container uses the default resource requests and limits. 
err = nt.Watcher.WatchObject(kinds.Deployment(), rootReconcilerNN.Name, rootReconcilerNN.Namespace, []testpredicates.Predicate{ testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("800m"), resource.MustParse("400Mi"), resource.MustParse("411Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits), + testpredicates.DeploymentContainerResourcesEqual(updatedRootReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), }, - testwatcher.WatchTimeout(30*time.Second)) + testwatcher.WatchTimeout(30*time.Second), + ) if err != nil { nt.T.Fatal(err) } @@ -167,8 +136,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify ns-reconciler-backend uses the default resource requests and limits err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -176,8 +146,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify ns-reconciler-frontend uses the default resource requests and limits err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -234,26 +205,39 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify the resource requests and limits of root-reconciler are not affected by the resource changes of ns-reconciler-backend and ns-reconciler-fronend err = nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("800m"), resource.MustParse("400Mi"), resource.MustParse("411Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + 
testpredicates.DeploymentContainerResourcesEqual(updatedRootReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } + updatedBackendReconcilerResources := v1beta1.ContainerResourcesSpec{} + require.NoError(nt.T, v1alpha1.Convert_v1alpha1_ContainerResourcesSpec_To_v1beta1_ContainerResourcesSpec(&repoSyncBackend.Spec.Override.Resources[0], &updatedBackendReconcilerResources, nil)) + updatedBackendGitSyncResources := v1beta1.ContainerResourcesSpec{} + require.NoError(nt.T, v1alpha1.Convert_v1alpha1_ContainerResourcesSpec_To_v1beta1_ContainerResourcesSpec(&repoSyncBackend.Spec.Override.Resources[1], &updatedBackendGitSyncResources, nil)) + // Verify ns-reconciler-backend uses the new resource requests and limits err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("1000m"), resource.MustParse("500Mi"), resource.MustParse("555Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("600m"), resource.MustParse("1"), resource.MustParse("600Mi"), resource.MustParse("666Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedBackendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedBackendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } + updatedFrontendReconcilerResources := v1beta1.ContainerResourcesSpec{} + require.NoError(nt.T, v1alpha1.Convert_v1alpha1_ContainerResourcesSpec_To_v1beta1_ContainerResourcesSpec(&repoSyncFrontend.Spec.Override.Resources[0], &updatedFrontendReconcilerResources, nil)) + updatedFrontendGitSyncResources := v1beta1.ContainerResourcesSpec{} + require.NoError(nt.T, v1alpha1.Convert_v1alpha1_ContainerResourcesSpec_To_v1beta1_ContainerResourcesSpec(&repoSyncFrontend.Spec.Override.Resources[1], &updatedFrontendGitSyncResources, nil)) + // Verify ns-reconciler-frontend uses the new resource requests and limits err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), resource.MustParse("511Mi"), resource.MustParse("544Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -262,15 +246,24 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { nt.MustMergePatch(rootSync, `{"spec": {"override": {"resources": [{"containerName": "git-sync", "cpuLimit": "333m"}]}}}`) rootReconcilerDeploymentGeneration++ + updatedRootReconcilerGitSyncResources := v1beta1.ContainerResourcesSpec{ + ContainerName: reconcilermanager.GitSync, + CPURequest: defaultResources[reconcilermanager.GitSync].CPURequest, + CPULimit: resource.MustParse("333m"), + MemoryRequest: defaultResources[reconcilermanager.GitSync].MemoryRequest, + MemoryLimit: 
defaultResources[reconcilermanager.GitSync].MemoryLimit, + } + // Verify the reconciler container root-reconciler uses the default resource requests and limits, and the git-sync container uses the new resource limits. err = nt.Watcher.WatchObject(kinds.Deployment(), rootReconcilerNN.Name, rootReconcilerNN.Namespace, []testpredicates.Predicate{ testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, resource.MustParse("333m"), defaultGitSyncMemRequest, defaultGitSyncMemLimits), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(updatedRootReconcilerGitSyncResources), }, - testwatcher.WatchTimeout(30*time.Second)) + testwatcher.WatchTimeout(30*time.Second), + ) if err != nil { nt.T.Fatal(err) } @@ -278,8 +271,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify the resource limits of ns-reconciler-backend are not affected by the resource limit change of root-reconciler err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("1000m"), resource.MustParse("500Mi"), resource.MustParse("555Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("600m"), resource.MustParse("1"), resource.MustParse("600Mi"), resource.MustParse("666Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedBackendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedBackendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -287,8 +281,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify the resource limits of ns-reconciler-frontend are not affected by the resource limit change of root-reconciler err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), resource.MustParse("511Mi"), resource.MustParse("544Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -302,8 +297,8 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { rootReconcilerNN.Name, rootReconcilerNN.Namespace, []testpredicates.Predicate{ testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, 
defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), }, testwatcher.WatchTimeout(30*time.Second)) if err != nil { @@ -313,8 +308,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify the resource requests and limits of ns-reconciler-backend are not affected by the resource limit change of root-reconciler err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("1000m"), resource.MustParse("500Mi"), resource.MustParse("555Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("600m"), resource.MustParse("1"), resource.MustParse("600Mi"), resource.MustParse("666Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedBackendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedBackendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -322,8 +318,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify the resource requests and limits of ns-reconciler-frontend are not affected by the resource limit change of root-reconciler err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), resource.MustParse("511Mi"), resource.MustParse("544Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -340,8 +337,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify ns-reconciler-backend uses the default resource requests and limits err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -349,8 +347,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify root-reconciler uses the default resource requests and limits err = nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, &appsv1.Deployment{}, 
testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -358,8 +357,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify the resource requests and limits of ns-reconciler-frontend are not affected by the resource limit change of ns-reconciler-backend err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), resource.MustParse("511Mi"), resource.MustParse("544Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -376,8 +376,9 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { // Verify ns-reconciler-frontend uses the default resource requests and limits err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -385,37 +386,37 @@ func TestOverrideReconcilerResourcesV1Alpha1(t *testing.T) { func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { nt := nomostest.New(t, nomostesting.OverrideAPI, ntopts.SkipAutopilotCluster, + // Disable autoscaling to make resources predictable + ntopts.WithoutReconcilerAutoscalingStrategy, ntopts.NamespaceRepo(backendNamespace, configsync.RepoSyncName), ntopts.NamespaceRepo(frontendNamespace, configsync.RepoSyncName)) - if err := nt.WatchForAllSyncs(); err != nil { - nt.T.Fatal(err) - } - rootReconcilerNN := types.NamespacedName{ - Name: nomostest.DefaultRootReconcilerName, - Namespace: v1.NSConfigManagementSystem, - } - backendReconcilerNN := types.NamespacedName{ - Name: core.NsReconcilerName(backendNamespace, configsync.RepoSyncName), - Namespace: v1.NSConfigManagementSystem, - } - frontendReconcilerNN := types.NamespacedName{ - Name: core.NsReconcilerName(frontendNamespace, 
configsync.RepoSyncName), - Namespace: v1.NSConfigManagementSystem, + rootSyncNN := nomostest.RootSyncNN(configsync.RootSyncName) + rootReconcilerNN := core.RootReconcilerObjectKey(rootSyncNN.Name) + backendReconcilerNN := core.NsReconcilerObjectKey(backendNamespace, configsync.RepoSyncName) + frontendReconcilerNN := core.NsReconcilerObjectKey(frontendNamespace, configsync.RepoSyncName) + + // Get RootSync + rootSyncObj := &v1beta1.RootSync{} + err := nt.Validate(rootSyncNN.Name, rootSyncNN.Namespace, rootSyncObj) + if err != nil { + nt.T.Fatal(err) } // Get the default CPU/memory requests and limits of the reconciler container and the git-sync container - reconcilerResourceRequests, reconcilerResourceLimits, gitSyncResourceRequests, gitSyncResourceLimits := defaultResourceRequestsLimits(nt) - defaultReconcilerCPURequest, defaultReconcilerMemRequest := reconcilerResourceRequests[corev1.ResourceCPU], reconcilerResourceRequests[corev1.ResourceMemory] - defaultReconcilerCPULimits, defaultReconcilerMemLimits := reconcilerResourceLimits[corev1.ResourceCPU], reconcilerResourceLimits[corev1.ResourceMemory] - defaultGitSyncCPURequest, defaultGitSyncMemRequest := gitSyncResourceRequests[corev1.ResourceCPU], gitSyncResourceRequests[corev1.ResourceMemory] - defaultGitSyncCPULimits, defaultGitSyncMemLimits := gitSyncResourceLimits[corev1.ResourceCPU], gitSyncResourceLimits[corev1.ResourceMemory] + var defaultResources map[string]v1beta1.ContainerResourcesSpec + if *e2e.VPA && nomostest.IsReconcilerAutoscalingEnabled(rootSyncObj) { + defaultResources = controllers.ReconcilerContainerResourceAutoscaleDefaults() + } else { + defaultResources = controllers.ReconcilerContainerResourceDefaults() + } // Verify root-reconciler uses the default resource requests and limits rootReconcilerDeployment := &appsv1.Deployment{} - err := nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, rootReconcilerDeployment, - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + err = nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, rootReconcilerDeployment, + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -423,8 +424,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify ns-reconciler-backend uses the default resource requests and limits nsReconcilerBackendDeployment := &appsv1.Deployment{} err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, nsReconcilerBackendDeployment, - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { 
nt.T.Fatal(err) } @@ -432,8 +434,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify ns-reconciler-frontend uses the default resource requests and limits nsReconcilerFrontendDeployment := &appsv1.Deployment{} err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, nsReconcilerFrontendDeployment, - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -454,13 +457,21 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { nt.MustMergePatch(rootSync, `{"spec": {"override": {"resources": [{"containerName": "reconciler", "cpuRequest": "500m", "cpuLimit": "800m", "memoryRequest": "400Mi", "memoryLimit": "411Mi"}]}}}`) rootReconcilerDeploymentGeneration++ + updatedRootReconcilerResources := v1beta1.ContainerResourcesSpec{ + ContainerName: reconcilermanager.Reconciler, + CPURequest: resource.MustParse("500m"), + CPULimit: resource.MustParse("800m"), + MemoryRequest: resource.MustParse("400Mi"), + MemoryLimit: resource.MustParse("411Mi"), + } + // Verify the reconciler container of root-reconciler uses the new resource request and limits, and the git-sync container uses the default resource requests and limits. err = nt.Watcher.WatchObject(kinds.Deployment(), rootReconcilerNN.Name, rootReconcilerNN.Namespace, []testpredicates.Predicate{ testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("800m"), resource.MustParse("400Mi"), resource.MustParse("411Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits), + testpredicates.DeploymentContainerResourcesEqual(updatedRootReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), }, testwatcher.WatchTimeout(30*time.Second)) if err != nil { @@ -470,8 +481,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify ns-reconciler-backend uses the default resource requests and limits err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -479,8 +491,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t 
*testing.T) { // Verify ns-reconciler-frontend uses the default resource requests and limits err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -537,26 +550,35 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify the resource requests and limits of root-reconciler are not affected by the resource changes of ns-reconciler-backend and ns-reconciler-fronend err = nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("800m"), resource.MustParse("400Mi"), resource.MustParse("411Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(updatedRootReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } + updatedBackendReconcilerResources := repoSyncBackend.Spec.Override.Resources[0] + updatedBackendGitSyncResources := repoSyncBackend.Spec.Override.Resources[1] + // Verify ns-reconciler-backend uses the new resource requests and limits err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("1000m"), resource.MustParse("500Mi"), resource.MustParse("555Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("600m"), resource.MustParse("1"), resource.MustParse("600Mi"), resource.MustParse("666Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedBackendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedBackendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } + updatedFrontendReconcilerResources := repoSyncFrontend.Spec.Override.Resources[0] + updatedFrontendGitSyncResources := repoSyncFrontend.Spec.Override.Resources[1] + // Verify ns-reconciler-frontend uses the new resource requests and limits err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), resource.MustParse("511Mi"), resource.MustParse("544Mi")), - 
testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -565,15 +587,24 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { nt.MustMergePatch(rootSync, `{"spec": {"override": {"resources": [{"containerName": "git-sync", "cpuLimit": "333m"}]}}}`) rootReconcilerDeploymentGeneration++ + updatedRootReconcilerGitSyncResources := v1beta1.ContainerResourcesSpec{ + ContainerName: reconcilermanager.GitSync, + CPURequest: defaultResources[reconcilermanager.GitSync].CPURequest, + CPULimit: resource.MustParse("333m"), + MemoryRequest: defaultResources[reconcilermanager.GitSync].MemoryRequest, + MemoryLimit: defaultResources[reconcilermanager.GitSync].MemoryLimit, + } + // Verify the reconciler container root-reconciler uses the default resource requests and limits, and the git-sync container uses the new resource limits. err = nt.Watcher.WatchObject(kinds.Deployment(), rootReconcilerNN.Name, rootReconcilerNN.Namespace, []testpredicates.Predicate{ testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, resource.MustParse("333m"), defaultGitSyncMemRequest, defaultGitSyncMemLimits), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(updatedRootReconcilerGitSyncResources), }, - testwatcher.WatchTimeout(30*time.Second)) + testwatcher.WatchTimeout(30*time.Second), + ) if err != nil { nt.T.Fatal(err) } @@ -581,8 +612,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify the resource limits of ns-reconciler-backend are not affected by the resource limit change of root-reconciler err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("1000m"), resource.MustParse("500Mi"), resource.MustParse("555Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("600m"), resource.MustParse("1"), resource.MustParse("600Mi"), resource.MustParse("666Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedBackendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedBackendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -590,8 +622,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify the resource limits of ns-reconciler-frontend are not affected by the resource limit change of root-reconciler err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), 
resource.MustParse("511Mi"), resource.MustParse("544Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -605,8 +638,8 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { rootReconcilerNN.Name, rootReconcilerNN.Namespace, []testpredicates.Predicate{ testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), }, testwatcher.WatchTimeout(30*time.Second)) if err != nil { @@ -616,8 +649,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify the resource requests and limits of ns-reconciler-backend are not affected by the resource limit change of root-reconciler err = nt.Validate(backendReconcilerNN.Name, backendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("500m"), resource.MustParse("1000m"), resource.MustParse("500Mi"), resource.MustParse("555Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("600m"), resource.MustParse("1"), resource.MustParse("600Mi"), resource.MustParse("666Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedBackendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedBackendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -625,8 +659,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify the resource requests and limits of ns-reconciler-frontend are not affected by the resource limit change of root-reconciler err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), resource.MustParse("511Mi"), resource.MustParse("544Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -643,8 +678,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify ns-reconciler-backend uses the default resource requests and limits err = nt.Validate(core.NsReconcilerName(backendNamespace, configsync.RepoSyncName), v1.NSConfigManagementSystem, 
&appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerBackendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -652,8 +688,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify root-reconciler uses the default resource requests and limits err = nt.Validate(rootReconcilerNN.Name, rootReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(rootReconcilerDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { nt.T.Fatal(err) } @@ -661,8 +698,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify the resource requests and limits of ns-reconciler-frontend are not affected by the resource limit change of ns-reconciler-backend err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, resource.MustParse("511m"), resource.MustParse("2000m"), resource.MustParse("511Mi"), resource.MustParse("544Mi")), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, resource.MustParse("611m"), resource.MustParse("2"), resource.MustParse("611Mi"), resource.MustParse("644Mi"))) + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendReconcilerResources), + testpredicates.DeploymentContainerResourcesEqual(updatedFrontendGitSyncResources), + ) if err != nil { nt.T.Fatal(err) } @@ -679,8 +717,9 @@ func TestOverrideReconcilerResourcesV1Beta1(t *testing.T) { // Verify ns-reconciler-frontend uses the default resource requests and limits err = nt.Validate(frontendReconcilerNN.Name, frontendReconcilerNN.Namespace, &appsv1.Deployment{}, testpredicates.GenerationEquals(nsReconcilerFrontendDeploymentGeneration), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.Reconciler, defaultReconcilerCPURequest, defaultReconcilerCPULimits, defaultReconcilerMemRequest, defaultReconcilerMemLimits), - testpredicates.HasCorrectResourceRequestsLimits(reconcilermanager.GitSync, defaultGitSyncCPURequest, defaultGitSyncCPULimits, defaultGitSyncMemRequest, defaultGitSyncMemLimits)) + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.Reconciler]), + testpredicates.DeploymentContainerResourcesEqual(defaultResources[reconcilermanager.GitSync]), + ) if err != nil { 
nt.T.Fatal(err) } diff --git a/e2e/testcases/reconciler_manager_test.go b/e2e/testcases/reconciler_manager_test.go index 1a7ea20f45..e22dd0e70c 100644 --- a/e2e/testcases/reconciler_manager_test.go +++ b/e2e/testcases/reconciler_manager_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" jserializer "k8s.io/apimachinery/pkg/runtime/serializer/json" @@ -44,32 +45,14 @@ import ( "kpt.dev/configsync/pkg/reconcilermanager" "kpt.dev/configsync/pkg/reconcilermanager/controllers" "kpt.dev/configsync/pkg/testing/fake" - "kpt.dev/configsync/pkg/util" + "kpt.dev/configsync/pkg/util/log" kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/controller-runtime/pkg/client" ) -const ( - initialFirstCPU = 10 - initialFirstMemory = 100 - initialTotalCPU = 80 - initialAdjustedTotalCPU = 250 - initialTotalMemory = 600 - autopilotCPUIncrements = 250 - memoryMB = 1048576 - expectedFirstContainerCPU1 = 180 - expectedFirstContainerCPU2 = 430 - expectedFirstContainerMemory1 = 100 - expectedFirstContainerMemory2 = 200 - updatedFirstContainerCPULimit = "500m" - updatedFirstContainerMemoryLimit = "500Mi" - expectedFirstContainerMemoryLimit1 = "100Mi" - expectedFirstContainerCPULimit1 = "180m" -) - -// TestReconcilerManagerNormalTeardown validates that when a RootSync or RepoSync is -// deleted, the reconciler-manager finalizer handles deletion of the reconciler -// and its dependencies managed by the reconciler-manager. +// TestReconcilerManagerNormalTeardown validates that when a RootSync or +// RepoSync is deleted, the reconciler-manager finalizer handles deletion of the +// reconciler and its dependencies managed by the reconciler-manager. 
func TestReconcilerManagerNormalTeardown(t *testing.T) { testNamespace := "teardown" nt := nomostest.New(t, nomostesting.ACMController, @@ -366,8 +349,8 @@ func TestManagingReconciler(t *testing.T) { nt.T.Log("Verify the reconciler-manager does not revert the change") generation++ // generation bumped by 1 because reconicler-manager should not revert this change err = nt.Watcher.WatchObject(kinds.Deployment(), nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{testpredicates.HasGenerationAtLeast(generation), firstContainerTerminationMessagePathIs("dev/termination-message"), - firstContainerStdinIs(true), hasTolerations(modifiedTolerations), hasPriorityClassName("system-node-critical")}) + []testpredicates.Predicate{testpredicates.HasGenerationAtLeast(generation), firstContainerTerminationMessagePathEquals("dev/termination-message"), + firstContainerStdinEquals(true), hasTolerations(modifiedTolerations), hasPriorityClassName("system-node-critical")}) if err != nil { nt.T.Fatal(err) } @@ -381,8 +364,8 @@ func TestManagingReconciler(t *testing.T) { }) generation++ // generation bumped by 1 because reconciler-manager should not revert this change err = nt.Watcher.WatchObject(kinds.Deployment(), nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{testpredicates.HasGenerationAtLeast(generation), firstContainerTerminationMessagePathIs("dev/termination-log"), - firstContainerStdinIs(false), hasTolerations(originalTolerations), hasPriorityClassName("")}) + []testpredicates.Predicate{testpredicates.HasGenerationAtLeast(generation), firstContainerTerminationMessagePathEquals("dev/termination-log"), + firstContainerStdinEquals(false), hasTolerations(originalTolerations), hasPriorityClassName("")}) if err != nil { nt.T.Fatal(err) } @@ -524,7 +507,7 @@ func resetReconcilerDeploymentManifests(nt *nomostest.NT, containerName string, } } -func firstContainerTerminationMessagePathIs(terminationMessagePath string) testpredicates.Predicate { +func firstContainerTerminationMessagePathEquals(terminationMessagePath string) testpredicates.Predicate { return func(o client.Object) error { if o == nil { return testpredicates.ErrObjectNotFound @@ -539,7 +522,8 @@ func firstContainerTerminationMessagePathIs(terminationMessagePath string) testp return nil } } -func firstContainerStdinIs(stdin bool) testpredicates.Predicate { + +func firstContainerStdinEquals(stdin bool) testpredicates.Predicate { return func(o client.Object) error { if o == nil { return testpredicates.ErrObjectNotFound @@ -554,6 +538,7 @@ func firstContainerStdinIs(stdin bool) testpredicates.Predicate { return nil } } + func hasTolerations(tolerations []corev1.Toleration) testpredicates.Predicate { return func(o client.Object) error { if o == nil { @@ -571,6 +556,7 @@ func hasTolerations(tolerations []corev1.Toleration) testpredicates.Predicate { return nil } } + func hasPriorityClassName(priorityClassName string) testpredicates.Predicate { return func(o client.Object) error { if o == nil { @@ -603,40 +589,6 @@ func hasReplicas(replicas int32) testpredicates.Predicate { } } -func firstContainerMemoryLimitIs(memoryLimit string) testpredicates.Predicate { - return func(o client.Object) error { - if o == nil { - return testpredicates.ErrObjectNotFound - } - d, ok := o.(*appsv1.Deployment) - if !ok { - return testpredicates.WrongTypeErr(d, &appsv1.Deployment{}) - } - if d.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String() != memoryLimit { - return 
fmt.Errorf("expected memory limit of the first container: %s, got: %s", - memoryLimit, d.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()) - } - return nil - } -} - -func firstContainerCPULimitIs(cpuLimit string) testpredicates.Predicate { - return func(o client.Object) error { - if o == nil { - return testpredicates.ErrObjectNotFound - } - d, ok := o.(*appsv1.Deployment) - if !ok { - return testpredicates.WrongTypeErr(d, &appsv1.Deployment{}) - } - if d.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String() != cpuLimit { - return fmt.Errorf("expected CPU limit of the first container: %s, got: %s", - cpuLimit, d.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()) - } - return nil - } -} - func gitCredsVolumeDeleted(volumesCount int) testpredicates.Predicate { return func(o client.Object) error { if o == nil { @@ -705,7 +657,7 @@ func templateForSSHAuthType() testpredicates.Predicate { } } -func totalContainerMemoryRequestIs(memoryRequest int64) testpredicates.Predicate { +func totalContainerMemoryRequestEquals(expected resource.Quantity) testpredicates.Predicate { return func(o client.Object) error { if o == nil { return testpredicates.ErrObjectNotFound @@ -714,27 +666,16 @@ func totalContainerMemoryRequestIs(memoryRequest int64) testpredicates.Predicate if !ok { return testpredicates.WrongTypeErr(d, &appsv1.Deployment{}) } - memoryTotal := getTotalContainerMemoryRequest(d) - - if int64(memoryTotal) != (memoryRequest * memoryMB) { - return fmt.Errorf("expected total memory request of all containers: %d, got: %d", - memoryRequest, memoryTotal) + total := totalContainerMemoryRequests(d.Spec.Template.Spec.Containers) + if total.Cmp(expected) != 0 { + return fmt.Errorf("expected total Memory request of all containers: %d, got: %d", + expected.Value(), total.Value()) } return nil } } -func getTotalContainerMemoryRequest(d *appsv1.Deployment) int { - memoryTotal := 0 - - for _, container := range d.Spec.Template.Spec.Containers { - memoryTotal += int(container.Resources.Requests.Memory().Value()) - } - - return memoryTotal -} - -func totalContainerCPURequestIs(expectedCPURequest int64) testpredicates.Predicate { +func totalContainerCPURequestEquals(expected resource.Quantity) testpredicates.Predicate { return func(o client.Object) error { if o == nil { return testpredicates.ErrObjectNotFound @@ -743,27 +684,16 @@ func totalContainerCPURequestIs(expectedCPURequest int64) testpredicates.Predica if !ok { return testpredicates.WrongTypeErr(d, &appsv1.Deployment{}) } - actualCPUTotal := getTotalContainerCPURequest(d) - - if int64(actualCPUTotal) != expectedCPURequest { + total := totalContainerCPURequests(d.Spec.Template.Spec.Containers) + if total.Cmp(expected) != 0 { return fmt.Errorf("expected total CPU request of all containers: %d, got: %d", - expectedCPURequest, actualCPUTotal) + expected.MilliValue(), total.MilliValue()) } return nil } } -func getTotalContainerCPURequest(d *appsv1.Deployment) int { - cpuTotal := 0 - - for _, container := range d.Spec.Template.Spec.Containers { - cpuTotal += int(container.Resources.Requests.Cpu().MilliValue()) - } - - return cpuTotal -} - -func firstContainerCPURequestIs(cpuRequest int64) testpredicates.Predicate { +func firstContainerNameEquals(expected string) testpredicates.Predicate { return func(o client.Object) error { if o == nil { return testpredicates.ErrObjectNotFound @@ -772,389 +702,288 @@ func firstContainerCPURequestIs(cpuRequest int64) testpredicates.Predicate { if !ok { return 
testpredicates.WrongTypeErr(d, &appsv1.Deployment{}) } - if d.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() != cpuRequest { - return fmt.Errorf("expected CPU request of the first container: %d, got: %d", - cpuRequest, d.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()) + found := d.Spec.Template.Spec.Containers[0].Name + if found != expected { + return fmt.Errorf("expected name of the first container: %q, got: %q", + expected, found) } return nil } } -func firstContainerMemoryRequestIs(memoryRequest int64) testpredicates.Predicate { - memoryRequest *= memoryMB - return func(o client.Object) error { - if o == nil { - return testpredicates.ErrObjectNotFound - } - d, ok := o.(*appsv1.Deployment) - if !ok { - return testpredicates.WrongTypeErr(d, &appsv1.Deployment{}) - } - if d.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().Value() != memoryRequest { - return fmt.Errorf("expected memory request of the first container: %d, got: %d", - memoryRequest, d.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().MilliValue()) - } - return nil +func totalContainerCPURequests(containers []corev1.Container) resource.Quantity { + total := resource.MustParse("0m") + for _, container := range containers { + total.Add(*container.Resources.Requests.Cpu()) } + return total } -func TestAutopilotReconcilerAdjustment(t *testing.T) { - nt := nomostest.New(t, nomostesting.ACMController, ntopts.Unstructured) - - // push DRY configs to sync source to enable hydration-controller - nt.T.Log("Add the namespace-repo root directory to enable hydration") - nt.Must(nt.RootRepos[configsync.RootSyncName].Copy("../testdata/hydration/namespace-repo", ".")) - nt.Must(nt.RootRepos[configsync.RootSyncName].CommitAndPush("add DRY configs to the repository")) - nt.T.Log("Update RootSync to sync from the namespace-repo directory") - rs := fake.RootSyncObjectV1Beta1(configsync.RootSyncName) - nt.MustMergePatch(rs, `{"spec": {"git": {"dir": "namespace-repo"}}}`) - syncDirMap := map[types.NamespacedName]string{ - nomostest.RootSyncNN(configsync.RootSyncName): "namespace-repo", +func totalContainerMemoryRequests(containers []corev1.Container) resource.Quantity { + total := resource.MustParse("0Mi") + for _, container := range containers { + total.Add(*container.Resources.Requests.Memory()) } - if err := nt.WatchForAllSyncs(nomostest.WithSyncDirectoryMap(syncDirMap)); err != nil { + return total +} + +func TestAutopilotReconcilerAdjustment(t *testing.T) { + nt := nomostest.New(t, nomostesting.ACMController, ntopts.Unstructured, + // Disable Autoscaling so we can validate the Autopilot & Standard behavior. + // Autoscaling behavior is inconsistent enough that we can't really + // predict exactly what the resulting resource values will be. 
+ ntopts.WithoutReconcilerAutoscalingStrategy) + + rootSyncNN := nomostest.RootSyncNN(configsync.RootSyncName) + reconcilerNN := core.RootReconcilerObjectKey(rootSyncNN.Name) + + // Get RootSync + rootSyncObj := &v1beta1.RootSync{} + err := nt.Validate(rootSyncNN.Name, rootSyncNN.Namespace, rootSyncObj, + // Confirm autoscaling is disabled + testpredicates.MissingReconcilerAutoscalingStrategy(), + // Confirm there are no resource overrides + testpredicates.RootSyncSpecOverrideEquals(&v1beta1.OverrideSpec{ + ReconcileTimeout: &metav1.Duration{Duration: *nt.DefaultReconcileTimeout}, + }), + ) + if err != nil { nt.T.Fatal(err) } + // Get reconciler Deployment reconcilerDeployment := &appsv1.Deployment{} - if err := nt.Validate(nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, reconcilerDeployment); err != nil { + if err := nt.Validate(reconcilerNN.Name, reconcilerNN.Namespace, reconcilerDeployment); err != nil { nt.T.Fatal(err) } firstContainerName := reconcilerDeployment.Spec.Template.Spec.Containers[0].Name - generation := reconcilerDeployment.Generation - var expectedTotalCPU int64 - var expectedTotalMemory int64 - var expectedFirstContainerCPU int64 - var expectedFirstContainerMemory int64 - var expectedFirstContainerMemoryLimit string - var expectedFirstContainerCPULimit string - firstContainerCPURequest := reconcilerDeployment.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() - firstContainerMemoryRequest := reconcilerDeployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().Value() / memoryMB - input := map[string]corev1.ResourceRequirements{} - output := map[string]corev1.ResourceRequirements{} - - // default container resource requests defined in the reconciler template: manifests/templates/reconciler-manager-configmap.yaml, total CPU/memory: 80m/600Mi - // - hydration-controller: 10m/100Mi (disabled by default) - // - reconciler: 50m/200Mi - // - git-sync: 10m/200Mi - // - otel-agent: 10m/100Mi - // initial generation = 1, total memory 629145600 (600Mi) - if nt.IsGKEAutopilot { - // with autopilot adjustment, CPU of container[0] is increased to 180m - // bringing the total to 250m - expectedTotalCPU = initialAdjustedTotalCPU - expectedFirstContainerCPU = expectedFirstContainerCPU1 + // default container resource requests defined in code: + // pkg/reconcilermanager/controllers/reconciler_container_resources.go + // Default values depend on whether VPA is being used or not. 
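Both default lookups return a map keyed by container name, so the test indexes it with reconcilermanager.Reconciler, reconcilermanager.GitSync, and so on. A minimal sketch of that shape, reusing the request values listed in the removed ConfigMap comment above (illustrative only; the in-code defaults, and especially the smaller VPA defaults, may differ):

    // Sketch of the defaults map this test consumes; the real values live in
    // pkg/reconcilermanager/controllers/reconciler_container_resources.go.
    defaults := map[string]v1beta1.ContainerResourcesSpec{
        reconcilermanager.Reconciler: {
            ContainerName: reconcilermanager.Reconciler,
            CPURequest:    resource.MustParse("50m"),
            MemoryRequest: resource.MustParse("200Mi"),
        },
        reconcilermanager.GitSync: {
            ContainerName: reconcilermanager.GitSync,
            CPURequest:    resource.MustParse("10m"),
            MemoryRequest: resource.MustParse("200Mi"),
        },
    }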
+ var expectedResources map[string]v1beta1.ContainerResourcesSpec + if *e2e.VPA && nomostest.IsReconcilerAutoscalingEnabled(rootSyncObj) { + expectedResources = controllers.ReconcilerContainerResourceAutoscaleDefaults() } else { - expectedTotalCPU = initialTotalCPU - expectedFirstContainerCPU = initialFirstCPU + expectedResources = controllers.ReconcilerContainerResourceDefaults() } - expectedTotalMemory = initialTotalMemory - nt.T.Log("Validating initial container request value") - err := nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - totalContainerCPURequestIs(expectedTotalCPU), - totalContainerMemoryRequestIs(expectedTotalMemory), - firstContainerCPURequestIs(expectedFirstContainerCPU), - }, + if _, found := expectedResources[firstContainerName]; !found { + nt.T.Fatalf("expected the default resource map to include %q, but it was missing: %+v", firstContainerName, expectedResources) + } + + // Compute expected totals from initial values. + // This ensures the totals only account for the containers present, + // even if expectedResources includes additional containers. + expectedTotalResources := v1beta1.ContainerResourcesSpec{ + CPURequest: totalContainerCPURequests(reconcilerDeployment.Spec.Template.Spec.Containers), + MemoryRequest: totalContainerMemoryRequests(reconcilerDeployment.Spec.Template.Spec.Containers), + } + + // Autopilot increases the CPU of the first container, + // if the total CPU is less than 250m. + if nt.IsGKEAutopilot { + minimumTotalCPURequests := resource.MustParse("250m") + if expectedTotalResources.CPURequest.Cmp(minimumTotalCPURequests) < 0 { + // Compute difference + diff := minimumTotalCPURequests.DeepCopy() + diff.Sub(expectedTotalResources.CPURequest) + // Add difference to first container + // Go doesn't allow modifying a struct field in a map directly, + // so read, update, and write it back. 
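To make the 250m floor concrete: with the previous ConfigMap defaults (pod total 80m, first container 10m when the hydration-controller is enabled), the adjustment works out to the values the old test hard-coded. The sketch below assumes those old numbers, not the current in-code defaults. Note that comparisons use Quantity.Cmp throughout, because quantities in different notations (for example "1" and "1000m") are equal in value but not as strings.

    // Worked example of the Autopilot CPU floor, assuming the old 80m/10m defaults.
    func exampleAutopilotCPUFloor() resource.Quantity {
        total := resource.MustParse("80m")    // sum of all container CPU requests
        first := resource.MustParse("10m")    // first container's CPU request
        minimum := resource.MustParse("250m") // Autopilot per-pod minimum
        if total.Cmp(minimum) < 0 {
            diff := minimum.DeepCopy()
            diff.Sub(total) // 170m shortfall
            first.Add(diff) // first container becomes 180m; pod total becomes 250m
        }
        return first
    }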
+ updated := expectedResources[firstContainerName] + updated.CPURequest.Add(diff) + expectedResources[firstContainerName] = updated + // Update total + expectedTotalResources.CPURequest = minimumTotalCPURequests + } + } + + nt.T.Log("Validating container resources - 1") + reconcilerDeployment = &appsv1.Deployment{} + err = nt.Validate(reconcilerNN.Name, reconcilerNN.Namespace, reconcilerDeployment, + testpredicates.HasGenerationAtLeast(generation), + totalContainerCPURequestEquals(expectedTotalResources.CPURequest), + totalContainerMemoryRequestEquals(expectedTotalResources.MemoryRequest), + firstContainerNameEquals(firstContainerName), + testpredicates.DeploymentContainerResourcesAllEqual(expectedResources), ) if err != nil { + nt.T.Log("Reconciler container specs (expected):") + for containerName, containerSpec := range expectedResources { + nt.T.Logf("%s: %s", containerName, log.AsJSON(containerSpec)) + } + nt.T.Log("Reconciler container specs (found):") + for _, container := range reconcilerDeployment.Spec.Template.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } nt.T.Fatal(err) } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem) - // increase CPU and memory request to above current request autopilot increment - nt.T.Log("Increasing CPU and memory request to above current request autopilot increment") - updatedFirstContainerCPURequest := firstContainerCPURequest + 10 - updatedFirstContainerMemoryRequest := firstContainerMemoryRequest + 100 - nt.MustMergePatch(rs, fmt.Sprintf(`{"spec":{"override":{"resources":[{"containerName":"%s","memoryRequest":"%dMi", "cpuRequest":"%dm"}]}}}`, - firstContainerName, updatedFirstContainerMemoryRequest, updatedFirstContainerCPURequest)) - // generation = 2 - generation++ + generation = reconcilerDeployment.GetGeneration() + nt.T.Log("Increasing CPU and Memory request on the RootSync spec.override") + updated := expectedResources[firstContainerName] + updated.CPURequest.Add(resource.MustParse("10m")) + updated.MemoryRequest.Add(resource.MustParse("10Mi")) + nt.MustMergePatch(rootSyncObj, + fmt.Sprintf(`{"spec":{"override":{"resources":[{"containerName":%q,"memoryRequest":%q, "cpuRequest":%q}]}}}`, + firstContainerName, &updated.MemoryRequest, &updated.CPURequest)) + + // Update expectations + expectedResources[firstContainerName] = updated if nt.IsGKEAutopilot { - // hydration-controller input: 190m/200Mi - // with autopilot adjustment, CPU of container[0]/hydration-controller is increased to 430m - // bringing the total to 500m - expectedTotalCPU += autopilotCPUIncrements - expectedFirstContainerCPU = expectedFirstContainerCPU2 - expectedFirstContainerMemory = expectedFirstContainerMemory2 + // Round up to the nearest multiple of 250m. 
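With the same old defaults, this rounding is what the removed constants encoded: the first container sat at the Autopilot-adjusted 180m (pod total 250m), and a 10m override asks for 190m, but Autopilot sizes CPU in 250m increments (the old autopilotCPUIncrements constant), so the expectation grows by a full step instead:

    // Worked example of the 250m rounding, assuming the old Autopilot-adjusted
    // baseline of 180m for the first container (pod total 250m).
    func exampleAutopilotCPURounding() resource.Quantity {
        expected := resource.MustParse("180m")
        // The override asks for 180m + 10m = 190m, but the pod is rounded up by
        // a whole 250m increment.
        expected.Add(resource.MustParse("250m")) // 430m; pod total 500m
        return expected
    }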
+ updated := expectedResources[firstContainerName] + updated.CPURequest.Sub(resource.MustParse("10m")) + updated.CPURequest.Add(resource.MustParse("250m")) + expectedResources[firstContainerName] = updated + // Increase the expected total resources + expectedTotalResources.CPURequest.Add(resource.MustParse("250m")) } else { - expectedTotalCPU = initialTotalCPU + 10 - expectedFirstContainerCPU = updatedFirstContainerCPURequest - expectedFirstContainerMemory = updatedFirstContainerMemoryRequest + // Increase the expected total resources + expectedTotalResources.CPURequest.Add(resource.MustParse("10m")) } - expectedTotalMemory += 100 - // Waiting for reconciliation and validation - nomostest.Wait(nt.T, "the first container resource requests to be increased", nt.DefaultWaitTimeout, func() error { - rd := &appsv1.Deployment{} - err := nt.Validate(nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, rd, - testpredicates.HasGenerationAtLeast(generation), totalContainerCPURequestIs(expectedTotalCPU), - totalContainerMemoryRequestIs(expectedTotalMemory), firstContainerCPURequestIs(expectedFirstContainerCPU), - firstContainerMemoryRequestIs(expectedFirstContainerMemory)) - if err != nil { - return err - } - if nt.IsGKEAutopilot { - input, output, err = util.AutopilotResourceMutation(rd.Annotations[metadata.AutoPilotAnnotation]) - if err != nil { - return err - } - } - generation = rd.GetGeneration() - return nil - }) + // Autopilot doesn't adjust memory + expectedTotalResources.MemoryRequest.Add(resource.MustParse("10Mi")) - // manually increase first container CPU and memory request to above current user override request - nt.T.Log("Manually update the first container CPU and Memory request") - manualFirstContainerCPURequest := updatedFirstContainerCPURequest + 10 - manualFirstContainerMemoryRequest := updatedFirstContainerMemoryRequest + 10 + // Wait for overrides to be applied + // Note: This depends on the Syncing condition reflecting the current RSync generation. 
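As an aside on the merge patch assembled above: Quantity.String is declared on the pointer receiver, which is why the Sprintf arguments pass &updated.MemoryRequest and &updated.CPURequest; %q then quotes the rendered string, so the patch is plain JSON. An illustrative rendering (container name and values are examples only, not the real defaults):

    // {"spec":{"override":{"resources":[{"containerName":"reconciler","memoryRequest":"210Mi", "cpuRequest":"60m"}]}}}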
+ if err := nt.WatchForAllSyncs(); err != nil { + nt.T.Fatal(err) + } - mustUpdateRootReconciler(nt, func(d *appsv1.Deployment) { - d.Spec.Template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{ - "cpu": resource.MustParse(fmt.Sprintf("%dm", manualFirstContainerCPURequest)), - "memory": resource.MustParse(fmt.Sprintf("%dMi", manualFirstContainerMemoryRequest)), - } + nt.T.Log("Wait for the reconciler deployment to be updated once") + generation++ // patched by reconciler-manager + err = nt.Watcher.WatchObject(kinds.Deployment(), reconcilerNN.Name, reconcilerNN.Namespace, []testpredicates.Predicate{ + testpredicates.HasGenerationAtLeast(generation), }) - nt.T.Log("Verify the reconciler-manager does revert the manual memory/CPU request change") - generation += 2 - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - totalContainerCPURequestIs(expectedTotalCPU), - totalContainerMemoryRequestIs(expectedTotalMemory), - firstContainerCPURequestIs(expectedFirstContainerCPU), - firstContainerMemoryRequestIs(expectedFirstContainerMemory), - }, - ) if err != nil { nt.T.Fatal(err) } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem) - // decrease CPU to below autopilot increment but above current request - nt.T.Log("Decreasing CPU request to above current request but below autopilot increment") - if nt.IsGKEAutopilot { - inputRequest := input[firstContainerName].Requests - inputCPU := inputRequest.Cpu().MilliValue() - outputRequest := output[firstContainerName].Requests - outputCPU := outputRequest.Cpu().MilliValue() - updatedFirstContainerCPURequest = (inputCPU + outputCPU) / 2 - // autopilot will not adjust CPU and generation - expectedFirstContainerCPU = expectedFirstContainerCPU2 - expectedFirstContainerMemory = expectedFirstContainerMemory2 - } else { - // since standard cluster doesnt have resource adjustment annotation - // validation data are assigned separately than autopilot cluster - updatedFirstContainerCPURequest -= 10 - expectedTotalCPU -= 10 - expectedFirstContainerCPU = updatedFirstContainerCPURequest - expectedFirstContainerMemory = updatedFirstContainerMemoryRequest - generation++ - } - nt.MustMergePatch(rs, fmt.Sprintf(`{"spec":{"override":{"resources":[{"containerName":"%s","memoryRequest":"%dMi", "cpuRequest":"%dm"}]}}}`, - firstContainerName, updatedFirstContainerMemoryRequest, updatedFirstContainerCPURequest)) - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - totalContainerCPURequestIs(expectedTotalCPU), - totalContainerMemoryRequestIs(expectedTotalMemory), - firstContainerCPURequestIs(expectedFirstContainerCPU), - firstContainerMemoryRequestIs(expectedFirstContainerMemory), - }, + nt.T.Log("Verify the reconciler-manager applied the override memory/CPU request change") + reconcilerDeployment = &appsv1.Deployment{} + err = nt.Validate(reconcilerNN.Name, reconcilerNN.Namespace, reconcilerDeployment, + testpredicates.HasGenerationAtLeast(generation), + totalContainerCPURequestEquals(expectedTotalResources.CPURequest), + totalContainerMemoryRequestEquals(expectedTotalResources.MemoryRequest), + firstContainerNameEquals(firstContainerName), + testpredicates.DeploymentContainerResourcesAllEqual(expectedResources), ) if 
err != nil { + nt.T.Log("Reconciler container specs (expected):") + for containerName, containerSpec := range expectedResources { + nt.T.Logf("%s: %s", containerName, log.AsJSON(containerSpec)) + } + nt.T.Log("Reconciler container specs (found):") + for _, container := range reconcilerDeployment.Spec.Template.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } nt.T.Fatal(err) } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem) + generation = reconcilerDeployment.GetGeneration() - // Manually decrease cpu request to below current cpu request, but the output cpu request after autopilot adjustment are greater than current cpu request - nt.T.Log("Manually decrease the first container CPU request") - manualFirstContainerCPURequest = updatedFirstContainerCPURequest - 10 - if nt.IsGKEAutopilot { - generation++ - } else { - generation += 2 - } + nt.T.Log("Increasing CPU and Memory request on the reconciler Deployment spec.template.spec.containers") + updated = expectedResources[firstContainerName] + updated.CPURequest.Add(resource.MustParse("10m")) + updated.MemoryRequest.Add(resource.MustParse("10Mi")) mustUpdateRootReconciler(nt, func(d *appsv1.Deployment) { d.Spec.Template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{ - "cpu": resource.MustParse(fmt.Sprintf("%dm", manualFirstContainerCPURequest)), + corev1.ResourceCPU: updated.CPURequest, + corev1.ResourceMemory: updated.MemoryRequest, } }) - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - totalContainerCPURequestIs(expectedTotalCPU), - totalContainerMemoryRequestIs(expectedTotalMemory), - firstContainerCPURequestIs(expectedFirstContainerCPU), - firstContainerMemoryRequestIs(expectedFirstContainerMemory), - }, - ) - if err != nil { - nt.T.Fatal(err) - } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem) - // Manually decrease cpu and memory requests to below current requests, and the output resource requests after autopilot adjustment still below current requests - nt.T.Log("Manually decrease the first container CPU and Memory request") - manualFirstContainerCPURequest = initialFirstCPU - manualFirstContainerMemoryRequest = initialFirstMemory - generation += 2 - mustUpdateRootReconciler(nt, func(d *appsv1.Deployment) { - d.Spec.Template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{ - "cpu": resource.MustParse(fmt.Sprintf("%dm", manualFirstContainerCPURequest)), - "memory": resource.MustParse(fmt.Sprintf("%dMi", manualFirstContainerMemoryRequest)), - } + + // Don't update expectations. + // We expect the manual changes to be reverted. 
+ + nt.T.Log("Wait for the reconciler deployment to be updated twice") + generation += 2 // manual update + reconciler-manager revert + err = nt.Watcher.WatchObject(kinds.Deployment(), reconcilerNN.Name, reconcilerNN.Namespace, []testpredicates.Predicate{ + testpredicates.HasGenerationAtLeast(generation), }) - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - totalContainerCPURequestIs(expectedTotalCPU), - totalContainerMemoryRequestIs(expectedTotalMemory), - firstContainerCPURequestIs(expectedFirstContainerCPU), - firstContainerMemoryRequestIs(expectedFirstContainerMemory), - }, - ) if err != nil { nt.T.Fatal(err) } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem) - // decrease cpu and memory request to below current request autopilot increment - nt.T.Log("Reverting CPU and memory back to initial value") - updatedFirstContainerCPURequest = initialFirstCPU - updatedFirstContainerMemoryRequest = initialFirstMemory - nt.MustMergePatch(rs, fmt.Sprintf(`{"spec":{"override":{"resources":[{"containerName":"%s","memoryRequest":"%dMi", "cpuRequest":"%dm"}]}}}`, - firstContainerName, updatedFirstContainerMemoryRequest, updatedFirstContainerCPURequest)) - // increment generation for both standard and autopilot - generation++ - if nt.IsGKEAutopilot { - // hydration-controller input: 10m/100Mi - // with autopilot adjustment, first container is adjusted to 180m - // bringing the total CPU request to 250 - expectedTotalCPU -= autopilotCPUIncrements - expectedTotalMemory = initialTotalMemory - expectedFirstContainerCPU = expectedFirstContainerCPU1 - expectedFirstContainerMemory = expectedFirstContainerMemory1 - } else { - expectedFirstContainerCPU = updatedFirstContainerCPURequest - expectedFirstContainerMemory = updatedFirstContainerMemoryRequest - expectedTotalCPU = initialTotalCPU - expectedTotalMemory = initialTotalMemory - } - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - totalContainerCPURequestIs(expectedTotalCPU), - totalContainerMemoryRequestIs(expectedTotalMemory), - firstContainerCPURequestIs(expectedFirstContainerCPU), - firstContainerMemoryRequestIs(expectedFirstContainerMemory), - }, + nt.T.Log("Verify the reconciler-manager reverted the manual memory/CPU request change") + reconcilerDeployment = &appsv1.Deployment{} + err = nt.Validate(reconcilerNN.Name, reconcilerNN.Namespace, reconcilerDeployment, + testpredicates.HasGenerationAtLeast(generation), + totalContainerCPURequestEquals(expectedTotalResources.CPURequest), + totalContainerMemoryRequestEquals(expectedTotalResources.MemoryRequest), + firstContainerNameEquals(firstContainerName), + testpredicates.DeploymentContainerResourcesAllEqual(expectedResources), ) if err != nil { + nt.T.Log("Reconciler container specs (expected):") + for containerName, containerSpec := range expectedResources { + nt.T.Logf("%s: %s", containerName, log.AsJSON(containerSpec)) + } + nt.T.Log("Reconciler container specs (found):") + for _, container := range reconcilerDeployment.Spec.Template.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } nt.T.Fatal(err) } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, 
v1.NSConfigManagementSystem) + generation = reconcilerDeployment.GetGeneration() - // limit change - // The reconciler-manager allows manual change to resource limits on non-Autopilot clusters only if the resource limits are neither defined by us in reonciler-manager-configmap - // nor users through spec.override.resources field. - // All the changes to resource limits on Autopilot cluster will be ignored, since the resource limits are always same with resource requests on Autopilot cluster - - // Manually update resource limits before override resource limits through API - nt.T.Log("Manually update the first container CPU and Memory limit") - manualFirstContainerCPULimit := "600m" - manualFirstContainerMemoryLimit := "600Mi" - mustUpdateRootReconciler(nt, func(d *appsv1.Deployment) { - d.Spec.Template.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ - "cpu": resource.MustParse(manualFirstContainerCPULimit), - "memory": resource.MustParse(manualFirstContainerMemoryLimit), - } - }) - nt.T.Log("Verify the reconciler-manager does not revert the manual resource limits change when user does not override the memory limit ") + nt.T.Log("Decreasing CPU request on the RootSync spec.override") + updated = expectedResources[firstContainerName] + // Reduce CPU, but not by enough to change the value when rounded up. + updated.CPURequest.Sub(resource.MustParse("10m")) if nt.IsGKEAutopilot { + // Update expectations to ignore override change (round up) + expected := updated // copy + expected.CPURequest.Add(resource.MustParse("10m")) + expectedResources[firstContainerName] = expected + // No update to total resources + // Expect reconciler-manager NOT to update the reconciler Deployment } else { - expectedFirstContainerMemoryLimit = manualFirstContainerMemoryLimit - expectedFirstContainerCPULimit = manualFirstContainerCPULimit + // Update expectations to match override change + expectedResources[firstContainerName] = updated + // Decrease the expected total resources + expectedTotalResources.CPURequest.Sub(resource.MustParse("10m")) + // Expect reconciler-manager to update the reconciler Deployment + generation++ } + nt.MustMergePatch(rootSyncObj, + fmt.Sprintf(`{"spec":{"override":{"resources":[{"containerName":%q,"memoryRequest":%q, "cpuRequest":%q}]}}}`, + firstContainerName, &updated.MemoryRequest, &updated.CPURequest)) - generation++ // generation bumped by 1 because the memory limits are still not owned by reconciler manager - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - firstContainerMemoryLimitIs(expectedFirstContainerMemoryLimit), - firstContainerCPULimitIs(expectedFirstContainerCPULimit), - }, - ) - if err != nil { + // Wait for overrides to be applied + // Note: This depends on the Syncing condition reflecting the current RSync generation.
+ if err := nt.WatchForAllSyncs(); err != nil { nt.T.Fatal(err) } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem) - - // override the resource limits through spec.override.resources field - nt.T.Log("Updating the container limits") - nt.MustMergePatch(rs, fmt.Sprintf(`{"spec":{"override":{"resources":[{"containerName":"%s","memoryLimit":"%s", "cpuLimit":"%s"}]}}}`, - firstContainerName, updatedFirstContainerMemoryLimit, updatedFirstContainerCPULimit)) - if nt.IsGKEAutopilot { - // overriding the limit should not trigger any changes - expectedFirstContainerCPU = expectedFirstContainerCPU1 - expectedFirstContainerMemory = expectedFirstContainerMemory1 - expectedFirstContainerMemoryLimit = expectedFirstContainerMemoryLimit1 - expectedFirstContainerCPULimit = expectedFirstContainerCPULimit1 - - } else { - expectedFirstContainerMemoryLimit = updatedFirstContainerMemoryLimit - expectedFirstContainerCPULimit = updatedFirstContainerCPULimit - generation++ - } - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - firstContainerCPURequestIs(expectedFirstContainerCPU), - firstContainerMemoryRequestIs(expectedFirstContainerMemory), - firstContainerMemoryLimitIs(expectedFirstContainerMemoryLimit), - firstContainerCPULimitIs(expectedFirstContainerCPULimit), - }, - ) + nt.T.Log("Wait for the reconciler-manager to update the reconciler deployment CPU request, only on non-autopilot cluster") + err = nt.Watcher.WatchObject(kinds.Deployment(), reconcilerNN.Name, reconcilerNN.Namespace, []testpredicates.Predicate{ + testpredicates.HasGenerationAtLeast(generation), + }) if err != nil { nt.T.Fatal(err) } - generation = getDeploymentGeneration(nt, nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem) - //manually update the resource limits after user override the resource limits through spec.override field, in this case, reconciler-manager own the resource limits field - nt.T.Log("Manually update the first container CPU and Memory limit") - mustUpdateRootReconciler(nt, func(d *appsv1.Deployment) { - d.Spec.Template.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ - "cpu": resource.MustParse(manualFirstContainerCPULimit), - "memory": resource.MustParse(manualFirstContainerMemoryLimit), - } - }) - nt.T.Log("Verify the reconciler-manager does revert the manual resource limits change after user override the resource limits through API") - if nt.IsGKEAutopilot { - generation++ - } else { - generation += 2 - } - err = nt.Watcher.WatchObject(kinds.Deployment(), - nomostest.DefaultRootReconcilerName, v1.NSConfigManagementSystem, - []testpredicates.Predicate{ - testpredicates.HasGenerationAtLeast(generation), - firstContainerMemoryLimitIs(expectedFirstContainerMemoryLimit), - firstContainerCPULimitIs(expectedFirstContainerCPULimit), - }, + nt.T.Log("Verify the reconciler-manager changed the reconciler CPU request, only on non-autopilot cluster") + reconcilerDeployment = &appsv1.Deployment{} + err = nt.Validate(reconcilerNN.Name, reconcilerNN.Namespace, reconcilerDeployment, + testpredicates.HasGenerationAtLeast(generation), + totalContainerCPURequestEquals(expectedTotalResources.CPURequest), + totalContainerMemoryRequestEquals(expectedTotalResources.MemoryRequest), + firstContainerNameEquals(firstContainerName), + testpredicates.DeploymentContainerResourcesAllEqual(expectedResources), ) if err 
!= nil { + nt.T.Log("Reconciler container specs (expected):") + for containerName, containerSpec := range expectedResources { + nt.T.Logf("%s: %s", containerName, log.AsJSON(containerSpec)) + } + nt.T.Log("Reconciler container specs (found):") + for _, container := range reconcilerDeployment.Spec.Template.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } nt.T.Fatal(err) } } diff --git a/e2e/testcases/stress_test.go b/e2e/testcases/stress_test.go index a0917634c3..40854b5a52 100644 --- a/e2e/testcases/stress_test.go +++ b/e2e/testcases/stress_test.go @@ -25,10 +25,12 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + autoscalingv1vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "k8s.io/utils/pointer" "kpt.dev/configsync/e2e/nomostest" "kpt.dev/configsync/e2e/nomostest/ntopts" @@ -42,6 +44,8 @@ import ( "kpt.dev/configsync/pkg/metadata" "kpt.dev/configsync/pkg/reconcilermanager" "kpt.dev/configsync/pkg/testing/fake" + "kpt.dev/configsync/pkg/util/log" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -125,7 +129,7 @@ func TestStressCRD(t *testing.T) { } nt.T.Logf("Verify that there are exactly 1000 CronTab CRs managed by Config Sync on the cluster") - crList := &unstructured.UnstructuredList{} + crList := &metav1.PartialObjectMetadataList{} crList.SetGroupVersionKind(crontabGVK) if err := nt.KubeClient.List(crList, client.MatchingLabels{metadata.ManagedByKey: metadata.ManagedByValue}); err != nil { nt.T.Error(err) @@ -165,19 +169,116 @@ func TestStressLargeNamespace(t *testing.T) { } nt.T.Log("Verify there are 5000 ConfigMaps in the namespace") - cmList := &corev1.ConfigMapList{} + cmList := &metav1.PartialObjectMetadataList{} + cmList.SetGroupVersionKind(kinds.ConfigMap()) + if err := nt.KubeClient.List(cmList, &client.ListOptions{Namespace: ns}, client.MatchingLabels{labelKey: labelValue}); err != nil { + nt.T.Error(err) + } + if len(cmList.Items) != 5000 { + nt.T.Errorf("The %s namespace should include 5000 ConfigMaps having the `%s: %s` label exactly, found %v instead", ns, labelKey, labelValue, len(cmList.Items)) + } +} + +// TestStressLargeNamespaceAutoscaling tests that Config Sync can sync a +// namespace including 5000 resources successfully, when autoscaling is set to +// Auto, with smaller initial resource requests. +// Ideally, the reconciler should be OOMKilled and/or evicted at least once and +// be replaced with more CPU/Mem.
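+// Autoscaling is enabled by setting the ReconcilerAutoscalingStrategy
+// annotation on the RootSync to Auto and overriding the reconciler container
+// with deliberately small requests/limits, giving the VPA room to scale it up.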
+func TestStressLargeNamespaceAutoscaling(t *testing.T) { + nt := nomostest.New(t, nomostesting.Reconciliation1, ntopts.Unstructured, ntopts.StressTest, + ntopts.WithReconcileTimeout(configsync.DefaultReconcileTimeout)) + nt.T.Log("Stop the CS webhook by removing the webhook configuration") + nomostest.StopWebhook(nt) + + nt.T.Log("Enable autoscaling") + rootSync := fake.RootSyncObjectV1Beta1(configsync.RootSyncName) + if err := nt.KubeClient.Get(rootSync.Name, rootSync.Namespace, rootSync); err != nil { + nt.T.Fatal(err) + } + core.SetAnnotation(rootSync, metadata.ReconcilerAutoscalingStrategyAnnotationKey, string(metadata.ReconcilerAutoscalingStrategyAuto)) + reconcilerResourceSpec := v1beta1.ContainerResourcesSpec{ + ContainerName: "reconciler", + CPURequest: resource.MustParse("10m"), + CPULimit: resource.MustParse("1"), + MemoryRequest: resource.MustParse("5Mi"), + MemoryLimit: resource.MustParse("10Mi"), + } + rootSync.Spec.Override.Resources = []v1beta1.ContainerResourcesSpec{ + reconcilerResourceSpec, + } + if err := nt.KubeClient.Update(rootSync); err != nil { + nt.T.Fatal(err) + } + + // Wait for the reconciler Deployment to reflect the RootSync changes + reconcilerKey := core.RootReconcilerObjectKey(configsync.RootSyncName) + err := nt.Watcher.WatchObject(kinds.Deployment(), reconcilerKey.Name, reconcilerKey.Namespace, []testpredicates.Predicate{ + testpredicates.DeploymentContainerResourcesEqual(reconcilerResourceSpec), + testpredicates.StatusEquals(nt.Scheme, kstatus.CurrentStatus), + }) + if err != nil { + nt.T.Fatal(err) + } + + if err := nt.WatchForAllSyncs(); err != nil { + nt.T.Fatal(err) + } + reconcilerPod, err := nt.KubeClient.GetDeploymentPod(reconcilerKey.Name, reconcilerKey.Namespace, nt.DefaultWaitTimeout) + if err != nil { + nt.T.Fatal(err) + } + nt.T.Log("Reconciler container specs (before):") + for _, container := range reconcilerPod.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } + + ns := "my-ns-1" + nt.Must(nt.RootRepos[configsync.RootSyncName].Add("acme/ns.yaml", fake.NamespaceObject(ns))) + + labelKey := "StressTestName" + labelValue := "TestStressLargeNamespace" + for i := 1; i <= 5000; i++ { + nt.Must(nt.RootRepos[configsync.RootSyncName].Add(fmt.Sprintf("acme/cm-%d.yaml", i), fake.ConfigMapObject( + core.Name(fmt.Sprintf("cm-%d", i)), core.Namespace(ns), core.Label(labelKey, labelValue)))) + } + nt.Must(nt.RootRepos[configsync.RootSyncName].CommitAndPush("Add 5000 ConfigMaps and 1 Namespace")) + err = nt.WatchForAllSyncs(nomostest.WithTimeout(10 * time.Minute)) + if err != nil { + nt.T.Fatal(err) + } + + nt.T.Log("Verify there are 5000 ConfigMaps in the namespace") + cmList := &metav1.PartialObjectMetadataList{} + cmList.SetGroupVersionKind(kinds.ConfigMap()) if err := nt.KubeClient.List(cmList, &client.ListOptions{Namespace: ns}, client.MatchingLabels{labelKey: labelValue}); err != nil { nt.T.Error(err) } if len(cmList.Items) != 5000 { nt.T.Errorf("The %s namespace should include 5000 ConfigMaps having the `%s: %s` label exactly, found %v instead", ns, labelKey, labelValue, len(cmList.Items)) } + + reconcilerPod, err = nt.KubeClient.GetDeploymentPod(reconcilerKey.Name, reconcilerKey.Namespace, nt.DefaultWaitTimeout) + if err != nil { + nt.T.Fatal(err) + } + nt.T.Log("Reconciler container specs (after):") + for _, container := range reconcilerPod.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } + + vpa := &autoscalingv1vpa.VerticalPodAutoscaler{} + if err := 
nt.KubeClient.Get(reconcilerKey.Name, reconcilerKey.Namespace, vpa); err != nil { + nt.T.Fatal(err) + } + nt.T.Log("Reconciler VPA recommendations:") + nt.T.Log(log.AsYAML(vpa.Status.Recommendation.ContainerRecommendations)) } // TestStressFrequentGitCommits adds 100 Git commits, and verifies that Config Sync can sync the changes in these commits successfully. func TestStressFrequentGitCommits(t *testing.T) { - nt := nomostest.New(t, nomostesting.Reconciliation1, ntopts.Unstructured, ntopts.StressTest, + nt := nomostest.New(t, nomostesting.Reconciliation1, ntopts.Unstructured, + ntopts.StressTest, ntopts.VPATest, ntopts.WithReconcileTimeout(configsync.DefaultReconcileTimeout)) nt.T.Log("Stop the CS webhook by removing the webhook configuration") nomostest.StopWebhook(nt) @@ -204,7 +305,8 @@ func TestStressFrequentGitCommits(t *testing.T) { } nt.T.Logf("Verify that there are exactly 100 ConfigMaps under the %s namespace", ns) - cmList := &corev1.ConfigMapList{} + cmList := &metav1.PartialObjectMetadataList{} + cmList.SetGroupVersionKind(kinds.ConfigMap()) if err := nt.KubeClient.List(cmList, &client.ListOptions{Namespace: ns}, client.MatchingLabels{labelKey: labelValue}); err != nil { nt.T.Error(err) } diff --git a/e2e/testdata/metrics-server/components.yaml b/e2e/testdata/metrics-server/components.yaml new file mode 100644 index 0000000000..bba2de0088 --- /dev/null +++ b/e2e/testdata/metrics-server/components.yaml @@ -0,0 +1,263 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
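+# This manifest is rendered from the upstream metrics-server Helm chart by
+# update.sh in this directory. Metrics Server serves the metrics.k8s.io API,
+# which the VPA recommender reads to produce resource recommendations in the
+# e2e tests.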
+ +--- +# Source: metrics-server/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-metrics-server + namespace: default + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +--- +# Source: metrics-server/templates/clusterrole-aggregated-reader.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:metrics-server-aggregated-reader + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +# Source: metrics-server/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:test-metrics-server + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + - configmaps + verbs: + - get + - list + - watch +--- +# Source: metrics-server/templates/clusterrolebinding-auth-delegator.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-metrics-server:system:auth-delegator + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: test-metrics-server + namespace: default +--- +# Source: metrics-server/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:test-metrics-server + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:test-metrics-server +subjects: + - kind: ServiceAccount + name: test-metrics-server + namespace: default +--- +# Source: metrics-server/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-metrics-server-auth-reader + namespace: kube-system + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: test-metrics-server + namespace: default +--- +# Source: metrics-server/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-metrics-server + namespace: default + labels: + helm.sh/chart: metrics-server-3.10.0 + 
app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test +--- +# Source: metrics-server/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-metrics-server + namespace: default + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + template: + metadata: + labels: + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + spec: + schedulerName: + serviceAccountName: test-metrics-server + priorityClassName: "system-cluster-critical" + containers: + - name: metrics-server + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + image: registry.k8s.io/metrics-server/metrics-server:v0.6.3 + imagePullPolicy: IfNotPresent + args: + - --secure-port=10250 + - --cert-dir=/tmp + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + ports: + - name: https + protocol: TCP + containerPort: 10250 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + initialDelaySeconds: 0 + periodSeconds: 10 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + volumeMounts: + - name: tmp + mountPath: /tmp + volumes: + - name: tmp + emptyDir: {} +--- +# Source: metrics-server/templates/apiservice.yaml +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + helm.sh/chart: metrics-server-3.10.0 + app.kubernetes.io/name: metrics-server + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.6.3" + app.kubernetes.io/managed-by: Helm +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: test-metrics-server + namespace: default + port: 443 + version: v1beta1 + versionPriority: 100 diff --git a/e2e/testdata/metrics-server/update.sh b/e2e/testdata/metrics-server/update.sh new file mode 100755 index 0000000000..f9cc3fa914 --- /dev/null +++ b/e2e/testdata/metrics-server/update.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
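+
+# Usage: ./update.sh (requires the helm CLI on PATH)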
+ +# Update Metrics Server YAML from GitHub + +set -euo pipefail + +pkg_dir="$(dirname "$(realpath "$0")")" +cd "${pkg_dir}" + +TARGET_REGISTRY="registry.k8s.io/metrics-server" # TODO: re-push to CI registry +TARGET_IMAGE_TAG="v0.6.3" +DEFAULT_REGISTRY="registry.k8s.io/metrics-server" + +# Download and render the template yaml, replace the images, and write to disk. +# WARNING: Only use insecure TLS for testing! +# TODO: How do you specify version in helm? +helm template "test" \ + --set "args={--kubelet-insecure-tls}" \ + --repo https://kubernetes-sigs.github.io/metrics-server \ + metrics-server \ + | sed -e "s,${DEFAULT_REGISTRY}/\([a-z-]*\):.*,${TARGET_REGISTRY}/\1:${TARGET_IMAGE_TAG}," \ + > components.yaml diff --git a/e2e/testdata/vertical-pod-autoscaler/components.yaml b/e2e/testdata/vertical-pod-autoscaler/components.yaml new file mode 100644 index 0000000000..8d6bf45d08 --- /dev/null +++ b/e2e/testdata/vertical-pod-autoscaler/components.yaml @@ -0,0 +1,1752 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Source: vertical-pod-autoscaler/templates/admission-controller/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-vertical-pod-autoscaler-admission-controller + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-controller + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/recommender/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-vertical-pod-autoscaler-recommender + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: recommender + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/updater/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-vertical-pod-autoscaler-updater + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: updater + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/admission-controller/tls-secret.yaml +# +apiVersion: v1 +kind: Secret +metadata: + name: test-vertical-pod-autoscaler-admission-controller-tls + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-controller + namespace: default +type: Opaque +data: + ca.crt: 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaRENDQWt5Z0F3SUJBZ0lSQUpzRlNRZVhyTE1PWGJGL2RUajFydTB3RFFZSktvWklodmNOQVFFTEJRQXcKUERFNk1EZ0dBMVVFQXhNeGRHVnpkQzEyWlhKMGFXTmhiQzF3YjJRdFlYVjBiM05qWVd4bGNpMWhaRzFwYzNOcApiMjR0WTI5dWRISnZiR3hsY2pBZUZ3MHlNekEzTVRRd01qTTRNRFZhRncweU5EQTNNVE13TWpNNE1EVmFNRHd4Ck9qQTRCZ05WQkFNVE1YUmxjM1F0ZG1WeWRHbGpZV3d0Y0c5a0xXRjFkRzl6WTJGc1pYSXRZV1J0YVhOemFXOXUKTFdOdmJuUnliMnhzWlhJd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUMyeWRhegpVNnpKUnpaUFgxWnJRYkxLZHZvRFNrRURMV0UwODJlRFltY2kxWHo2aHVJVHJ0MTdGU2NlbEszNjlMbEprM29wCkNDKzVkbGQrSUVOakV3VkR1MUtwQ1ZWVVhzeTFlMXpqdEhFMXhTTUpOYUg3akhBdk96WUdkbEp1SmsxOWEweDIKNTVTNDJhd2phUk9WQldVNitOTVZ1emFObGc3MHdlZ0xtMUpLVXRwN3lreUFZcDF3OHZrMjYwWnc3c0lONmtWegpFdzhCdTRaYkhlNmVGblNTRmJBbzA1dTZTL3d4Z0p4d3NDOWsrQ0xLb0RKNXh1L3pDMDMwaWZ0Ujd4ZDFrVnVNCkhuN2treGxiOU5nUjNBQ1RzeDM4MWg5QWdzc0xLSzJEUUNDMkttOG4xaDkvY1JLMVIveDl0WWE0dkk1QU9aV2YKZC9yajRVd25LamYyTDJ4ZkFnTUJBQUdqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVQpCZ2dyQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVClJlRWtTcmpzejg3MHl5YWd0OW9hSUk1Smg3Y3dEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRFFFQkpEaC9uYXEKOHR1K2NnS2QxQTRqckYrRmdJblB3MDhTQXJMc0htMjdMcUFsQm9lN1FEZ0JwcUIzeWdjMzlJaEVNUVpNUklpdgo1cUlLZGVGWk9uYkVmOVJxa2thM2RQZmVsSGluaS8yV1FIZUY1S2tWb3IrQnp1MTZOMnJzYndXRlRLL0NidGpoClovQm1aYnY4TWp3RUVMM2pxVGdVVkd5Y3J2ZFBwd25HOUxiN28wNWZ5b3lJbXF3VGFlZmFxN2YyeFBYZHRKMlcKYko0dXZaVldTSDRyazh4S3pVbnRlQnI2c0NIUExFblNjWXJKc1RKakpNWWJPdllhSFFOcjcrZVB0QWwydEEyQgo5SXowME5IZ3FZTXVvZ1A0NGtSai9MeFJPTEtzU09XS3dTbEQybWRsMTFROGhQdDZBUWs2TFQ3UzZ5YXp6MzRaCnB5UTNwNzNqeFRBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + tls.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURiakNDQWxhZ0F3SUJBZ0lRSHc4bDhnSkU2SjVWbmlEMjBhOE9kakFOQmdrcWhraUc5dzBCQVFzRkFEQTgKTVRvd09BWURWUVFERXpGMFpYTjBMWFpsY25ScFkyRnNMWEJ2WkMxaGRYUnZjMk5oYkdWeUxXRmtiV2x6YzJsdgpiaTFqYjI1MGNtOXNiR1Z5TUI0WERUSXpNRGN4TkRBeU16Z3dObG9YRFRJME1EY3hNekF5TXpnd05sb3dJakVnCk1CNEdBMVVFQXhNWGRuQmhMWGRsWW1odmIyc3VaR1ZtWVhWc2RDNXpkbU13Z2dFaU1BMEdDU3FHU0liM0RRRUIKQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUMxem5yM1Bka0N4VG1ZcnViNXIxWjY5OThUTHFmdU9tZjJZMVJMSDhXSwpubjdqcDI4Unh2UElHWkFsWVB5NEU1QVJvNDdMTXJrTlFpVkM1Unl2YjZVdWs0aEV3WEEwZzZkSHFmV09ZN0ErCk9TVDVKQkwvMWN0Nm1Ec3NQRmRvSzZ1a0hYU05vSFdCVUtQWFJlZVI3ai9GUytDSW9EWVZrMExRcEc2Slp1aCsKUVdKSmhpYW5ROTgyN3ZTdWpVZkNqUmRQMnU0YlRVRzlrSzR4ZlFvMW9OYWVWMWZRZ1dFV0d3YTMzMGFuUk1UNApRMGRUWWZ2SzB4ZElneWJkWEpwR1VvTkNPL0s1Sm43aUN4YjBrUmkxMWk2cGVjY0lpVzhJUkNWZjZwaTZiQWVPCkJPUVdPdDl3L01mNzFWbmp1Q093cEMxM2tQblBvZEdEWVRwaUlZY0doYkkvQWdNQkFBR2pnWVV3Z1lJd0RnWUQKVlIwUEFRSC9CQVFEQWdXZ01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFNQmdOVgpIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkVYaEpFcTQ3TS9POU1zbW9MZmFHaUNPU1llM01DSUdBMVVkCkVRUWJNQm1DRjNad1lTMTNaV0pvYjI5ckxtUmxabUYxYkhRdWMzWmpNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUIKQVFCZXR2ejU0b0JCbERQQ0xsdmY4c1lPVkxWYUhtUWJiNTNITzR1N0lsOTRTVWF3TnVOeDJ5bStVeUQ5MjhsRgpjN0lhMFZVSGZFS0w5WHF5UlZQL1Q1RzhtT1cyY3M4ZE5TeGtISWFUR3kyc0U0VUhvaXdwRjZnYlkyNUlSU0ZyClJnM3VRNmhiNDVGYkowbEdqWGJZNVZUU2ZjZCtvUW1wSDFCSUQwbEMvcEVUV2hUVi9wQ0lYN3Q3UW43R21WbSsKZFBLWHB0U0EvU2d1OTVwVFAzNXJDYXZheWxMeFBBVEJ6bjNmRytpRlZHdWJwQVlSTjc1TmczeDZ5MkRYZE9ITgpwcENScG1BVStoOCt3MnFzZGFaYlBERWl5djR3WHpTaFp3WnRROFp2STd0Zm5UU2R5L2lIcnNUbTNHY2hDMmhvCmxhR2ZhRFNTNy92aGphUkQybDF3N0lZUwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + tls.key: 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdGM1Njl6M1pBc1U1bUs3bSthOVdldmZmRXk2bjdqcG45bU5VU3gvRmlwNSs0NmR2CkVjYnp5Qm1RSldEOHVCT1FFYU9PeXpLNURVSWxRdVVjcjIrbExwT0lSTUZ3TklPblI2bjFqbU93UGpraytTUVMKLzlYTGVwZzdMRHhYYUN1cnBCMTBqYUIxZ1ZDajEwWG5rZTQveFV2Z2lLQTJGWk5DMEtSdWlXYm9ma0ZpU1lZbQpwMFBmTnU3MHJvMUh3bzBYVDlydUcwMUJ2WkN1TVgwS05hRFdubGRYMElGaEZoc0d0OTlHcDBURStFTkhVMkg3Cnl0TVhTSU1tM1Z5YVJsS0RRanZ5dVNaKzRnc1c5SkVZdGRZdXFYbkhDSWx2Q0VRbFgrcVl1bXdIamdUa0ZqcmYKY1B6SCs5Vlo0N2dqc0tRdGQ1RDV6NkhSZzJFNllpR0hCb1d5UHdJREFRQUJBb0lCQUdia1BvOWlmMGlrR2Q2aApUYk1OV21JcG1pUWZDWWMzb3RLOGtGNGIwdzZLZU1HYVpxK202eHpaNnN4Y3dXbmtHR3NESjdNME42cWJQcjlICkx4N1NqcWQzYzIyR0ZGeUVEYzN2RUR2UGkxVUx2ak1LQ1g2a25vMzVUYlBSU3A1MzZLSHUwYUc5cUFMUmdvZzQKMWIrdkdJUXVWeHE3b1NCQXdWV1hXYitGY0JyVExRU1oyZWxTY0pHWk43cng2ZlNvWWUvSHJ6bGhRMUZCVmk5MwpYTG1IaW9mQ1F3dFZnYkZ2eXdiYU9YeHVKbmxIRnF6UGU3VTdNZzVmbUZaYmIvK1EySDE4NEdMY1VXUk1sN3RpCnhQa3Fsa0dvWXRIR2loV3QrWDhJc2tNMkY5ZVNqUW05bXYzdTlxMm1jdEt0dW5od2hoZE5WdjRFNlhENDhlK0gKYWkzdVprRUNnWUVBNitEQkV0OFF5N3V4UlBYS1F5a3hQTWZvenhNWlRreXd0OFgyMFZxajIyTnJIZVZRbFdKZgpHVkR0ZEFNWThiL1dFNWhwNHo5L1FSMEJPdWNXbzdZTXFxLzFJbnpZKzZBSmtFZUdVQnhDWndNZmtaQ1JGK3k0CjUvS2ZCdW5QWXVwUW5STzA5U2VlcUhTeDdtOWl3cEZIZFVzM0JVcEVtMFg5aXdELzlWRUYyL0VDZ1lFQXhWRGgKNlN5S1RsUW1GZlIzdVNzcnZkOU5RQURKWkJ1UzViS1FiQkFsaGlrNzZhWlV4cjFtandFUGYraG9mUkp0TEw3Lwp0bFgzd05ob2hGajVyYWdHTGl0WHRmQzByeHdWOU1ka0dEeTBpcGVGck5qRkhSWTNEYTlSMCtrTEFEdW9wbVV4CkJ4bHBkMDRRVXo5dWlWL2o1cUF6WHNiYUtHKzBDSTA1OXB2Y1lTOENnWUVBazd6Nk1TKzEvV1ZaNnE5b09CaWMKMWt6LzdITXg3bkRxWHJDSG55aithSE1rckZ6czFDUUJISUlydC9ZWXNaSk9LNGZUQ3ByQ3dhM0JEM3VlR252MApBeFNpblM5RldKVXNhRW5rUGxqMDNrQitqVlhYQ3YwaDgyVnJaWkVjYkFBRzdkdGlCQXNFdk00YWhCVVA3bjNBCkhCODRId0FkYUlPMkFsSmwvdWo0bW9FQ2dZQXBXTUtPSHZRNFo2KzNhQXZ5ZEZadEtpN0EyNmxYM2VOUldMS2sKd2xscW5KZGVaYWxjSC9JdXd6VWhRMjBwMEVPcVhiZXQyS1BhK0Q0bWhaS0VUakpGV0hXL3ZWblBXOTVKOWNKRwp4ZW03SlBjdytoZUwrY3k5aEdCc1BHZyt1MEdpNUk5NmdsdTBVVnBnWjJsZFJnZTJteGxJb2ZpekFuMUNVWDIwCjdJSW9wd0tCZ0ZXTS84R0czY3loYXp4bk9DcUVaVGs2NmNreVZLOXBFaWxPODBiWHpSUFBLczN6SU9hNUVJaHoKWWduNG5tNHdJRm4wV09lRTdybW0rbE9xNkxseTdYMjVMeTFQWTBaQkhLT3dsVXJkQ0NGMnIwc0xZd3BBQmNRdQpZU0xubC9RU3FNUGtTUnJsc3lnM1k4Zlp3a0VNU1U3SUpDaE5SenlHWG9GaHBGdkEvS3BzCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" +--- +# Source: vertical-pod-autoscaler/templates/admission-controller/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test-vertical-pod-autoscaler-admission-controller + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-controller +rules: + # system:vpa-target-reader + - apiGroups: + - "*" + resources: + - "*/scale" + verbs: + - get + - watch + - apiGroups: + - "" + resources: + - replicationcontrollers + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - list + - watch + # system:vpa-admission-controller + - apiGroups: + - "" + resources: + - pods + - configmaps + - nodes + - limitranges + verbs: + - get + - list + - watch + - apiGroups: + - "admissionregistration.k8s.io" + resources: + - mutatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - apiGroups: + - "poc.autoscaling.k8s.io" + resources: + - 
verticalpodautoscalers + verbs: + - get + - list + - watch + - apiGroups: + - "autoscaling.k8s.io" + resources: + - verticalpodautoscalers + verbs: + - get + - list + - watch + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - create + - update + - get + - list + - watch +--- +# Source: vertical-pod-autoscaler/templates/recommender/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test-vertical-pod-autoscaler-recommender + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: recommender +rules: + # system:metrics-reader + - apiGroups: + - "metrics.k8s.io" + resources: + - pods + verbs: + - get + - list + # system:vpa-actor + - apiGroups: + - "" + resources: + - pods + - nodes + - limitranges + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - apiGroups: + - "poc.autoscaling.k8s.io" + resources: + - verticalpodautoscalers + verbs: + - get + - list + - watch + - patch + - apiGroups: + - "autoscaling.k8s.io" + resources: + - verticalpodautoscalers + verbs: + - get + - list + - watch + - patch + # system:vpa-checkpoint-actor + - apiGroups: + - "poc.autoscaling.k8s.io" + resources: + - verticalpodautoscalercheckpoints + verbs: + - get + - list + - watch + - create + - patch + - delete + - apiGroups: + - "autoscaling.k8s.io" + resources: + - verticalpodautoscalercheckpoints + verbs: + - get + - list + - watch + - create + - patch + - delete + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + # system:vpa-target-reader + - apiGroups: + - "*" + resources: + - "*/scale" + verbs: + - get + - watch + - apiGroups: + - "" + resources: + - replicationcontrollers + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - list + - watch +--- +# Source: vertical-pod-autoscaler/templates/updater/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test-vertical-pod-autoscaler-updater + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: updater +rules: + # system:vpa-actor + - apiGroups: + - "" + resources: + - pods + - nodes + - limitranges + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - apiGroups: + - "poc.autoscaling.k8s.io" + resources: + - verticalpodautoscalers + verbs: + - get + - list + - watch + - patch + - apiGroups: + - "autoscaling.k8s.io" + resources: + - verticalpodautoscalers + verbs: + - get + - list + - watch + - patch + # system:vpa-target-reader + - apiGroups: + - "*" + resources: + - "*/scale" + verbs: + - get + - watch + - apiGroups: + - "" + resources: + - replicationcontrollers + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + - cronjobs + 
verbs: + - get + - list + - watch + # system:evictioner + - apiGroups: + - "apps" + - "extensions" + resources: + - replicasets + verbs: + - get + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create + # system:vpa-status-reader + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - list + - watch +--- +# Source: vertical-pod-autoscaler/templates/admission-controller/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-vertical-pod-autoscaler-admission-controller + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: test-vertical-pod-autoscaler-admission-controller +subjects: + - kind: ServiceAccount + name: test-vertical-pod-autoscaler-admission-controller + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/recommender/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-vertical-pod-autoscaler-recommender + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: recommender +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: test-vertical-pod-autoscaler-recommender +subjects: + - kind: ServiceAccount + name: test-vertical-pod-autoscaler-recommender + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/updater/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-vertical-pod-autoscaler-updater + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: updater +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: test-vertical-pod-autoscaler-updater +subjects: + - kind: ServiceAccount + name: test-vertical-pod-autoscaler-updater + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/admission-controller/metrics-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-vertical-pod-autoscaler-admission-controller-metrics + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-controller + namespace: default +spec: + type: ClusterIP + ports: + - port: 8944 + targetPort: http-metrics + protocol: TCP + name: http-metrics + selector: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: admission-controller +--- +# Source: vertical-pod-autoscaler/templates/admission-controller/service.yaml +apiVersion: v1 +kind: Service +metadata: + # name: test-vertical-pod-autoscaler-admission-controller + name: vpa-webhook + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + 
app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-controller + namespace: default +spec: + type: ClusterIP + ports: + # - port: + - port: 443 + targetPort: https + protocol: TCP + name: https + selector: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: admission-controller +--- +# Source: vertical-pod-autoscaler/templates/recommender/metrics-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-vertical-pod-autoscaler-recommender-metrics + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: recommender + namespace: default +spec: + type: ClusterIP + ports: + - port: 8942 + targetPort: http-metrics + protocol: TCP + name: http-metrics + selector: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: recommender +--- +# Source: vertical-pod-autoscaler/templates/updater/metrics-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-vertical-pod-autoscaler-updater-metrics + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: updater + namespace: default +spec: + type: ClusterIP + ports: + - port: 8943 + targetPort: http-metrics + protocol: TCP + name: http-metrics + selector: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: updater +--- +# Source: vertical-pod-autoscaler/templates/admission-controller/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-vertical-pod-autoscaler-admission-controller + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-controller + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: admission-controller + template: + metadata: + labels: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: admission-controller + annotations: + checksum/tls-secret: 2dddb4ade2aa61f1b9cf5dfd527bf727f571005c6e053f8a464dc7ebd4ed3842 + spec: + serviceAccountName: test-vertical-pod-autoscaler-admission-controller + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: admission-controller + securityContext: {} + image: registry.k8s.io/autoscaling/vpa-admission-controller:0.14.0 + imagePullPolicy: IfNotPresent + args: + - --client-ca-file=/tls-secret/ca.crt + - --tls-cert-file=/tls-secret/tls.crt + - --tls-private-key=/tls-secret/tls.key + - --v=2 + env: + - name: NAMESPACE + value: default + ports: + - name: https + containerPort: 8000 + protocol: TCP + - name: http-metrics + containerPort: 8944 + protocol: TCP + livenessProbe: + httpGet: + path: /health-check + port: http-metrics + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + 
failureThreshold: 3 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health-check + port: http-metrics + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + resources: {} + volumeMounts: + - name: tls-secret + mountPath: /tls-secret + readOnly: true + volumes: + - name: tls-secret + secret: + secretName: test-vertical-pod-autoscaler-admission-controller-tls +--- +# Source: vertical-pod-autoscaler/templates/recommender/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-vertical-pod-autoscaler-recommender + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: recommender + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: recommender + template: + metadata: + labels: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: recommender + spec: + serviceAccountName: test-vertical-pod-autoscaler-recommender + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: recommender + securityContext: {} + image: registry.k8s.io/autoscaling/vpa-recommender:0.14.0 + imagePullPolicy: IfNotPresent + args: + - --v=2 + ports: + - name: http-metrics + containerPort: 8942 + protocol: TCP + livenessProbe: + httpGet: + path: /health-check + port: http-metrics + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health-check + port: http-metrics + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + resources: {} +--- +# Source: vertical-pod-autoscaler/templates/updater/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-vertical-pod-autoscaler-updater + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: updater + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: updater + template: + metadata: + labels: + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/component: updater + spec: + serviceAccountName: test-vertical-pod-autoscaler-updater + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: updater + securityContext: {} + image: registry.k8s.io/autoscaling/vpa-updater:0.14.0 + imagePullPolicy: IfNotPresent + args: + - --v=2 + env: + - name: NAMESPACE + value: default + ports: + - name: http-metrics + containerPort: 8943 + protocol: TCP + livenessProbe: + httpGet: + path: /health-check + port: http-metrics + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health-check + port: http-metrics + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + resources: {} +--- +# Source: 
vertical-pod-autoscaler/templates/crds/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-vertical-pod-autoscaler-crds + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: crds + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/crds/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-vertical-pod-autoscaler-crds + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: crds + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + namespace: default +data: + verticalpodautoscalercheckpoints.yaml: | + apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + name: verticalpodautoscalercheckpoints.autoscaling.k8s.io + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes/kubernetes/pull/63797 + spec: + group: autoscaling.k8s.io + names: + kind: VerticalPodAutoscalerCheckpoint + listKind: VerticalPodAutoscalerCheckpointList + plural: verticalpodautoscalercheckpoints + shortNames: + - vpacheckpoint + singular: verticalpodautoscalercheckpoint + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VerticalPodAutoscalerCheckpoint is the checkpoint of the internal + state of VPA that is used for recovery after recommender's restart. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the checkpoint. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.' + properties: + containerName: + description: Name of the checkpointed container. + type: string + vpaObjectName: + description: Name of the VPA object that stored VerticalPodAutoscalerCheckpoint + object. + type: string + type: object + status: + description: Data of the checkpoint. + properties: + cpuHistogram: + description: Checkpoint of histogram for consumption of CPU. + properties: + bucketWeights: + description: Map from bucket index to bucket weight. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + referenceTimestamp: + description: Reference timestamp for samples collected within + this histogram. + format: date-time + nullable: true + type: string + totalWeight: + description: Sum of samples to be used as denominator for weights + from BucketWeights. + type: number + type: object + firstSampleStart: + description: Timestamp of the fist sample from the histograms. + format: date-time + nullable: true + type: string + lastSampleStart: + description: Timestamp of the last sample from the histograms. + format: date-time + nullable: true + type: string + lastUpdateTime: + description: The time when the status was last refreshed. + format: date-time + nullable: true + type: string + memoryHistogram: + description: Checkpoint of histogram for consumption of memory. + properties: + bucketWeights: + description: Map from bucket index to bucket weight. + type: object + x-kubernetes-preserve-unknown-fields: true + referenceTimestamp: + description: Reference timestamp for samples collected within + this histogram. + format: date-time + nullable: true + type: string + totalWeight: + description: Sum of samples to be used as denominator for weights + from BucketWeights. + type: number + type: object + totalSamplesCount: + description: Total number of samples in the histograms. + type: integer + version: + description: Version of the format of the stored data. + type: string + type: object + type: object + served: true + storage: true + - name: v1beta2 + schema: + openAPIV3Schema: + description: VerticalPodAutoscalerCheckpoint is the checkpoint of the internal + state of VPA that is used for recovery after recommender's restart. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the checkpoint. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.' + properties: + containerName: + description: Name of the checkpointed container. + type: string + vpaObjectName: + description: Name of the VPA object that stored VerticalPodAutoscalerCheckpoint + object. + type: string + type: object + status: + description: Data of the checkpoint. + properties: + cpuHistogram: + description: Checkpoint of histogram for consumption of CPU. + properties: + bucketWeights: + description: Map from bucket index to bucket weight. + type: object + x-kubernetes-preserve-unknown-fields: true + referenceTimestamp: + description: Reference timestamp for samples collected within + this histogram. + format: date-time + nullable: true + type: string + totalWeight: + description: Sum of samples to be used as denominator for weights + from BucketWeights. + type: number + type: object + firstSampleStart: + description: Timestamp of the fist sample from the histograms. 
+ format: date-time + nullable: true + type: string + lastSampleStart: + description: Timestamp of the last sample from the histograms. + format: date-time + nullable: true + type: string + lastUpdateTime: + description: The time when the status was last refreshed. + format: date-time + nullable: true + type: string + memoryHistogram: + description: Checkpoint of histogram for consumption of memory. + properties: + bucketWeights: + description: Map from bucket index to bucket weight. + type: object + x-kubernetes-preserve-unknown-fields: true + referenceTimestamp: + description: Reference timestamp for samples collected within + this histogram. + format: date-time + nullable: true + type: string + totalWeight: + description: Sum of samples to be used as denominator for weights + from BucketWeights. + type: number + type: object + totalSamplesCount: + description: Total number of samples in the histograms. + type: integer + version: + description: Version of the format of the stored data. + type: string + type: object + type: object + served: true + storage: false + verticalpodautoscalers.yaml: | + apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + name: verticalpodautoscalers.autoscaling.k8s.io + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes/kubernetes/pull/63797 + spec: + group: autoscaling.k8s.io + names: + kind: VerticalPodAutoscaler + listKind: VerticalPodAutoscalerList + plural: verticalpodautoscalers + shortNames: + - vpa + singular: verticalpodautoscaler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.updatePolicy.updateMode + name: Mode + type: string + - jsonPath: .status.recommendation.containerRecommendations[0].target.cpu + name: CPU + type: string + - jsonPath: .status.recommendation.containerRecommendations[0].target.memory + name: Mem + type: string + - jsonPath: .status.conditions[?(@.type=='RecommendationProvided')].status + name: Provided + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VerticalPodAutoscaler is the configuration for a vertical pod + autoscaler, which automatically manages pod resources based on historical + and real time resource utilization. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the behavior of the autoscaler. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.' + properties: + recommenders: + description: Recommender responsible for generating recommendation + for this object. 
List should be empty (then the default recommender + will generate the recommendation) or contain exactly one recommender. + items: + description: VerticalPodAutoscalerRecommenderSelector points to + a specific Vertical Pod Autoscaler recommender. In the future + it might pass parameters to the recommender. + properties: + name: + description: Name of the recommender responsible for generating + recommendation for this object. + type: string + required: + - name + type: object + type: array + resourcePolicy: + description: Controls how the autoscaler computes recommended resources. + The resource policy may be used to set constraints on the recommendations + for individual containers. If not specified, the autoscaler computes + recommended resources for all containers in the pod, without additional + constraints. + properties: + containerPolicies: + description: Per-container resource policies. + items: + description: ContainerResourcePolicy controls how autoscaler + computes the recommended resources for a specific container. + properties: + containerName: + description: Name of the container or DefaultContainerResourcePolicy, + in which case the policy is used by the containers that + don't have their own policy specified. + type: string + controlledResources: + description: Specifies the type of recommendations that + will be computed (and possibly applied) by VPA. If not + specified, the default of [ResourceCPU, ResourceMemory] + will be used. + items: + description: ResourceName is the name identifying various + resources in a ResourceList. + type: string + type: array + controlledValues: + description: Specifies which resource values should be controlled. + The default is "RequestsAndLimits". + enum: + - RequestsAndLimits + - RequestsOnly + type: string + maxAllowed: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Specifies the maximum amount of resources that + will be recommended for the container. The default is + no maximum. + type: object + minAllowed: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Specifies the minimal amount of resources that + will be recommended for the container. The default is + no minimum. + type: object + mode: + description: Whether autoscaler is enabled for the container. + The default is "Auto". + enum: + - Auto + - "Off" + type: string + type: object + type: array + type: object + targetRef: + description: TargetRef points to the controller managing the set of + pods for the autoscaler to control - e.g. Deployment, StatefulSet. + VerticalPodAutoscaler can be targeted at controller implementing + scale subresource (the pod set is retrieved from the controller's + ScaleStatus) or some well known controllers (e.g. for DaemonSet + the pod set is read from the controller's spec). If VerticalPodAutoscaler + cannot use specified target it will report ConfigUnsupported condition. + Note that VerticalPodAutoscaler does not require full implementation + of scale subresource - it will not use it to modify the replica + count. The only thing retrieved is a label selector matching pods + grouped by the target resource. 
+ properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + updatePolicy: + description: Describes the rules on how changes are applied to the + pods. If not specified, all fields in the `PodUpdatePolicy` are + set to their default values. + properties: + minReplicas: + description: Minimal number of replicas which need to be alive + for Updater to attempt pod eviction (pending other checks like + PDB). Only positive values are allowed. Overrides global '--min-replicas' + flag. + format: int32 + type: integer + updateMode: + description: Controls when autoscaler applies changes to the pod + resources. The default is 'Auto'. + enum: + - "Off" + - Initial + - Recreate + - Auto + type: string + type: object + required: + - targetRef + type: object + status: + description: Current information about the autoscaler. + properties: + conditions: + description: Conditions is the set of conditions required for this + autoscaler to scale its target, and indicates whether or not those + conditions are met. + items: + description: VerticalPodAutoscalerCondition describes the state + of a VerticalPodAutoscaler at a certain point. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another + format: date-time + type: string + message: + description: message is a human-readable explanation containing + details about the transition + type: string + reason: + description: reason is the reason for the condition's last transition. + type: string + status: + description: status is the status of the condition (True, False, + Unknown) + type: string + type: + description: type describes the current condition + type: string + required: + - status + - type + type: object + type: array + recommendation: + description: The most recently computed amount of resources recommended + by the autoscaler for the controlled pods. + properties: + containerRecommendations: + description: Resources recommended by the autoscaler for each + container. + items: + description: RecommendedContainerResources is the recommendation + of resources computed by autoscaler for a specific container. + Respects the container resource policy if present in the spec. + In particular the recommendation is not produced for containers + with `ContainerScalingMode` set to 'Off'. + properties: + containerName: + description: Name of the container. + type: string + lowerBound: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Minimum recommended amount of resources. Observes + ContainerResourcePolicy. This amount is not guaranteed + to be sufficient for the application to operate in a stable + way, however running with less resources is likely to + have significant impact on performance/availability. 
+ type: object + target: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Recommended amount of resources. Observes ContainerResourcePolicy. + type: object + uncappedTarget: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: The most recent recommended resources target + computed by the autoscaler for the controlled pods, based + only on actual resource usage, not taking into account + the ContainerResourcePolicy. May differ from the Recommendation + if the actual resource usage causes the target to violate + the ContainerResourcePolicy (lower than MinAllowed or + higher that MaxAllowed). Used only as status indication, + will not affect actual resource assignment. + type: object + upperBound: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Maximum recommended amount of resources. Observes + ContainerResourcePolicy. Any resources allocated beyond + this value are likely wasted. This value may be larger + than the maximum amount of application is actually capable + of consuming. + type: object + required: + - target + type: object + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: {} + - deprecated: true + deprecationWarning: autoscaling.k8s.io/v1beta2 API is deprecated + name: v1beta2 + schema: + openAPIV3Schema: + description: VerticalPodAutoscaler is the configuration for a vertical pod + autoscaler, which automatically manages pod resources based on historical + and real time resource utilization. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the behavior of the autoscaler. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.' + properties: + resourcePolicy: + description: Controls how the autoscaler computes recommended resources. + The resource policy may be used to set constraints on the recommendations + for individual containers. If not specified, the autoscaler computes + recommended resources for all containers in the pod, without additional + constraints. + properties: + containerPolicies: + description: Per-container resource policies. + items: + description: ContainerResourcePolicy controls how autoscaler + computes the recommended resources for a specific container. 
+ properties: + containerName: + description: Name of the container or DefaultContainerResourcePolicy, + in which case the policy is used by the containers that + don't have their own policy specified. + type: string + maxAllowed: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Specifies the maximum amount of resources that + will be recommended for the container. The default is + no maximum. + type: object + minAllowed: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Specifies the minimal amount of resources that + will be recommended for the container. The default is + no minimum. + type: object + mode: + description: Whether autoscaler is enabled for the container. + The default is "Auto". + enum: + - Auto + - "Off" + type: string + type: object + type: array + type: object + targetRef: + description: TargetRef points to the controller managing the set of + pods for the autoscaler to control - e.g. Deployment, StatefulSet. + VerticalPodAutoscaler can be targeted at controller implementing + scale subresource (the pod set is retrieved from the controller's + ScaleStatus) or some well known controllers (e.g. for DaemonSet + the pod set is read from the controller's spec). If VerticalPodAutoscaler + cannot use specified target it will report ConfigUnsupported condition. + Note that VerticalPodAutoscaler does not require full implementation + of scale subresource - it will not use it to modify the replica + count. The only thing retrieved is a label selector matching pods + grouped by the target resource. + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + updatePolicy: + description: Describes the rules on how changes are applied to the + pods. If not specified, all fields in the `PodUpdatePolicy` are + set to their default values. + properties: + updateMode: + description: Controls when autoscaler applies changes to the pod + resources. The default is 'Auto'. + enum: + - "Off" + - Initial + - Recreate + - Auto + type: string + type: object + required: + - targetRef + type: object + status: + description: Current information about the autoscaler. + properties: + conditions: + description: Conditions is the set of conditions required for this + autoscaler to scale its target, and indicates whether or not those + conditions are met. + items: + description: VerticalPodAutoscalerCondition describes the state + of a VerticalPodAutoscaler at a certain point. 
+ properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another + format: date-time + type: string + message: + description: message is a human-readable explanation containing + details about the transition + type: string + reason: + description: reason is the reason for the condition's last transition. + type: string + status: + description: status is the status of the condition (True, False, + Unknown) + type: string + type: + description: type describes the current condition + type: string + required: + - status + - type + type: object + type: array + recommendation: + description: The most recently computed amount of resources recommended + by the autoscaler for the controlled pods. + properties: + containerRecommendations: + description: Resources recommended by the autoscaler for each + container. + items: + description: RecommendedContainerResources is the recommendation + of resources computed by autoscaler for a specific container. + Respects the container resource policy if present in the spec. + In particular the recommendation is not produced for containers + with `ContainerScalingMode` set to 'Off'. + properties: + containerName: + description: Name of the container. + type: string + lowerBound: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Minimum recommended amount of resources. Observes + ContainerResourcePolicy. This amount is not guaranteed + to be sufficient for the application to operate in a stable + way, however running with less resources is likely to + have significant impact on performance/availability. + type: object + target: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Recommended amount of resources. Observes ContainerResourcePolicy. + type: object + uncappedTarget: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: The most recent recommended resources target + computed by the autoscaler for the controlled pods, based + only on actual resource usage, not taking into account + the ContainerResourcePolicy. May differ from the Recommendation + if the actual resource usage causes the target to violate + the ContainerResourcePolicy (lower than MinAllowed or + higher that MaxAllowed). Used only as status indication, + will not affect actual resource assignment. + type: object + upperBound: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Maximum recommended amount of resources. Observes + ContainerResourcePolicy. Any resources allocated beyond + this value are likely wasted. This value may be larger + than the maximum amount of application is actually capable + of consuming. 
+ type: object + required: + - target + type: object + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: false +--- +# Source: vertical-pod-autoscaler/templates/tests/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-vertical-pod-autoscaler-tests + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: tests + annotations: + helm.sh/hook: test + namespace: default +data: + test_all.py: | + import requests + + + def test_admission_controller_service_connection(): + # url = "https://test-vertical-pod-autoscaler-admission-controller.default.svc:/" + url = "https://vpa-webhook.default.svc:443/" + verify = "/admission-controller-tls-secret/ca.crt" + + response = requests.get(url, verify=verify) + + assert response.status_code == 200 + + + def test_admission_controller_metrics_service_connection(): + url = "http://test-vertical-pod-autoscaler-admission-controller-metrics:8944/metrics" + + response = requests.get(url) + + assert response.status_code == 200 + + + def test_recommender_metrics_service_connection(): + url = "http://test-vertical-pod-autoscaler-recommender-metrics:8942/metrics" + + response = requests.get(url) + + assert response.status_code == 200 + + + def test_updater_metrics_service_connection(): + url = "http://test-vertical-pod-autoscaler-updater-metrics:8943/metrics" + + response = requests.get(url) + + assert response.status_code == 200 +--- +# Source: vertical-pod-autoscaler/templates/crds/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test-vertical-pod-autoscaler-crds + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: crds + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded +rules: + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - patch +--- +# Source: vertical-pod-autoscaler/templates/crds/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-vertical-pod-autoscaler-crds + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: crds + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: test-vertical-pod-autoscaler-crds +subjects: + - kind: ServiceAccount + name: test-vertical-pod-autoscaler-crds + namespace: default +--- +# Source: vertical-pod-autoscaler/templates/tests/pod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-vertical-pod-autoscaler-tests + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: tests + annotations: + helm.sh/hook: test + 
namespace: default +spec: + containers: + - name: tests + image: ghcr.io/cowboysysop/pytest:1.0.35 + imagePullPolicy: IfNotPresent + volumeMounts: + - name: tests + mountPath: /tests + readOnly: true + - name: admission-controller-tls-secret + mountPath: /admission-controller-tls-secret + readOnly: true + workingDir: /tests + restartPolicy: Never + volumes: + - name: tests + configMap: + name: test-vertical-pod-autoscaler-tests + - name: admission-controller-tls-secret + secret: + secretName: test-vertical-pod-autoscaler-admission-controller-tls +--- +# Source: vertical-pod-autoscaler/templates/crds/job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: test-vertical-pod-autoscaler-crds + labels: + helm.sh/chart: vertical-pod-autoscaler-7.2.0 + app.kubernetes.io/name: vertical-pod-autoscaler + app.kubernetes.io/instance: test + app.kubernetes.io/version: "0.14.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: crds + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + namespace: default +spec: + template: + spec: + serviceAccountName: test-vertical-pod-autoscaler-crds + containers: + - name: kubectl + image: docker.io/bitnami/kubectl:1.26.3 + imagePullPolicy: IfNotPresent + args: + - apply + - --filename=/config/verticalpodautoscalercheckpoints.yaml + - --filename=/config/verticalpodautoscalers.yaml + volumeMounts: + - name: config + mountPath: /config + readOnly: true + restartPolicy: Never + volumes: + - name: config + configMap: + name: test-vertical-pod-autoscaler-crds diff --git a/e2e/testdata/vertical-pod-autoscaler/update.sh b/e2e/testdata/vertical-pod-autoscaler/update.sh new file mode 100755 index 0000000000..bb20ac4140 --- /dev/null +++ b/e2e/testdata/vertical-pod-autoscaler/update.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Update VPA YAML from GitHub + +set -euo pipefail + +vpa_dir="$(dirname "$(realpath "$0")")" +cd "${vpa_dir}" + +DEFAULT_REGISTRY="registry.k8s.io/autoscaling" +TARGET_REGISTRY="registry.k8s.io/autoscaling" # TODO: re-push to CI registry +TARGET_IMAGE_TAG="0.14.0" + +# TARGET_GIT_TAG="vertical-pod-autoscaler-0.14.0" +# URL_PREFIX="https://raw.githubusercontent.com/kubernetes/autoscaler/${TARGET_GIT_TAG}/vertical-pod-autoscaler/deploy/" +# COMPONENTS="vpa-v1-crd-gen vpa-rbac updater-deployment recommender-deployment admission-controller-deployment" +# For each component, download the yaml, replace the images, and write to disk. +# for component in ${COMPONENTS}; do +# echo "Downloading ${component}.yaml" +# wget "${URL_PREFIX}${component}.yaml" -q -O - \ +# | sed -e "s,${DEFAULT_REGISTRY}/\([a-z-]*\):.*,${TARGET_REGISTRY}/\1:${TARGET_IMAGE_TAG}," \ +# > "${component}.yaml" +# done + +# Download and render the template yaml, replace the images, and write to disk. 
+# Installing from YAML source requires generating and signing a TLS cert, which +# is much easier to do with helm, but the upstream repo authors don't want to +# maintain a helm chart. So we're using a third-party one here, for testing. +# https://artifacthub.io/packages/helm/cowboysysop/vertical-pod-autoscaler +helm template "test" \ + --namespace "default" \ + --set ".Release.Namespace=default" \ + --repo https://cowboysysop.github.io/charts \ + --version "7.2.0" \ + vertical-pod-autoscaler \ + | sed -e "s,${DEFAULT_REGISTRY}/\([a-z-]*\):.*,${TARGET_REGISTRY}/\1:${TARGET_IMAGE_TAG}," \ + > components.yaml + +# Set namespace (because helm template won't do it...) +kpt fn eval components.yaml -i set-namespace:v0.4.1 -- namespace=default diff --git a/go.mod b/go.mod index 4e7c6a752e..912f11c94a 100644 --- a/go.mod +++ b/go.mod @@ -41,6 +41,7 @@ require ( k8s.io/api v0.26.7 k8s.io/apiextensions-apiserver v0.26.7 k8s.io/apimachinery v0.26.7 + k8s.io/autoscaler/vertical-pod-autoscaler v0.13.0 k8s.io/cli-runtime v0.26.7 k8s.io/client-go v0.26.7 k8s.io/cluster-registry v0.0.6 diff --git a/go.sum b/go.sum index bd64876a04..bbc83f45b6 100644 --- a/go.sum +++ b/go.sum @@ -654,6 +654,8 @@ k8s.io/apimachinery v0.26.7 h1:590jSBwaSHCAFCqltaEogY/zybFlhGsnLteLpuF2wig= k8s.io/apimachinery v0.26.7/go.mod h1:qYzLkrQ9lhrZRh0jNKo2cfvf/R1/kQONnSiyB7NUJU0= k8s.io/apiserver v0.26.7 h1:NX/zBZZn4R+Cq6shwyn8Pn8REd0yJJ16dbtv9WkEVEU= k8s.io/apiserver v0.26.7/go.mod h1:r0wDRWHI7VL/KlQLTkJJBVGZ3KeNfv+VetlyRtr86xs= +k8s.io/autoscaler/vertical-pod-autoscaler v0.13.0 h1:pH6AsxeBZcyX6KBqcnl7SPIJqbN1d59RrEBuIE6Rq6c= +k8s.io/autoscaler/vertical-pod-autoscaler v0.13.0/go.mod h1:LraL5kR2xX7jb4VMCG6/tUH4I75uRHlnzC0VWQHcyWk= k8s.io/cli-runtime v0.26.7 h1:ZhAV9RK9wzXUeMKVvtVVX8jnsJcxw6hcSAu4K3eQbEo= k8s.io/cli-runtime v0.26.7/go.mod h1:THp0KBlPxRk4SdpeoBmsuxJwNrwfpTT4+oDaNqhpv0c= k8s.io/client-go v0.26.7 h1:hyU9aKHlwVOykgyxzGYkrDSLCc4+mimZVyUJjPyUn1E= diff --git a/manifests/templates/reconciler-manager-configmap.yaml b/manifests/templates/reconciler-manager-configmap.yaml index 82fcba103b..f6ab1a09a6 100644 --- a/manifests/templates/reconciler-manager-configmap.yaml +++ b/manifests/templates/reconciler-manager-configmap.yaml @@ -75,10 +75,6 @@ data: drop: - NET_RAW runAsUser: 65533 - resources: - requests: - cpu: "10m" - memory: "100Mi" - name: reconciler image: RECONCILER_IMAGE_NAME command: @@ -97,10 +93,6 @@ data: readOnly: true - name: kube mountPath: /.kube - resources: - requests: - cpu: "50m" - memory: "200Mi" securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -125,17 +117,9 @@ data: drop: - NET_RAW runAsUser: 65533 - resources: - requests: - cpu: "10m" - memory: "200Mi" - name: gcenode-askpass-sidecar image: gcr.io/config-management-release/gcenode-askpass-sidecar:v1.0.4 args: ["--port=9102", "--logtostderr"] - resources: - requests: - cpu: 10m - memory: 20Mi imagePullPolicy: IfNotPresent terminationMessagePolicy: File terminationMessagePath: /dev/termination-log @@ -159,10 +143,6 @@ data: drop: - NET_RAW runAsUser: 65533 - resources: - requests: - cpu: "10m" - memory: "200Mi" - name: helm-sync image: HELM_SYNC_IMAGE_NAME args: ["--root=/repo/source", "--dest=rev", "--max-sync-failures=30", "--error-file=error.json"] @@ -180,23 +160,12 @@ data: drop: - NET_RAW runAsUser: 65533 - resources: - requests: - cpu: "50m" - memory: "200Mi" - name: otel-agent image: gcr.io/config-management-release/otelcontribcol:v0.54.0-gke.1 command: - /otelcontribcol args: - "--config=/conf/otel-agent-config.yaml" 
- resources: - limits: - cpu: 1 - memory: 1Gi - requests: - cpu: 10m - memory: 100Mi securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true diff --git a/pkg/core/scheme.go b/pkg/core/scheme.go index b0dd98388e..840b144853 100644 --- a/pkg/core/scheme.go +++ b/pkg/core/scheme.go @@ -17,6 +17,7 @@ package core import ( admissionv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" + autoscalingv1hpa "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -26,6 +27,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + autoscalingv1vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "k8s.io/client-go/kubernetes/scheme" clusterregistry "k8s.io/cluster-registry/pkg/apis/clusterregistry/v1alpha1" k8sadmissionv1 "k8s.io/kubernetes/pkg/apis/admission/v1" @@ -60,6 +62,7 @@ var Scheme = scheme.Scheme func init() { mustRegisterKubernetesResources() mustRegisterAPIExtensionsResources() + mustRegisterAutoscalingResources() // Config Sync types utilruntime.Must(clusterregistry.AddToScheme(scheme.Scheme)) @@ -122,3 +125,9 @@ func mustRegisterAPIExtensionsResources() { utilruntime.Must(scheme.Scheme.SetVersionPriority(apiextensionsv1.SchemeGroupVersion, apiextensionsv1beta1.SchemeGroupVersion)) } + +func mustRegisterAutoscalingResources() { + utilruntime.Must(autoscalingv1hpa.AddToScheme(scheme.Scheme)) + utilruntime.Must(autoscalingv1vpa.AddToScheme(scheme.Scheme)) + // autoscaling API has no generated defaults or conversions +} diff --git a/pkg/kinds/kinds.go b/pkg/kinds/kinds.go index ebaf3026c5..7b6937cd58 100644 --- a/pkg/kinds/kinds.go +++ b/pkg/kinds/kinds.go @@ -26,6 +26,7 @@ import ( rbacv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" + autoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "kpt.dev/configsync/pkg/api/configmanagement" v1 "kpt.dev/configsync/pkg/api/configmanagement/v1" "kpt.dev/configsync/pkg/api/configsync" @@ -263,3 +264,8 @@ func APIService() schema.GroupVersionKind { func ValidatingWebhookConfiguration() schema.GroupVersionKind { return admissionv1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration") } + +// VerticalPodAutoscaler returns the VerticalPodAutoscaler kind. +func VerticalPodAutoscaler() schema.GroupVersionKind { + return autoscalingv1.SchemeGroupVersion.WithKind("VerticalPodAutoscaler") +} diff --git a/pkg/metadata/annotations.go b/pkg/metadata/annotations.go index 8c7ed0599e..ed075d278d 100644 --- a/pkg/metadata/annotations.go +++ b/pkg/metadata/annotations.go @@ -145,6 +145,11 @@ const ( // reconciler-manager will create the reconciler with the hydration-controller // sidecar container. RequiresRenderingAnnotationKey = configsync.ConfigSyncPrefix + "requires-rendering" + + // ReconcilerAutoscalingStrategyAnnotationKey is the annotation key set on + // RootSync/RepoSync objects to indicate whether to autoscale the reconciler + // deployment and what strategy to use. + ReconcilerAutoscalingStrategyAnnotationKey = configsync.ConfigSyncPrefix + "reconciler-autoscaling-strategy" ) // Lifecycle annotations @@ -228,3 +233,23 @@ const ( // This is the default behavior if the annotation is not specified. 
DeletionPropagationPolicyOrphan = DeletionPropagationPolicy("Orphan") ) + +// ReconcilerAutoscalingStrategy is the type used to identify value enums to use +// with the reconciler-autoscaling-strategy annotation. +type ReconcilerAutoscalingStrategy string + +const ( + // ReconcilerAutoscalingStrategyAuto indicates that a VPA config should be + // applied for the reconciler Deployment, if the VPA CRD is installed. + ReconcilerAutoscalingStrategyAuto = ReconcilerAutoscalingStrategy("Auto") + + // ReconcilerAutoscalingStrategyRecommend indicates that a VPA config should + // be applied for the reconciler Deployment, if the VPA CRD is installed, + // but it should be configured only to monitor and make resource + // recommendations, not to apply them. + ReconcilerAutoscalingStrategyRecommend = ReconcilerAutoscalingStrategy("Recommend") + + // ReconcilerAutoscalingStrategyDisabled indicates that a VPA config should + // NOT be applied for the reconciler Deployment. + ReconcilerAutoscalingStrategyDisabled = ReconcilerAutoscalingStrategy("Disabled") +) diff --git a/pkg/metadata/metadata.go b/pkg/metadata/metadata.go index e738705a18..f2cb27081c 100644 --- a/pkg/metadata/metadata.go +++ b/pkg/metadata/metadata.go @@ -48,12 +48,13 @@ func GetNomosAnnotationKeys() []string { // in the source repository. // These annotations are set by Config Sync users. var sourceAnnotations = map[string]bool{ - NamespaceSelectorAnnotationKey: true, - LegacyClusterSelectorAnnotationKey: true, - ClusterNameSelectorAnnotationKey: true, - ResourceManagementKey: true, - LifecycleMutationAnnotation: true, - DeletionPropagationPolicyAnnotationKey: true, + NamespaceSelectorAnnotationKey: true, + LegacyClusterSelectorAnnotationKey: true, + ClusterNameSelectorAnnotationKey: true, + ResourceManagementKey: true, + LifecycleMutationAnnotation: true, + DeletionPropagationPolicyAnnotationKey: true, + ReconcilerAutoscalingStrategyAnnotationKey: true, } // IsSourceAnnotation returns true if the annotation is a ConfigSync source @@ -73,7 +74,8 @@ func IsConfigSyncAnnotationKey(k string) bool { return HasConfigSyncPrefix(k) || strings.HasPrefix(k, LifecycleMutationAnnotation) || k == OwningInventoryKey || - k == DeletionPropagationPolicyAnnotationKey + k == DeletionPropagationPolicyAnnotationKey || + k == ReconcilerAutoscalingStrategyAnnotationKey } // isConfigSyncAnnotation returns whether an annotation is a Config Sync annotation. 
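
Note on how the new annotation is consumed: the reconciler-manager treats it as a plain string enum and falls back to Disabled when the annotation is absent, so autoscaling stays opt-in. As a rough standalone sketch (not part of this patch; the type and function names below are invented for illustration, and the key string mirrors ConfigSyncPrefix + "reconciler-autoscaling-strategy"), the resolution and validation added later in reconciler_base.go amounts to:

package main

import "fmt"

// Strategy mirrors metadata.ReconcilerAutoscalingStrategy from the patch.
type Strategy string

const (
	StrategyAuto      Strategy = "Auto"
	StrategyRecommend Strategy = "Recommend"
	StrategyDisabled  Strategy = "Disabled"

	// strategyAnnotation mirrors metadata.ReconcilerAutoscalingStrategyAnnotationKey.
	strategyAnnotation = "configsync.gke.io/reconciler-autoscaling-strategy"
)

// effectiveStrategy resolves the strategy from a RootSync/RepoSync annotation
// map, defaulting to Disabled when unset and rejecting unknown values, the
// same way validateAnnotations does in the patch.
func effectiveStrategy(annotations map[string]string) (Strategy, error) {
	v := annotations[strategyAnnotation]
	if v == "" {
		return StrategyDisabled, nil
	}
	switch s := Strategy(v); s {
	case StrategyAuto, StrategyRecommend, StrategyDisabled:
		return s, nil
	default:
		return "", fmt.Errorf("annotation %q has invalid value %q", strategyAnnotation, v)
	}
}

func main() {
	for _, anns := range []map[string]string{
		nil,                                 // unset -> Disabled
		{strategyAnnotation: "Auto"},        // valid
		{strategyAnnotation: "bogus"},       // rejected
	} {
		s, err := effectiveStrategy(anns)
		fmt.Println(s, err)
	}
}

The real code reads the value with core.GetAnnotation and keeps Disabled as the default until Auto is considered safe enough to become the default.
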
diff --git a/pkg/reconcilermanager/controllers/garbage_collector.go b/pkg/reconcilermanager/controllers/garbage_collector.go index c8727cfcb4..14457c8e81 100644 --- a/pkg/reconcilermanager/controllers/garbage_collector.go +++ b/pkg/reconcilermanager/controllers/garbage_collector.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + autoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "kpt.dev/configsync/pkg/api/configsync" "kpt.dev/configsync/pkg/core" "kpt.dev/configsync/pkg/kinds" @@ -261,3 +262,21 @@ func (r *RootSyncReconciler) deleteClusterRoleBinding(ctx context.Context, recon } return nil } + +func (r *reconcilerBase) deleteVerticalPodAutoscaler(ctx context.Context, reconcilerRef types.NamespacedName) error { + vpaRef := reconcilerRef + vpaEnabled, err := r.isVPAEnabled() + if err != nil { + return err + } + if !vpaEnabled { + r.logger(ctx).Info("Managed object delete skipped - not enabled", + logFieldObjectRef, vpaRef.String(), + logFieldObjectKind, "VerticalPodAutoscaler") + return nil + } + obj := &autoscalingv1.VerticalPodAutoscaler{} + obj.Name = vpaRef.Name + obj.Namespace = vpaRef.Namespace + return r.cleanup(ctx, obj) +} diff --git a/pkg/reconcilermanager/controllers/reconciler_base.go b/pkg/reconcilermanager/controllers/reconciler_base.go index be511de9e5..4659b07b21 100644 --- a/pkg/reconcilermanager/controllers/reconciler_base.go +++ b/pkg/reconcilermanager/controllers/reconciler_base.go @@ -24,14 +24,17 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" + autoscaling "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + autoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "k8s.io/client-go/dynamic" "k8s.io/utils/pointer" "kpt.dev/configsync/pkg/api/configsync" @@ -80,8 +83,8 @@ const ( defaultGitSyncLogLevel = 5 ) -// The fields in reconcilerManagerAllowList are the fields that reconciler manager allow -// users or other controllers to modify. +// The fields in reconcilerManagerAllowList are the fields that reconciler manager +// allows users or other controllers to modify on the reconciler Deployment. 
var reconcilerManagerAllowList = []string{ "$.spec.template.spec.containers[*].terminationMessagePath", "$.spec.template.spec.containers[*].terminationMessagePolicy", @@ -108,7 +111,7 @@ type reconcilerBase struct { watcher client.WithWatch // non-caching dynamicClient dynamic.Interface scheme *runtime.Scheme - isAutopilotCluster *bool + autopilot *bool reconcilerPollingPeriod time.Duration hydrationPollingPeriod time.Duration membership *hubv1.Membership @@ -196,7 +199,7 @@ func (r *reconcilerBase) upsertDeployment(ctx context.Context, reconcilerRef typ if err := mutateObject(reconcilerDeployment); err != nil { return nil, controllerutil.OperationResultNone, err } - appliedObj, op, err := r.createOrPatchDeployment(ctx, reconcilerDeployment) + appliedObj, op, err := r.applyDeployment(ctx, reconcilerDeployment) if op != controllerutil.OperationResultNone { r.logger(ctx).Info("Managed object upsert successful", @@ -207,29 +210,30 @@ func (r *reconcilerBase) upsertDeployment(ctx context.Context, reconcilerRef typ return appliedObj, op, err } -// createOrPatchDeployment() first call Get() on the object. If the -// object does not exist, Create() will be called. If it does exist, Patch() -// will be called. -func (r *reconcilerBase) createOrPatchDeployment(ctx context.Context, declared *appsv1.Deployment) (*unstructured.Unstructured, controllerutil.OperationResult, error) { +// applyDeployment applies the declared deployment. +// If it exists before apply, and has the GKE Autopilot adjustment annotation, +// then the declared deployment is modified to match the resource adjustments, +// to avoid fighting with the autopilot mutating webhook. +func (r *reconcilerBase) applyDeployment(ctx context.Context, declared *appsv1.Deployment) (*unstructured.Unstructured, controllerutil.OperationResult, error) { id := core.ID{ ObjectKey: client.ObjectKeyFromObject(declared), GroupKind: kinds.Deployment().GroupKind(), } - forcePatch := true + patchOpts := metav1.PatchOptions{FieldManager: reconcilermanager.ManagerName, Force: pointer.Bool(true)} deploymentClient := r.dynamicClient.Resource(kinds.DeploymentResource()).Namespace(id.Namespace) currentDeploymentUnstructured, err := deploymentClient.Get(ctx, id.Name, metav1.GetOptions{}) if err != nil { if !apierrors.IsNotFound(err) { return nil, controllerutil.OperationResultNone, NewObjectOperationErrorWithID(err, id, OperationGet) } - r.logger(ctx).V(3).Info("Managed object not found, creating", + r.logger(ctx).Info("Managed object not found, creating", logFieldObjectRef, id.ObjectKey.String(), logFieldObjectKind, id.Kind) data, err := json.Marshal(declared) if err != nil { return nil, controllerutil.OperationResultNone, fmt.Errorf("failed to marshal declared deployment object to byte array: %w", err) } - appliedObj, err := deploymentClient.Patch(ctx, id.Name, types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: reconcilermanager.ManagerName, Force: &forcePatch}) + appliedObj, err := deploymentClient.Patch(ctx, id.Name, types.ApplyPatchType, data, patchOpts) if err != nil { return nil, controllerutil.OperationResultNone, NewObjectOperationErrorWithID(err, id, OperationPatch) } @@ -238,31 +242,28 @@ func (r *reconcilerBase) createOrPatchDeployment(ctx context.Context, declared * currentGeneration := currentDeploymentUnstructured.GetGeneration() currentUID := currentDeploymentUnstructured.GetUID() - if r.isAutopilotCluster == nil { - isAutopilot, err := util.IsGKEAutopilotCluster(r.client) - if err != nil { - return nil, 
controllerutil.OperationResultNone, fmt.Errorf("unable to determine if it is an Autopilot cluster: %w", err) - } - r.isAutopilotCluster = &isAutopilot - } - dep, err := compareDeploymentsToCreatePatchData(*r.isAutopilotCluster, declared, currentDeploymentUnstructured, reconcilerManagerAllowList, r.scheme) + dep, err := r.compareDeploymentsToCreatePatchData(declared, currentDeploymentUnstructured, reconcilerManagerAllowList) if err != nil { return nil, controllerutil.OperationResultNone, err } if dep.adjusted { mutator := "Autopilot" - r.logger(ctx).V(3).Info("Managed object container resources updated", + r.logger(ctx).Info("Managed object container resources adjusted by autopilot", logFieldObjectRef, id.ObjectKey.String(), logFieldObjectKind, id.Kind, "mutator", mutator) } if dep.same { + r.logger(ctx).Info("Managed object apply skipped, no diff", + logFieldObjectRef, id.ObjectKey.String(), + logFieldObjectKind, id.Kind) return nil, controllerutil.OperationResultNone, nil } - r.logger(ctx).V(3).Info("Managed object found, patching", + r.logger(ctx).Info("Managed object found, patching", logFieldObjectRef, id.ObjectKey.String(), - logFieldObjectKind, id.Kind) - appliedObj, err := deploymentClient.Patch(ctx, id.Name, types.ApplyPatchType, dep.dataToPatch, metav1.PatchOptions{FieldManager: reconcilermanager.ManagerName, Force: &forcePatch}) + logFieldObjectKind, id.Kind, + "patchJSON", string(dep.dataToPatch)) + appliedObj, err := deploymentClient.Patch(ctx, id.Name, types.ApplyPatchType, dep.dataToPatch, patchOpts) if err != nil { // Let the next reconciliation retry the patch operation for valid request. if !apierrors.IsInvalid(err) { @@ -280,7 +281,7 @@ func (r *reconcilerBase) createOrPatchDeployment(ctx context.Context, declared * if err != nil { return nil, controllerutil.OperationResultNone, fmt.Errorf("failed to marshal declared deployment object to byte array: %w", err) } - appliedObj, err = deploymentClient.Patch(ctx, id.Name, types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: reconcilermanager.ManagerName, Force: &forcePatch}) + appliedObj, err = deploymentClient.Patch(ctx, id.Name, types.ApplyPatchType, data, patchOpts) if err != nil { return nil, controllerutil.OperationResultNone, NewObjectOperationErrorWithID(err, id, OperationPatch) } @@ -291,6 +292,18 @@ func (r *reconcilerBase) createOrPatchDeployment(ctx context.Context, declared * return appliedObj, controllerutil.OperationResultUpdated, nil } +func (r *reconcilerBase) isAutopilot() (bool, error) { + if r.autopilot != nil { + return *r.autopilot, nil + } + autopilot, err := util.IsGKEAutopilotCluster(r.client) + if err != nil { + return false, fmt.Errorf("unable to determine if it is an Autopilot cluster: %w", err) + } + r.autopilot = &autopilot + return autopilot, nil +} + // deleteDeploymentFields delete all the fields in allowlist from unstructured object and convert the unstructured object to Deployment object func deleteDeploymentFields(allowList []string, unstructuredDeployment *unstructured.Unstructured) (*appsv1.Deployment, error) { for _, path := range allowList { @@ -312,30 +325,33 @@ type deploymentProcessResult struct { } // compareDeploymentsToCreatePatchData checks if current deployment is same with declared deployment when ignore the fields in allowlist. 
If not, it creates a byte array used for PATCH later -func compareDeploymentsToCreatePatchData(isAutopilot bool, declared *appsv1.Deployment, currentDeploymentUnstructured *unstructured.Unstructured, allowList []string, scheme *runtime.Scheme) (*deploymentProcessResult, error) { +func (r *reconcilerBase) compareDeploymentsToCreatePatchData(declared *appsv1.Deployment, currentDeploymentUnstructured *unstructured.Unstructured, allowList []string) (*deploymentProcessResult, error) { + isAutopilot, err := r.isAutopilot() + if err != nil { + return nil, err + } processedCurrent, err := deleteDeploymentFields(allowList, currentDeploymentUnstructured) if err != nil { - return &deploymentProcessResult{}, err + return nil, err } adjusted, err := adjustContainerResources(isAutopilot, declared, processedCurrent) if err != nil { - return &deploymentProcessResult{}, err + return nil, err } - - unObjDeclared, err := kinds.ToUnstructured(declared, scheme) + uObjDeclared, err := kinds.ToUnstructured(declared, r.scheme) if err != nil { - return &deploymentProcessResult{}, err + return nil, err } - processedDeclared, err := deleteDeploymentFields(allowList, unObjDeclared) + processedDeclared, err := deleteDeploymentFields(allowList, uObjDeclared) if err != nil { - return &deploymentProcessResult{}, err + return nil, err } if equality.Semantic.DeepEqual(processedCurrent.Labels, processedDeclared.Labels) && equality.Semantic.DeepEqual(processedCurrent.Spec, processedDeclared.Spec) { return &deploymentProcessResult{true, adjusted, nil}, nil } - data, err := json.Marshal(unObjDeclared) + data, err := json.Marshal(uObjDeclared) if err != nil { - return &deploymentProcessResult{}, err + return nil, err } return &deploymentProcessResult{false, adjusted, data}, nil } @@ -470,41 +486,31 @@ func mountConfigMapValuesFiles(templateSpec *corev1.PodSpec, c *corev1.Container } } -func mutateContainerResource(c *corev1.Container, override *v1beta1.OverrideSpec) { - if override == nil { +func mutateContainerLogLevel(c *corev1.Container, override []v1beta1.ContainerLogLevelOverride) { + if len(override) == 0 { return } - - for _, override := range override.Resources { - if override.ContainerName == c.Name { - if !override.CPURequest.IsZero() { - if c.Resources.Requests == nil { - c.Resources.Requests = corev1.ResourceList{} - } - c.Resources.Requests[corev1.ResourceCPU] = override.CPURequest - } - if !override.CPULimit.IsZero() { - if c.Resources.Limits == nil { - c.Resources.Limits = corev1.ResourceList{} - } - c.Resources.Limits[corev1.ResourceCPU] = override.CPULimit - } - if !override.MemoryRequest.IsZero() { - if c.Resources.Requests == nil { - c.Resources.Requests = corev1.ResourceList{} - } - c.Resources.Requests[corev1.ResourceMemory] = override.MemoryRequest - } - if !override.MemoryLimit.IsZero() { - if c.Resources.Limits == nil { - c.Resources.Limits = corev1.ResourceList{} - } - c.Resources.Limits[corev1.ResourceMemory] = override.MemoryLimit - } + for i, arg := range c.Args { + if strings.HasPrefix(arg, "-v=") { + c.Args = removeArg(c.Args, i) + break } } + c.Args = append(c.Args, fmt.Sprintf("-v=%d", containerLogLevel(c.Name, override))) +} - c.Args = append(c.Args, fmt.Sprintf("-v=%d", containerLogLevel(c.Name, override.LogLevels))) +func removeArg(args []string, i int) []string { + if i == 0 { + // remove first arg + args = args[i+1:] + } else if i == len(args)-1 { + // remove last arg + args = args[:i] + } else { + // remove middle arg + args = append(args[:i], args[i+1:]...) 
+ } + return args } // containerLogLevel will determine the log level value for any reconciler deployment container @@ -591,17 +597,46 @@ func (r *reconcilerBase) validateCACertSecret(ctx context.Context, namespace, ca return errors.Wrapf(err, "Secret %s get failed", caCertSecretRefName) } if _, ok := secret.Data[CACertSecretKey]; !ok { - return fmt.Errorf("caCertSecretRef was set, but %s key is not present in %s Secret", CACertSecretKey, caCertSecretRefName) + return errors.Errorf("caCertSecretRef was set, but %s key is not present in %s Secret", CACertSecretKey, caCertSecretRefName) } } return nil } +func (r *reconcilerBase) validateAnnotations(_ context.Context, rs client.Object) error { + autoscalingStrategy := reconcilerAutoscalingStrategy(rs) + switch autoscalingStrategy { + case metadata.ReconcilerAutoscalingStrategyAuto, + metadata.ReconcilerAutoscalingStrategyRecommend, + metadata.ReconcilerAutoscalingStrategyDisabled: + // valid + default: + return errors.Errorf("annotation %q has invalid value %q, must be one of %q, %q, or %q", + metadata.ReconcilerAutoscalingStrategyAnnotationKey, + autoscalingStrategy, + metadata.ReconcilerAutoscalingStrategyAuto, + metadata.ReconcilerAutoscalingStrategyRecommend, + metadata.ReconcilerAutoscalingStrategyDisabled) + } + return nil +} + +func reconcilerAutoscalingStrategy(rs client.Object) metadata.ReconcilerAutoscalingStrategy { + autoscalingStrategy := metadata.ReconcilerAutoscalingStrategy( + core.GetAnnotation(rs, metadata.ReconcilerAutoscalingStrategyAnnotationKey)) + if len(autoscalingStrategy) == 0 { + // Default to Disabled, if unspecified. + // TODO: Default to Auto by default when we're confident it will work for most users. + autoscalingStrategy = metadata.ReconcilerAutoscalingStrategyDisabled + } + return autoscalingStrategy +} + // addTypeInformationToObject looks up and adds GVK to a runtime.Object based upon the loaded Scheme func (r *reconcilerBase) addTypeInformationToObject(obj runtime.Object) error { gvk, err := kinds.Lookup(obj, r.scheme) if err != nil { - return fmt.Errorf("missing apiVersion or kind and cannot assign it; %w", err) + return errors.Wrap(err, "missing apiVersion or kind and cannot assign it") } obj.GetObjectKind().SetGroupVersionKind(gvk) return nil @@ -672,3 +707,83 @@ func (r *reconcilerBase) setupOrTeardown(ctx context.Context, syncObj client.Obj return nil } + +// upsertVerticalPodAutoscaler creates or updates the VPA for the reconciler +// Deployment if the VPA API is enabled and the strategy is either auto or +// recommend. If the strategy is disabled and the VPA API is enabled, then the +// VPA will be deleted, if it exists. +// Returns true if the VPA was created, updated, or already up-to-date. 
+func (r *reconcilerBase) upsertVerticalPodAutoscaler(ctx context.Context, strategy metadata.ReconcilerAutoscalingStrategy, reconcilerRef types.NamespacedName, labelMap map[string]string) (client.ObjectKey, bool, error) { + vpaRef := reconcilerRef + vpaEnabled, err := r.isVPAEnabled() + if err != nil { + return vpaRef, false, err + } + switch strategy { + case metadata.ReconcilerAutoscalingStrategyDisabled: + // delete if VPA is installed + if !vpaEnabled { + // nothing to delete - CRD not installed + return vpaRef, false, nil + } + return vpaRef, false, r.deleteVerticalPodAutoscaler(ctx, reconcilerRef) + case metadata.ReconcilerAutoscalingStrategyAuto, metadata.ReconcilerAutoscalingStrategyRecommend: + // upsert if VPA is installed + if !vpaEnabled { + r.logger(ctx).Info("Managed object upsert skipped - VerticalPodAutoscaler CRD/APIService not installed", + logFieldObjectRef, vpaRef.String(), + logFieldObjectKind, "VerticalPodAutoscaler") + return vpaRef, false, nil + } // else continue to upsert + default: + // shouldn't happen - invalid strategy should be caught by validation + return vpaRef, false, errors.Errorf("invalid reconciler autoscaling strategy: %v", strategy) + } + vpa := &autoscalingv1.VerticalPodAutoscaler{} + vpa.Name = vpaRef.Name + vpa.Namespace = vpaRef.Namespace + op, err := CreateOrUpdate(ctx, r.client, vpa, func() error { + core.AddLabels(vpa, labelMap) + vpa.Spec.TargetRef = &autoscaling.CrossVersionObjectReference{ + // TODO: APIVersion is optional but is it useful? + Kind: kinds.Deployment().Kind, + Name: reconcilerRef.Name, + } + var updateMode autoscalingv1.UpdateMode + switch strategy { + case metadata.ReconcilerAutoscalingStrategyAuto: + updateMode = autoscalingv1.UpdateModeAuto + default: // Recommend + updateMode = autoscalingv1.UpdateModeOff + } + vpa.Spec.UpdatePolicy = &autoscalingv1.PodUpdatePolicy{ + UpdateMode: &updateMode, + // VPA is allowed to evict the last reconciler pod, + // because there's only one replica. 
+ MinReplicas: pointer.Int32(1), + } + return nil + }) + if err != nil { + return vpaRef, false, err + } + if op != controllerutil.OperationResultNone { + r.logger(ctx).Info("Managed object upsert successful", + logFieldObjectRef, vpaRef.String(), + logFieldObjectKind, "VerticalPodAutoscaler", + logFieldOperation, op) + } + return vpaRef, (strategy == metadata.ReconcilerAutoscalingStrategyAuto), nil +} + +func (r *reconcilerBase) isVPAEnabled() (bool, error) { + vpaGVK := kinds.VerticalPodAutoscaler() + _, err := r.client.RESTMapper().RESTMapping(vpaGVK.GroupKind(), vpaGVK.Version) + if err != nil { + if meta.IsNoMatchError(err) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/pkg/reconcilermanager/controllers/reconciler_base_test.go b/pkg/reconcilermanager/controllers/reconciler_base_test.go index 561dda3683..14a3c79f0f 100644 --- a/pkg/reconcilermanager/controllers/reconciler_base_test.go +++ b/pkg/reconcilermanager/controllers/reconciler_base_test.go @@ -15,6 +15,7 @@ package controllers import ( + "context" "encoding/json" "fmt" "path/filepath" @@ -33,6 +34,7 @@ import ( "kpt.dev/configsync/pkg/kinds" "kpt.dev/configsync/pkg/metadata" "kpt.dev/configsync/pkg/reconcilermanager" + syncerFake "kpt.dev/configsync/pkg/syncer/syncertest/fake" "kpt.dev/configsync/pkg/testing/fake" "kpt.dev/configsync/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" @@ -611,7 +613,16 @@ func TestCompareDeploymentsToCreatePatchData(t *testing.T) { } else { testCurrent = tc.current.DeepCopy() } - dep, err := compareDeploymentsToCreatePatchData(tc.isAutopilot, testDeclared, testCurrent, reconcilerManagerAllowList, core.Scheme) + fakeClient := syncerFake.NewClient(t, core.Scheme) + if tc.isAutopilot { + err := fakeClient.Create(context.Background(), util.FakeAutopilotWebhookObject()) + require.NoError(t, err) + } + r := &reconcilerBase{ + scheme: fakeClient.Scheme(), + client: fakeClient, + } + dep, err := r.compareDeploymentsToCreatePatchData(testDeclared, testCurrent, reconcilerManagerAllowList) require.NoError(t, err) require.Equal(t, tc.expectedSame, dep.same) }) diff --git a/pkg/reconcilermanager/controllers/reconciler_container_resources.go b/pkg/reconcilermanager/controllers/reconciler_container_resources.go new file mode 100644 index 0000000000..7bff8c725b --- /dev/null +++ b/pkg/reconcilermanager/controllers/reconciler_container_resources.go @@ -0,0 +1,208 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "kpt.dev/configsync/pkg/api/configsync/v1beta1" + "kpt.dev/configsync/pkg/metrics" + "kpt.dev/configsync/pkg/reconcilermanager" +) + +// ReconcilerContainerResourceDefaults are the default resources to use when autoscaling +// (VPA) is NOT configured for the reconciler deployment. +// These defaults should be high enough to work for most users our of the box, +// with a moderately high number of resource objects (e.g. 
1k). +func ReconcilerContainerResourceDefaults() map[string]v1beta1.ContainerResourcesSpec { + return map[string]v1beta1.ContainerResourcesSpec{ + reconcilermanager.Reconciler: { + ContainerName: reconcilermanager.Reconciler, + CPURequest: resource.MustParse("50m"), + MemoryRequest: resource.MustParse("200Mi"), + }, + reconcilermanager.HydrationController: { + ContainerName: reconcilermanager.HydrationController, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("100Mi"), + }, + reconcilermanager.OciSync: { + ContainerName: reconcilermanager.OciSync, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("200Mi"), + }, + reconcilermanager.HelmSync: { + ContainerName: reconcilermanager.HelmSync, + CPURequest: resource.MustParse("50m"), + MemoryRequest: resource.MustParse("200Mi"), + }, + reconcilermanager.GitSync: { + ContainerName: reconcilermanager.GitSync, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("200Mi"), + }, + reconcilermanager.GCENodeAskpassSidecar: { + ContainerName: reconcilermanager.GCENodeAskpassSidecar, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("20Mi"), + }, + metrics.OtelAgentName: { + ContainerName: metrics.OtelAgentName, + CPURequest: resource.MustParse("10m"), + // TODO: CPU limits considered harmful. + // A throttled otel-agent is also much more likely to drop metrics. + CPULimit: resource.MustParse("1000m"), + // TODO: If we want to avoid OOMKill, set Request = Limit for Guaranteed QoS. + // Burstable pods are the most likely to be OOM killed by the kernel. + MemoryRequest: resource.MustParse("100Mi"), + MemoryLimit: resource.MustParse("1Gi"), + }, + } +} + +// ReconcilerContainerResourceAutoscaleDefaults are the default resource to use when +// autoscaling (VPA) is configured for the reconciler deployment. +// These defaults should be less than the defaults in the manifest yaml, so +// that the reconciler starts with minimal requirements and scales up as-needed. +func ReconcilerContainerResourceAutoscaleDefaults() map[string]v1beta1.ContainerResourcesSpec { + return map[string]v1beta1.ContainerResourcesSpec{ + reconcilermanager.Reconciler: { + ContainerName: reconcilermanager.Reconciler, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("128Mi"), + }, + reconcilermanager.HydrationController: { + ContainerName: reconcilermanager.HydrationController, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("10Mi"), + }, + reconcilermanager.OciSync: { + ContainerName: reconcilermanager.OciSync, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("10Mi"), + }, + reconcilermanager.HelmSync: { + ContainerName: reconcilermanager.HelmSync, + CPURequest: resource.MustParse("50m"), + MemoryRequest: resource.MustParse("200Mi"), + }, + reconcilermanager.GitSync: { + ContainerName: reconcilermanager.GitSync, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("10Mi"), + }, + reconcilermanager.GCENodeAskpassSidecar: { + ContainerName: reconcilermanager.GCENodeAskpassSidecar, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("20Mi"), + }, + metrics.OtelAgentName: { + ContainerName: metrics.OtelAgentName, + CPURequest: resource.MustParse("10m"), + MemoryRequest: resource.MustParse("128Mi"), + }, + } +} + +// setContainerResourceDefaults sets the defaults when not specified in the +// overrides. 
+func setContainerResourceDefaults(overrides []v1beta1.ContainerResourcesSpec, defaultsMap map[string]v1beta1.ContainerResourcesSpec) []v1beta1.ContainerResourcesSpec { + // Convert overrides list to map, indexed by container name + overrideMap := make(map[string]v1beta1.ContainerResourcesSpec) + for _, override := range overrides { + // TODO: validate no dupes & known container names + overrideMap[override.ContainerName] = override + } + // Merge the defaults with the overrides. + // Only use the default if the override is not specified. + for containerName, defaults := range defaultsMap { + override, found := overrideMap[containerName] + if !found { + // No overrides specified for this container - use the defaults as-is (copy struct) + overrideMap[containerName] = v1beta1.ContainerResourcesSpec{ + CPURequest: defaults.CPURequest, + CPULimit: defaults.CPULimit, + MemoryRequest: defaults.MemoryRequest, + MemoryLimit: defaults.MemoryLimit, + } + continue + } + updated := v1beta1.ContainerResourcesSpec{} + // Some overrides specified for this container - use default, if no override is specified for each value + if !override.CPURequest.IsZero() { + updated.CPURequest = override.CPURequest + } else if !defaults.CPURequest.IsZero() { + updated.CPURequest = defaults.CPURequest + } + if !override.CPULimit.IsZero() { + updated.CPULimit = override.CPULimit + } else if !defaults.CPULimit.IsZero() { + updated.CPULimit = defaults.CPULimit + } + if !override.MemoryRequest.IsZero() { + updated.MemoryRequest = override.MemoryRequest + } else if !defaults.MemoryRequest.IsZero() { + updated.MemoryRequest = defaults.MemoryRequest + } + if !override.MemoryLimit.IsZero() { + updated.MemoryLimit = override.MemoryLimit + } else if !defaults.MemoryLimit.IsZero() { + updated.MemoryLimit = defaults.MemoryLimit + } + overrideMap[containerName] = updated + } + // Convert back to list + overrides = make([]v1beta1.ContainerResourcesSpec, 0, len(overrideMap)) + for containerName, override := range overrideMap { + override.ContainerName = containerName + overrides = append(overrides, override) + } + return overrides +} + +func mutateContainerResource(c *corev1.Container, overrides []v1beta1.ContainerResourcesSpec) { + if len(overrides) == 0 { + return + } + + for _, override := range overrides { + if override.ContainerName == c.Name { + if !override.CPURequest.IsZero() { + if c.Resources.Requests == nil { + c.Resources.Requests = corev1.ResourceList{} + } + c.Resources.Requests[corev1.ResourceCPU] = override.CPURequest + } + if !override.CPULimit.IsZero() { + if c.Resources.Limits == nil { + c.Resources.Limits = corev1.ResourceList{} + } + c.Resources.Limits[corev1.ResourceCPU] = override.CPULimit + } + if !override.MemoryRequest.IsZero() { + if c.Resources.Requests == nil { + c.Resources.Requests = corev1.ResourceList{} + } + c.Resources.Requests[corev1.ResourceMemory] = override.MemoryRequest + } + if !override.MemoryLimit.IsZero() { + if c.Resources.Limits == nil { + c.Resources.Limits = corev1.ResourceList{} + } + c.Resources.Limits[corev1.ResourceMemory] = override.MemoryLimit + } + } + } +} diff --git a/pkg/reconcilermanager/controllers/reposync_controller.go b/pkg/reconcilermanager/controllers/reposync_controller.go index 3b6301db50..0fbc1ee25d 100644 --- a/pkg/reconcilermanager/controllers/reposync_controller.go +++ b/pkg/reconcilermanager/controllers/reposync_controller.go @@ -210,13 +210,13 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile // Create secret in 
config-management-system namespace using the // existing secret in the reposync.namespace. if _, err := r.upsertAuthSecret(ctx, rs, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "upserting auth secret") } // Create secret in config-management-system namespace using the // existing secret in the reposync.namespace. if _, err := r.upsertCACertSecret(ctx, rs, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "upserting CA cert secret") } labelMap := map[string]string{ @@ -243,12 +243,19 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile return errors.Errorf("invalid source type: %s", rs.Spec.SourceType) } if _, err := r.upsertServiceAccount(ctx, reconcilerRef, auth, gcpSAEmail, labelMap); err != nil { - return err + return errors.Wrap(err, "upserting service account") } // Overwrite reconciler rolebinding. if _, err := r.upsertRoleBinding(ctx, reconcilerRef, rsRef); err != nil { - return err + return errors.Wrap(err, "upserting role binding") + } + + // Upsert autoscaling config for the reconciler deployment + autoscalingStrategy := reconcilerAutoscalingStrategy(rs) + _, autoscale, err := r.upsertVerticalPodAutoscaler(ctx, autoscalingStrategy, reconcilerRef, labelMap) + if err != nil { + return errors.Wrap(err, "upserting autoscaler") } if err := r.upsertHelmConfigMaps(ctx, rs, labelMap); err != nil { @@ -256,12 +263,12 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile } containerEnvs := r.populateContainerEnvs(ctx, rs, reconcilerRef.Name) - mut := r.mutationsFor(ctx, rs, containerEnvs) + mut := r.mutationsFor(ctx, rs, containerEnvs, autoscale) // Upsert Namespace reconciler deployment. deployObj, op, err := r.upsertDeployment(ctx, reconcilerRef, labelMap, mut) if err != nil { - return err + return errors.Wrap(err, "upserting reconciler deployment") } rs.Status.Reconciler = reconcilerRef.Name @@ -270,7 +277,7 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile if op == controllerutil.OperationResultNone { deployObj, err = r.deployment(ctx, reconcilerRef) if err != nil { - return err + return errors.Wrap(err, "getting reconciler deployment") } } @@ -285,7 +292,7 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile result, err := kstatus.Compute(deployObj) if err != nil { - return errors.Wrap(err, "computing reconciler Deployment status failed") + return errors.Wrap(err, "computing reconciler deployment status") } r.logger(ctx).V(3).Info("Reconciler status", @@ -421,8 +428,12 @@ func (r *RepoSyncReconciler) handleReconcileError(ctx context.Context, err error func (r *RepoSyncReconciler) deleteManagedObjects(ctx context.Context, reconcilerRef, rsRef types.NamespacedName) error { r.logger(ctx).Info("Deleting managed objects") + if err := r.deleteVerticalPodAutoscaler(ctx, reconcilerRef); err != nil { + return errors.Wrap(err, "deleting autoscaler") + } + if err := r.deleteDeployment(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting reconciler deployment") } // Note: ConfigMaps have been replaced by Deployment env vars. @@ -430,22 +441,26 @@ func (r *RepoSyncReconciler) deleteManagedObjects(ctx context.Context, reconcile // This deletion remains to clean up after users upgrade. 
if err := r.deleteConfigMaps(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting config maps") } if err := r.deleteSecrets(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting secrets") } if err := r.deleteRoleBinding(ctx, reconcilerRef, rsRef); err != nil { - return err + return errors.Wrap(err, "deleting role bindings") } if err := r.deleteHelmConfigMapCopies(ctx, rsRef, nil); err != nil { - return err + return errors.Wrap(err, "deleting helm config maps") } - return r.deleteServiceAccount(ctx, reconcilerRef) + if err := r.deleteServiceAccount(ctx, reconcilerRef); err != nil { + return errors.Wrap(err, "deleting service account") + } + + return nil } // SetupWithManager registers RepoSync controller with reconciler-manager. @@ -857,6 +872,10 @@ func (r *RepoSyncReconciler) validateRepoSync(ctx context.Context, rs *v1beta1.R return fmt.Errorf("Invalid reconciler name %q: %s.", reconcilerName, strings.Join(err, ", ")) } + if err := r.validateAnnotations(ctx, rs); err != nil { + return err + } + if err := r.validateSourceSpec(ctx, rs, reconcilerName); err != nil { return err } @@ -1000,7 +1019,7 @@ func (r *RepoSyncReconciler) updateSyncStatus(ctx context.Context, rs *v1beta1.R return updated, nil } -func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoSync, containerEnvs map[string][]corev1.EnvVar) mutateFn { +func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoSync, containerEnvs map[string][]corev1.EnvVar, autoscale bool) mutateFn { return func(obj client.Object) error { d, ok := obj.(*appsv1.Deployment) if !ok { @@ -1056,6 +1075,21 @@ func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoS caCertSecretRefName = ReconcilerResourceName(reconcilerName, caCertSecretRefName) } templateSpec.Volumes = filterVolumes(templateSpec.Volumes, auth, secretName, caCertSecretRefName, rs.Spec.SourceType, r.membership) + + // Resource priority order: + // - user-specified resource overrides (from RepoSync) + // - autoscale defaults (hard-coded), if enabled + // - declared defaults (from ConfigMap) + overrides := rs.Spec.SafeOverride() + resourceOverrides := overrides.Resources + if autoscale { + resourceOverrides = setContainerResourceDefaults(resourceOverrides, + ReconcilerContainerResourceAutoscaleDefaults()) + } else { + resourceOverrides = setContainerResourceDefaults(resourceOverrides, + ReconcilerContainerResourceDefaults()) + } + var updatedContainers []corev1.Container // Mutate spec.Containers to update name, configmap references and volumemounts. for _, container := range templateSpec.Containers { @@ -1063,7 +1097,6 @@ func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoS switch container.Name { case reconcilermanager.Reconciler: container.Env = append(container.Env, containerEnvs[container.Name]...) - mutateContainerResource(&container, rs.Spec.Override) case reconcilermanager.HydrationController: if !enableRendering(rs.GetAnnotations()) { // if the sync source does not require rendering, omit the hydration controller @@ -1072,7 +1105,6 @@ func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoS } else { container.Env = append(container.Env, containerEnvs[container.Name]...) 
container.Image = updateHydrationControllerImage(container.Image, *rs.Spec.SafeOverride()) - mutateContainerResource(&container, rs.Spec.Override) } case reconcilermanager.OciSync: // Don't add the oci-sync container when sourceType is NOT oci. @@ -1081,7 +1113,6 @@ func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoS } else { container.Env = append(container.Env, containerEnvs[container.Name]...) injectFWICredsToContainer(&container, injectFWICreds) - mutateContainerResource(&container, rs.Spec.Override) } case reconcilermanager.HelmSync: // Don't add the helm-sync container when sourceType is NOT helm. @@ -1095,7 +1126,6 @@ func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoS } mountConfigMapValuesFiles(templateSpec, &container, r.getReconcilerHelmConfigMapRefs(rs)) injectFWICredsToContainer(&container, injectFWICreds) - mutateContainerResource(&container, rs.Spec.Override) } case reconcilermanager.GitSync: // Don't add the git-sync container when sourceType is NOT git. @@ -1113,7 +1143,6 @@ func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoS sRef := client.ObjectKey{Namespace: rs.Namespace, Name: v1beta1.GetSecretName(rs.Spec.SecretRef)} keys := GetSecretKeys(ctx, r.client, sRef) container.Env = append(container.Env, gitSyncHTTPSProxyEnv(secretName, keys)...) - mutateContainerResource(&container, rs.Spec.Override) } case reconcilermanager.GCENodeAskpassSidecar: if !enableAskpassSidecar(rs.Spec.SourceType, auth) { @@ -1124,12 +1153,14 @@ func (r *RepoSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RepoS // TODO: enable resource/logLevel overrides for gcenode-askpass-sidecar } case metrics.OtelAgentName: - // The no-op case to avoid unknown container error after - // first-ever reconcile. + container.Env = append(container.Env, containerEnvs[container.Name]...) 
default: return errors.Errorf("unknown container in reconciler deployment template: %q", container.Name) } if addContainer { + // Common mutations for all containers + mutateContainerResource(&container, resourceOverrides) + mutateContainerLogLevel(&container, overrides.LogLevels) updatedContainers = append(updatedContainers, container) } } diff --git a/pkg/reconcilermanager/controllers/reposync_controller_test.go b/pkg/reconcilermanager/controllers/reposync_controller_test.go index ac3f8591c1..c1f99eaccd 100644 --- a/pkg/reconcilermanager/controllers/reposync_controller_test.go +++ b/pkg/reconcilermanager/controllers/reposync_controller_test.go @@ -366,11 +366,12 @@ func TestCreateAndUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(overrideReconcilerAndGitSyncResourceLimits, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), - containerResourcesMutator(overrideReconcilerAndGitSyncResourceLimits), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -423,11 +424,12 @@ func TestCreateAndUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideReconcilerCPUAndGitSyncMemResources, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), - containerResourcesMutator(overrideReconcilerCPUAndGitSyncMemResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -460,10 +462,12 @@ func TestCreateAndUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides = setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -500,10 +504,12 @@ func TestUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -562,11 +568,12 @@ func TestUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides 
= setContainerResourceDefaults(overrideReconcilerAndGitSyncResources, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), - containerResourcesMutator(overrideReconcilerAndGitSyncResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -617,11 +624,12 @@ func TestUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideReconcilerResources, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), - containerResourcesMutator(overrideReconcilerResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -664,11 +672,12 @@ func TestUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideGitSyncResources, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), - containerResourcesMutator(overrideGitSyncResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -700,10 +709,12 @@ func TestUpdateNamespaceReconcilerWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides = setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("5"), setGeneration(5), ) @@ -740,10 +751,12 @@ func TestRepoSyncCreateWithNoSSLVerify(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -781,10 +794,12 @@ func TestRepoSyncUpdateNoSSLVerify(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), 
containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -922,6 +937,7 @@ func TestRepoSyncUpdateNoSSLVerify(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("4"), setGeneration(2), ) @@ -1015,6 +1031,7 @@ func TestRepoSyncUpdateNoSSLVerify(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("6"), setGeneration(2), ) @@ -1050,6 +1067,7 @@ func TestRepoSyncUpdateNoSSLVerify(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("7"), setGeneration(3), ) @@ -1084,12 +1102,13 @@ func TestRepoSyncCreateWithCACert(t *testing.T) { } repoContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) - + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) nsSecretName := nsReconcilerName + "-" + secretName nsCACertSecret := nsReconcilerName + "-" + caCertSecret repoDeployment := repoSyncDeployment(nsReconcilerName, setServiceAccountName(nsReconcilerName), caCertSecretMutator(nsSecretName, nsCACertSecret), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", nsSecretName, "https_proxy"), envVarMutator(gitSyncName, nsSecretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, nsSecretName, GitSecretConfigKeyToken), @@ -1124,10 +1143,12 @@ func TestRepoSyncUpdateCACert(t *testing.T) { } repoContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) nsSecretName := nsReconcilerName + "-" + secretName - repoDeployment := rootSyncDeployment(nsReconcilerName, + repoDeployment := repoSyncDeployment(nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsSecretName), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", nsSecretName, "https_proxy"), envVarMutator(gitSyncName, nsSecretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, nsSecretName, GitSecretConfigKeyToken), @@ -1174,9 +1195,10 @@ func TestRepoSyncUpdateCACert(t *testing.T) { repoContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) nsCACertSecret := nsReconcilerName + "-" + caCertSecret - updatedRepoDeployment := rootSyncDeployment(nsReconcilerName, + updatedRepoDeployment := repoSyncDeployment(nsReconcilerName, setServiceAccountName(nsReconcilerName), caCertSecretMutator(nsSecretName, nsCACertSecret), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", nsSecretName, "https_proxy"), envVarMutator(gitSyncName, nsSecretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, nsSecretName, GitSecretConfigKeyToken), @@ -1206,6 +1228,7 @@ func TestRepoSyncUpdateCACert(t *testing.T) { repoDeployment = repoSyncDeployment(nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsSecretName), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", 
nsSecretName, "https_proxy"), envVarMutator(gitSyncName, nsSecretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, nsSecretName, GitSecretConfigKeyToken), @@ -1298,10 +1321,12 @@ func TestRepoSyncCreateWithOverrideGitSyncDepth(t *testing.T) { } repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1331,10 +1356,12 @@ func TestRepoSyncUpdateOverrideGitSyncDepth(t *testing.T) { } repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1367,6 +1394,7 @@ func TestRepoSyncUpdateOverrideGitSyncDepth(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1399,6 +1427,7 @@ func TestRepoSyncUpdateOverrideGitSyncDepth(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -1430,6 +1459,7 @@ func TestRepoSyncUpdateOverrideGitSyncDepth(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -1479,10 +1509,12 @@ func TestRepoSyncCreateWithOverrideReconcileTimeout(t *testing.T) { } repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1512,10 +1544,12 @@ func TestRepoSyncUpdateOverrideReconcileTimeout(t *testing.T) { } repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1548,6 +1582,7 @@ func TestRepoSyncUpdateOverrideReconcileTimeout(t *testing.T) { nsReconcilerName, 
setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1579,6 +1614,7 @@ func TestRepoSyncUpdateOverrideReconcileTimeout(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -1628,11 +1664,12 @@ func TestRepoSyncCreateWithOverrideAPIServerTimeout(t *testing.T) { } repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) - + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1659,10 +1696,12 @@ func TestRepoSyncUpdateOverrideAPIServerTimeout(t *testing.T) { } repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1695,6 +1734,7 @@ func TestRepoSyncUpdateOverrideAPIServerTimeout(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1726,6 +1766,7 @@ func TestRepoSyncUpdateOverrideAPIServerTimeout(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -1789,10 +1830,12 @@ func TestRepoSyncSwitchAuthTypes(t *testing.T) { ) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1832,6 +1875,7 @@ func TestRepoSyncSwitchAuthTypes(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1863,6 +1907,7 @@ func TestRepoSyncSwitchAuthTypes(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), containersWithRepoVolumeMutator(noneGitContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), 
setGeneration(3), ) @@ -1900,10 +1945,12 @@ func TestRepoSyncReconcilerRestart(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1950,6 +1997,7 @@ func TestRepoSyncReconcilerRestart(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -2036,10 +2084,12 @@ func TestMultipleRepoSyncs(t *testing.T) { wantRoleBindings := map[core.ID]*rbacv1.RoleBinding{core.IDOf(roleBinding1): roleBinding1} repoContainerEnv1 := testReconciler.populateContainerEnvs(ctx, rs1, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment1 := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv1), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -2084,6 +2134,7 @@ func TestMultipleRepoSyncs(t *testing.T) { nsReconcilerName2, setServiceAccountName(nsReconcilerName2), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv2), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -2143,6 +2194,7 @@ func TestMultipleRepoSyncs(t *testing.T) { nsReconcilerName3, setServiceAccountName(nsReconcilerName3), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv3), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -2202,6 +2254,7 @@ func TestMultipleRepoSyncs(t *testing.T) { setServiceAccountName(nsReconcilerName4), secretMutator(nsReconcilerName4+"-"+reposyncCookie), envVarMutator("HTTPS_PROXY", nsReconcilerName4+"-"+reposyncCookie, "https_proxy"), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv4), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -2259,6 +2312,7 @@ func TestMultipleRepoSyncs(t *testing.T) { nsReconcilerName5, setServiceAccountName(nsReconcilerName5), secretMutator(nsReconcilerName5+"-"+secretName), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", nsReconcilerName5+"-"+secretName, "https_proxy"), envVarMutator(gitSyncName, nsReconcilerName5+"-"+secretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, nsReconcilerName5+"-"+secretName, GitSecretConfigKeyToken), @@ -2310,6 +2364,7 @@ func TestMultipleRepoSyncs(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv1), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2345,6 +2400,7 @@ func TestMultipleRepoSyncs(t *testing.T) { nsReconcilerName2, setServiceAccountName(nsReconcilerName2), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), 
containerEnvMutator(repoContainerEnv2), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2380,6 +2436,7 @@ func TestMultipleRepoSyncs(t *testing.T) { nsReconcilerName3, setServiceAccountName(nsReconcilerName3), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv3), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2848,11 +2905,12 @@ func TestInjectFleetWorkloadIdentityCredentialsToRepoSync(t *testing.T) { t.Fatalf("unexpected reconciliation error, got error: %q, want error: nil", err) } repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) - + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -2888,6 +2946,7 @@ func TestInjectFleetWorkloadIdentityCredentialsToRepoSync(t *testing.T) { }), setServiceAccountName(nsReconcilerName), fleetWorkloadIdentityMutator(workloadIdentityPool), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2925,6 +2984,7 @@ func TestInjectFleetWorkloadIdentityCredentialsToRepoSync(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -2957,6 +3017,7 @@ func TestInjectFleetWorkloadIdentityCredentialsToRepoSync(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), containersWithRepoVolumeMutator(noneGitContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -2988,10 +3049,11 @@ func TestRepoSyncWithHelm(t *testing.T) { t.Fatalf("unexpected reconciliation error, got error: %q, want error: nil", err) } repoContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) - - repoDeployment := rootSyncDeployment(nsReconcilerName, + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) + repoDeployment := repoSyncDeployment(nsReconcilerName, setServiceAccountName(nsReconcilerName), helmSecretMutator(nsReconcilerName+"-"+secretName), + containerResourcesMutator(resourceOverrides), envVarMutator(helmSyncName, nsReconcilerName+"-"+secretName, "username"), envVarMutator(helmSyncPassword, nsReconcilerName+"-"+secretName, "password"), containerEnvMutator(repoContainerEnvs), @@ -3021,6 +3083,7 @@ func TestRepoSyncWithHelm(t *testing.T) { repoDeployment = repoSyncDeployment(nsReconcilerName, setServiceAccountName(nsReconcilerName), containersWithRepoVolumeMutator(noneHelmContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -3057,11 +3120,12 @@ func TestRepoSyncWithHelm(t *testing.T) { } repoContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideHelmSyncResources, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, 
setServiceAccountName(nsReconcilerName), containersWithRepoVolumeMutator(noneHelmContainers()), - containerResourcesMutator(overrideHelmSyncResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnvs), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -3103,10 +3167,12 @@ func TestRepoSyncWithOCI(t *testing.T) { ) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), containersWithRepoVolumeMutator(noneOciContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -3149,6 +3215,7 @@ func TestRepoSyncWithOCI(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), containersWithRepoVolumeMutator(noneOciContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -3178,6 +3245,7 @@ func TestRepoSyncWithOCI(t *testing.T) { nsReconcilerName, setServiceAccountName(nsReconcilerName), containersWithRepoVolumeMutator(noneOciContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -3228,6 +3296,7 @@ func TestRepoSyncWithOCI(t *testing.T) { }), setServiceAccountName(nsReconcilerName), fwiOciMutator(workloadIdentityPool), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -3265,6 +3334,7 @@ func TestRepoSyncWithOCI(t *testing.T) { } repoContainerEnv = testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideOciSyncResources, ReconcilerContainerResourceDefaults()) repoDeployment = repoSyncDeployment( nsReconcilerName, setAnnotations(map[string]string{ @@ -3272,7 +3342,7 @@ func TestRepoSyncWithOCI(t *testing.T) { }), setServiceAccountName(nsReconcilerName), fwiOciMutator(workloadIdentityPool), - containerResourcesMutator(overrideOciSyncResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("5"), setGeneration(5), ) @@ -3688,10 +3758,12 @@ func TestUpdateNamespaceReconcilerLogLevelWithOverride(t *testing.T) { validateRepoSyncStatus(t, wantRs, fakeClient) repoContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, nsReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) repoDeployment := repoSyncDeployment( nsReconcilerName, setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -3757,6 +3829,7 @@ func TestUpdateNamespaceReconcilerLogLevelWithOverride(t *testing.T) { setServiceAccountName(nsReconcilerName), secretMutator(nsReconcilerName+"-"+reposyncSSHKey), containerArgsMutator(containerArgs), + containerResourcesMutator(resourceOverrides), containerEnvMutator(repoContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -3861,7 +3934,7 @@ func validateDeployments(wants map[core.ID]*appsv1.Deployment, 
fakeDynamicClient // Compare Deployment ResourceVersion if diff := cmp.Diff(want.ResourceVersion, got.ResourceVersion); diff != "" { - return errors.Errorf("Unexpected Deployment ResourceVersion found for %q. Diff: %v", id, diff) + return errors.Errorf("Unexpected Deployment ResourceVersion found for %q: Diff (- want, + got): %v", id, diff) } // Compare Deployment Generation @@ -3871,17 +3944,17 @@ func validateDeployments(wants map[core.ID]*appsv1.Deployment, fakeDynamicClient // Compare Deployment Annotations if diff := cmp.Diff(want.Annotations, got.Annotations); diff != "" { - return errors.Errorf("Unexpected Deployment Annotations found for %q. Diff (- want, + got): %v", id, diff) + return errors.Errorf("Unexpected Deployment Annotations found for %q: Diff (- want, + got): %v", id, diff) } // Compare Deployment Template Annotations. if diff := cmp.Diff(want.Spec.Template.Annotations, got.Spec.Template.Annotations); diff != "" { - return errors.Errorf("Unexpected Template Annotations found for %q. Diff (- want, + got): %v", id, diff) + return errors.Errorf("Unexpected Template Annotations found for %q: Diff (- want, + got): %v", id, diff) } // Compare ServiceAccountName. if diff := cmp.Diff(want.Spec.Template.Spec.ServiceAccountName, got.Spec.Template.Spec.ServiceAccountName); diff != "" { - return errors.Errorf("Unexpected ServiceAccountName for %q. Diff (- want, + got): %v", id, diff) + return errors.Errorf("Unexpected ServiceAccountName for %q: Diff (- want, + got): %v", id, diff) } // Compare Replicas @@ -3908,23 +3981,23 @@ func validateDeployments(wants map[core.ID]*appsv1.Deployment, fakeDynamicClient // Compare EnvFrom fields in the container. if diff := cmp.Diff(i.EnvFrom, j.EnvFrom, cmpopts.SortSlices(func(x, y corev1.EnvFromSource) bool { return x.ConfigMapRef.Name < y.ConfigMapRef.Name })); diff != "" { - return errors.Errorf("Unexpected configMapRef found for the %q container of %q, diff %s", i.Name, id, diff) + return errors.Errorf("Unexpected configMapRef found for the %q container of %q: Diff (- want, + got): %v", i.Name, id, diff) } // Compare VolumeMount fields in the container. if diff := cmp.Diff(i.VolumeMounts, j.VolumeMounts, cmpopts.SortSlices(func(x, y corev1.VolumeMount) bool { return x.Name < y.Name })); diff != "" { - return errors.Errorf("Unexpected volumeMount found for the %q container of %q, diff %s", i.Name, id, diff) + return errors.Errorf("Unexpected volumeMount found for the %q container of %q: Diff (- want, + got): %v", i.Name, id, diff) } // Compare Env fields in the container. if diff := cmp.Diff(i.Env, j.Env, cmpopts.SortSlices(func(x, y corev1.EnvVar) bool { return x.Name < y.Name })); diff != "" { - return errors.Errorf("Unexpected EnvVar found for the %q container of %q, diff %s", i.Name, id, diff) + return errors.Errorf("Unexpected EnvVar found for the %q container of %q: Diff (- want, + got): %v", i.Name, id, diff) } // Compare Resources fields in the container. 
if diff := cmp.Diff(i.Resources, j.Resources); diff != "" { - return errors.Errorf("Unexpected resources found for the %q container of %q, diff %s", i.Name, id, diff) + return errors.Errorf("Unexpected resources found for the %q container of %q: Diff (- want, + got): %v", i.Name, id, diff) } // Compare Args diff --git a/pkg/reconcilermanager/controllers/rootsync_controller.go b/pkg/reconcilermanager/controllers/rootsync_controller.go index 08a29c8bcb..0e554b1802 100644 --- a/pkg/reconcilermanager/controllers/rootsync_controller.go +++ b/pkg/reconcilermanager/controllers/rootsync_controller.go @@ -203,21 +203,28 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile return errors.Errorf("invalid source type: %s", rs.Spec.SourceType) } if _, err := r.upsertServiceAccount(ctx, reconcilerRef, auth, gcpSAEmail, labelMap); err != nil { - return err + return errors.Wrap(err, "upserting service account") } // Overwrite reconciler clusterrolebinding. if _, err := r.upsertClusterRoleBinding(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "upserting cluster role binding") + } + + // Upsert autoscaling config for the reconciler deployment + autoscalingStrategy := reconcilerAutoscalingStrategy(rs) + _, autoscale, err := r.upsertVerticalPodAutoscaler(ctx, autoscalingStrategy, reconcilerRef, labelMap) + if err != nil { + return errors.Wrap(err, "upserting autoscaler") } containerEnvs := r.populateContainerEnvs(ctx, rs, reconcilerRef.Name) - mut := r.mutationsFor(ctx, rs, containerEnvs) + mut := r.mutationsFor(ctx, rs, containerEnvs, autoscale) // Upsert Root reconciler deployment. deployObj, op, err := r.upsertDeployment(ctx, reconcilerRef, labelMap, mut) if err != nil { - return err + return errors.Wrap(err, "upserting reconciler deployment") } // Get the latest deployment to check the status. @@ -225,7 +232,7 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile if op == controllerutil.OperationResultNone { deployObj, err = r.deployment(ctx, reconcilerRef) if err != nil { - return err + return errors.Wrap(err, "getting reconciler deployment") } } @@ -240,7 +247,7 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile result, err := kstatus.Compute(deployObj) if err != nil { - return errors.Wrap(err, "computing reconciler Deployment status failed") + return errors.Wrap(err, "computing reconciler deployment status") } r.logger(ctx).V(3).Info("Reconciler status", @@ -375,8 +382,12 @@ func (r *RootSyncReconciler) handleReconcileError(ctx context.Context, err error func (r *RootSyncReconciler) deleteManagedObjects(ctx context.Context, reconcilerRef types.NamespacedName) error { r.logger(ctx).Info("Deleting managed objects") + if err := r.deleteVerticalPodAutoscaler(ctx, reconcilerRef); err != nil { + return errors.Wrap(err, "deleting autoscaler") + } + if err := r.deleteDeployment(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting reconciler deployment") } // Note: ConfigMaps have been replaced by Deployment env vars. @@ -384,17 +395,21 @@ func (r *RootSyncReconciler) deleteManagedObjects(ctx context.Context, reconcile // This deletion remains to clean up after users upgrade. if err := r.deleteConfigMaps(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting config maps") } // Note: ReconcilerManager doesn't manage the RootSync Secret. // So we don't need to delete it here. 
 	if err := r.deleteClusterRoleBinding(ctx, reconcilerRef); err != nil {
-		return err
+		return errors.Wrap(err, "deleting cluster role bindings")
+	}
+
+	if err := r.deleteServiceAccount(ctx, reconcilerRef); err != nil {
+		return errors.Wrap(err, "deleting service account")
 	}
 
-	return r.deleteServiceAccount(ctx, reconcilerRef)
+	return nil
 }
 
 // SetupWithManager registers RootSync controller with reconciler-manager.
@@ -751,6 +766,10 @@ func (r *RootSyncReconciler) validateRootSecret(ctx context.Context, rootSync *v
 		return errors.Errorf("The managed secret name %q is invalid: %s. To fix it, update '.spec.git.secretRef.name'", secretName, strings.Join(errs, ", "))
 	}
 
+	if err := r.validateAnnotations(ctx, rootSync); err != nil {
+		return err
+	}
+
 	secret, err := validateSecretExist(ctx,
 		v1beta1.GetSecretName(rootSync.Spec.SecretRef),
 		rootSync.Namespace,
@@ -851,7 +870,7 @@ func enableRendering(annotations map[string]string) bool {
 	return renderingRequired
 }
 
-func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootSync, containerEnvs map[string][]corev1.EnvVar) mutateFn {
+func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootSync, containerEnvs map[string][]corev1.EnvVar, autoscale bool) mutateFn {
 	return func(obj client.Object) error {
 		d, ok := obj.(*appsv1.Deployment)
 		if !ok {
@@ -908,14 +927,27 @@ func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootS
 		// in the RootSync CR.
 		templateSpec.Volumes = filterVolumes(templateSpec.Volumes, auth, secretRefName, caCertSecretRefName, rs.Spec.SourceType, r.membership)
 
-		var updatedContainers []corev1.Container
+		// Resource priority order:
+		// - user-specified resource overrides (from RootSync)
+		// - autoscale defaults (hard-coded), if enabled
+		// - non-autoscale defaults (hard-coded)
+		// - declared defaults (from ConfigMap, if specified)
+		overrides := rs.Spec.SafeOverride()
+		resourceOverrides := overrides.Resources
+		if autoscale {
+			resourceOverrides = setContainerResourceDefaults(resourceOverrides,
+				ReconcilerContainerResourceAutoscaleDefaults())
+		} else {
+			resourceOverrides = setContainerResourceDefaults(resourceOverrides,
+				ReconcilerContainerResourceDefaults())
+		}
+
+		var updatedContainers []corev1.Container
 		for _, container := range templateSpec.Containers {
 			addContainer := true
 			switch container.Name {
 			case reconcilermanager.Reconciler:
 				container.Env = append(container.Env, containerEnvs[container.Name]...)
-				mutateContainerResource(&container, rs.Spec.Override)
 			case reconcilermanager.HydrationController:
 				if !enableRendering(rs.GetAnnotations()) {
 					// if the sync source does not require rendering, omit the hydration controller
@@ -924,7 +956,6 @@ func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootS
 				} else {
 					container.Env = append(container.Env, containerEnvs[container.Name]...)
 					container.Image = updateHydrationControllerImage(container.Image, *rs.Spec.SafeOverride())
-					mutateContainerResource(&container, rs.Spec.Override)
 				}
 			case reconcilermanager.OciSync:
 				// Don't add the oci-sync container when sourceType is NOT oci.
@@ -933,7 +964,6 @@ func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootS
 				} else {
 					container.Env = append(container.Env, containerEnvs[container.Name]...)
 					injectFWICredsToContainer(&container, injectFWICreds)
-					mutateContainerResource(&container, rs.Spec.Override)
 				}
 			case reconcilermanager.HelmSync:
 				// Don't add the helm-sync container when sourceType is NOT helm. 
@@ -947,7 +977,6 @@ func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootS } mountConfigMapValuesFiles(templateSpec, &container, r.getReconcilerHelmConfigMapRefs(rs)) injectFWICredsToContainer(&container, injectFWICreds) - mutateContainerResource(&container, rs.Spec.Override) } case reconcilermanager.GitSync: // Don't add the git-sync container when sourceType is NOT git. @@ -966,7 +995,6 @@ func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootS sRef := client.ObjectKey{Namespace: rs.Namespace, Name: secretName} keys := GetSecretKeys(ctx, r.client, sRef) container.Env = append(container.Env, gitSyncHTTPSProxyEnv(secretName, keys)...) - mutateContainerResource(&container, rs.Spec.Override) } case reconcilermanager.GCENodeAskpassSidecar: if !enableAskpassSidecar(rs.Spec.SourceType, auth) { @@ -977,12 +1005,14 @@ func (r *RootSyncReconciler) mutationsFor(ctx context.Context, rs *v1beta1.RootS // TODO: enable resource/logLevel overrides for gcenode-askpass-sidecar } case metrics.OtelAgentName: - // The no-op case to avoid unknown container error after - // first-ever reconcile. + container.Env = append(container.Env, containerEnvs[container.Name]...) default: return errors.Errorf("unknown container in reconciler deployment template: %q", container.Name) } if addContainer { + // Common mutations for all containers + mutateContainerResource(&container, resourceOverrides) + mutateContainerLogLevel(&container, overrides.LogLevels) updatedContainers = append(updatedContainers, container) } } diff --git a/pkg/reconcilermanager/controllers/rootsync_controller_manager_test.go b/pkg/reconcilermanager/controllers/rootsync_controller_manager_test.go index 3461e5558a..9294d243d3 100644 --- a/pkg/reconcilermanager/controllers/rootsync_controller_manager_test.go +++ b/pkg/reconcilermanager/controllers/rootsync_controller_manager_test.go @@ -59,7 +59,7 @@ func TestRootSyncReconcilerDeploymentLifecycle(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) secretObj := secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace)) - fakeClient, _, testReconciler := setupRootReconciler(t, secretObj) + fakeClient, _, testReconciler := setupRootReconciler(t, core.Scheme, secretObj) defer logObjectYAMLIfFailed(t, fakeClient, rs) @@ -139,7 +139,7 @@ func TestReconcileInvalidRootSyncLifecycle(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeyToken), rootsyncSecretRef(rootsyncSSHKey)) secretObj := secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace)) - fakeClient, _, testReconciler := setupRootReconciler(t, secretObj) + fakeClient, _, testReconciler := setupRootReconciler(t, core.Scheme, secretObj) defer logObjectYAMLIfFailed(t, fakeClient, rs) @@ -212,7 +212,7 @@ func TestReconcileRootSyncLifecycleValidToInvalid1(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) secretObj := secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace)) - fakeClient, _, testReconciler := setupRootReconciler(t, secretObj) + fakeClient, _, testReconciler := setupRootReconciler(t, core.Scheme, secretObj) defer logObjectYAMLIfFailed(t, 
fakeClient, rs) @@ -449,7 +449,7 @@ func testRootSyncDriftProtection(t *testing.T, exampleObj client.Object, objKeyF t.Log("building RootSyncReconciler") syncObj := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) secretObj := secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(syncObj.Namespace)) - fakeClient, _, testReconciler := setupRootReconciler(t, secretObj) + fakeClient, _, testReconciler := setupRootReconciler(t, core.Scheme, secretObj) testDriftProtection(t, fakeClient, testReconciler, syncObj, exampleObj, objKeyFunc, modify, validate) } diff --git a/pkg/reconcilermanager/controllers/rootsync_controller_test.go b/pkg/reconcilermanager/controllers/rootsync_controller_test.go index 640ecf1e08..44e398ff19 100644 --- a/pkg/reconcilermanager/controllers/rootsync_controller_test.go +++ b/pkg/reconcilermanager/controllers/rootsync_controller_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/utils/pointer" @@ -113,13 +114,13 @@ func secretObjWithProxy(t *testing.T, name string, auth configsync.AuthType, opt return result } -func setupRootReconciler(t *testing.T, objs ...client.Object) (*syncerFake.Client, *syncerFake.DynamicClient, *RootSyncReconciler) { +func setupRootReconciler(t *testing.T, scheme *runtime.Scheme, objs ...client.Object) (*syncerFake.Client, *syncerFake.DynamicClient, *RootSyncReconciler) { t.Helper() // Configure controller-manager to log to the test logger controllerruntime.SetLogger(testr.New(t)) - cs := syncerFake.NewClientSet(t, core.Scheme) + cs := syncerFake.NewClientSet(t, scheme) ctx := context.Background() for _, obj := range objs { @@ -312,7 +313,7 @@ func TestCreateAndUpdateRootReconcilerWithOverride(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey), rootsyncOverrideResources(overrideAllContainerResources)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. 
ctx := context.Background() @@ -371,10 +372,11 @@ func TestCreateAndUpdateRootReconcilerWithOverride(t *testing.T) { } rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(overrideSelectedResources, ReconcilerContainerResourceDefaults()) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), - containerResourcesMutator(overrideSelectedResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -400,9 +402,11 @@ func TestCreateAndUpdateRootReconcilerWithOverride(t *testing.T) { } rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides = setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -422,7 +426,7 @@ func TestUpdateRootReconcilerWithOverride(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. 
ctx := context.Background() @@ -431,10 +435,12 @@ func TestUpdateRootReconcilerWithOverride(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -488,10 +494,11 @@ func TestUpdateRootReconcilerWithOverride(t *testing.T) { } rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideAllContainerResources, ReconcilerContainerResourceDefaults()) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), - containerResourcesMutator(overrideAllContainerResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -538,10 +545,11 @@ func TestUpdateRootReconcilerWithOverride(t *testing.T) { } rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideReconcilerAndHydrationResources, ReconcilerContainerResourceDefaults()) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), - containerResourcesMutator(overrideReconcilerAndHydrationResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -578,10 +586,11 @@ func TestUpdateRootReconcilerWithOverride(t *testing.T) { } rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideGitSyncResources, ReconcilerContainerResourceDefaults()) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), - containerResourcesMutator(overrideGitSyncResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -608,9 +617,11 @@ func TestUpdateRootReconcilerWithOverride(t *testing.T) { } rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides = setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("5"), setGeneration(5), ) @@ -630,7 +641,7 @@ func TestRootSyncCreateWithNoSSLVerify(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey), rootsyncNoSSLVerify()) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - _, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + _, fakeDynamicClient, testReconciler := setupRootReconciler(t, 
core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. ctx := context.Background() @@ -639,10 +650,12 @@ func TestRootSyncCreateWithNoSSLVerify(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -663,7 +676,7 @@ func TestRootSyncUpdateNoSSLVerify(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. ctx := context.Background() @@ -672,9 +685,11 @@ func TestRootSyncUpdateNoSSLVerify(t *testing.T) { } rootContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -726,6 +741,7 @@ func TestRootSyncUpdateNoSSLVerify(t *testing.T) { updatedRootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -755,6 +771,7 @@ func TestRootSyncUpdateNoSSLVerify(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -780,7 +797,7 @@ func TestRootSyncCreateWithCACertSecret(t *testing.T) { gitSecret.Data[GitSecretConfigKeyTokenUsername] = []byte("test-user") certSecret := secretObj(t, caCertSecret, GitSecretConfigKeyToken, v1beta1.GitSource, core.Namespace(rs.Namespace)) certSecret.Data[CACertSecretKey] = []byte("test-data") - _, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, gitSecret, certSecret) + _, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, gitSecret, certSecret) // Test creating Deployment resources. 
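For readers skimming the long runs of rootSyncDeployment(...) expectations: the helpers compose small Deployment mutators, and the containerResourcesMutator(resourceOverrides) lines added throughout this file are simply one more mutator in that chain. A stripped-down sketch of the pattern under that assumption; the builder body and withServiceAccount are illustrative, not the test file's actual helpers.

package sketch

import appsv1 "k8s.io/api/apps/v1"

// depMutator matches the shape used by the test helpers: a function that
// adjusts one aspect of the expected Deployment.
type depMutator func(*appsv1.Deployment)

// expectedDeployment is an illustrative stand-in for rootSyncDeployment: build
// a base object, then apply each mutator in order, so a later mutator wins if
// two touch the same field.
func expectedDeployment(name string, muts ...depMutator) *appsv1.Deployment {
	dep := &appsv1.Deployment{}
	dep.Name = name
	for _, mutate := range muts {
		mutate(dep)
	}
	return dep
}

// withServiceAccount shows the mutator style, analogous to
// setServiceAccountName in the real tests.
func withServiceAccount(sa string) depMutator {
	return func(dep *appsv1.Deployment) {
		dep.Spec.Template.Spec.ServiceAccountName = sa
	}
}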
ctx := context.Background() @@ -789,10 +806,11 @@ func TestRootSyncCreateWithCACertSecret(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) - + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), caCertSecretMutator(secretName, caCertSecret), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", secretName, "https_proxy"), envVarMutator(gitSyncName, secretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, secretName, GitSecretConfigKeyToken), @@ -818,7 +836,7 @@ func TestRootSyncUpdateCACertSecret(t *testing.T) { gitSecret.Data[GitSecretConfigKeyTokenUsername] = []byte("test-user") certSecret := secretObj(t, caCertSecret, GitSecretConfigKeyToken, v1beta1.GitSource, core.Namespace(rs.Namespace)) certSecret.Data[CACertSecretKey] = []byte("test-data") - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, gitSecret, certSecret) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, gitSecret, certSecret) // Test creating Deployment resources. ctx := context.Background() @@ -827,9 +845,11 @@ func TestRootSyncUpdateCACertSecret(t *testing.T) { } rootContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(secretName), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", secretName, "https_proxy"), envVarMutator(gitSyncName, secretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, secretName, GitSecretConfigKeyToken), @@ -878,6 +898,7 @@ func TestRootSyncUpdateCACertSecret(t *testing.T) { updatedRootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), caCertSecretMutator(secretName, caCertSecret), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", secretName, "https_proxy"), envVarMutator(gitSyncName, secretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, secretName, GitSecretConfigKeyToken), @@ -907,6 +928,7 @@ func TestRootSyncUpdateCACertSecret(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(secretName), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", secretName, "https_proxy"), envVarMutator(gitSyncName, secretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, secretName, GitSecretConfigKeyToken), @@ -930,7 +952,7 @@ func TestRootSyncReconcileWithInvalidCACertSecret(t *testing.T) { gitSecret := secretObjWithProxy(t, secretName, GitSecretConfigKeyToken, core.Namespace(rs.Namespace)) gitSecret.Data[GitSecretConfigKeyTokenUsername] = []byte("test-user") certSecret := secretObj(t, caCertSecret, GitSecretConfigKeyToken, v1beta1.GitSource, core.Namespace(rs.Namespace)) - fakeClient, _, testReconciler := setupRootReconciler(t, rs, gitSecret, certSecret) + fakeClient, _, testReconciler := setupRootReconciler(t, core.Scheme, rs, gitSecret, certSecret) // reconcile ctx := context.Background() @@ -953,7 +975,7 @@ func TestRootSyncWithInvalidCACertSecret(t *testing.T) { gitSecret := secretObjWithProxy(t, secretName, 
GitSecretConfigKeyToken, core.Namespace(rs.Namespace)) gitSecret.Data[GitSecretConfigKeyTokenUsername] = []byte("test-user") certSecret := secretObj(t, caCertSecret, GitSecretConfigKeyToken, v1beta1.GitSource, core.Namespace(rs.Namespace)) - _, _, testReconciler := setupRootReconciler(t, rs, gitSecret, certSecret) + _, _, testReconciler := setupRootReconciler(t, core.Scheme, rs, gitSecret, certSecret) ctx := context.Background() // validation should return an error @@ -972,7 +994,7 @@ func TestRootSyncWithoutCACertSecret(t *testing.T) { gitSecret.Data[GitSecretConfigKeyTokenUsername] = []byte("test-user") // no cert secret is setup to trigger not found error - _, _, testReconciler := setupRootReconciler(t, rs, gitSecret) + _, _, testReconciler := setupRootReconciler(t, core.Scheme, rs, gitSecret) ctx := context.Background() // validation should return a not found error @@ -987,7 +1009,7 @@ func TestRootSyncCreateWithOverrideGitSyncDepth(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey), rootsyncOverrideGitSyncDepth(5)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - _, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + _, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. ctx := context.Background() @@ -996,10 +1018,12 @@ func TestRootSyncCreateWithOverrideGitSyncDepth(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1020,7 +1044,7 @@ func TestRootSyncUpdateOverrideGitSyncDepth(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. 
ctx := context.Background() @@ -1029,9 +1053,11 @@ func TestRootSyncUpdateOverrideGitSyncDepth(t *testing.T) { } rootContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1063,6 +1089,7 @@ func TestRootSyncUpdateOverrideGitSyncDepth(t *testing.T) { updatedRootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1094,6 +1121,7 @@ func TestRootSyncUpdateOverrideGitSyncDepth(t *testing.T) { updatedRootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -1124,6 +1152,7 @@ func TestRootSyncUpdateOverrideGitSyncDepth(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -1164,7 +1193,7 @@ func TestRootSyncCreateWithOverrideReconcileTimeout(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey), rootsyncOverrideReconcileTimeout(metav1.Duration{Duration: 50 * time.Second})) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - _, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + _, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. 
ctx := context.Background() @@ -1173,10 +1202,12 @@ func TestRootSyncCreateWithOverrideReconcileTimeout(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1197,7 +1228,7 @@ func TestRootSyncUpdateOverrideReconcileTimeout(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. ctx := context.Background() @@ -1205,9 +1236,11 @@ func TestRootSyncUpdateOverrideReconcileTimeout(t *testing.T) { t.Fatalf("unexpected reconciliation error, got error: %q, want error: nil", err) } rootContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1238,6 +1271,7 @@ func TestRootSyncUpdateOverrideReconcileTimeout(t *testing.T) { updatedRootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1268,6 +1302,7 @@ func TestRootSyncUpdateOverrideReconcileTimeout(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -1308,7 +1343,7 @@ func TestRootSyncCreateWithOverrideAPIServerTimeout(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey), rootsyncOverrideReconcileTimeout(metav1.Duration{Duration: 50 * time.Second})) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - _, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + _, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. 
ctx := context.Background() @@ -1317,10 +1352,12 @@ func TestRootSyncCreateWithOverrideAPIServerTimeout(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1338,7 +1375,7 @@ func TestRootSyncUpdateOverrideAPIServerTimeout(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. ctx := context.Background() @@ -1346,9 +1383,11 @@ func TestRootSyncUpdateOverrideAPIServerTimeout(t *testing.T) { t.Fatalf("unexpected reconciliation error, got error: %q, want error: nil", err) } rootContainerEnv := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1379,6 +1418,7 @@ func TestRootSyncUpdateOverrideAPIServerTimeout(t *testing.T) { updatedRootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1408,6 +1448,7 @@ func TestRootSyncUpdateOverrideAPIServerTimeout(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -1448,7 +1489,7 @@ func TestRootSyncSwitchAuthTypes(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(configsync.AuthGCPServiceAccount), rootsyncGCPSAEmail(gcpSAEmail)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources with GCPServiceAccount auth type. 
ctx := context.Background() @@ -1471,10 +1512,12 @@ func TestRootSyncSwitchAuthTypes(t *testing.T) { ) rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1513,6 +1556,7 @@ func TestRootSyncSwitchAuthTypes(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -1543,6 +1587,7 @@ func TestRootSyncSwitchAuthTypes(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), containersWithRepoVolumeMutator(noneGitContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -1563,7 +1608,7 @@ func TestRootSyncReconcilerRestart(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - _, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + _, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. 
ctx := context.Background() @@ -1572,9 +1617,11 @@ func TestRootSyncReconcilerRestart(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1616,6 +1663,7 @@ func TestRootSyncReconcilerRestart(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setReplicas(1), // Change reverted setUID("1"), setResourceVersion("3"), setGeneration(3), @@ -1655,7 +1703,7 @@ func TestMultipleRootSyncs(t *testing.T) { secret5 := secretObjWithProxy(t, secretName, GitSecretConfigKeyToken, core.Namespace(rs5.Namespace)) secret5.Data[GitSecretConfigKeyTokenUsername] = []byte("test-user") - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs1, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs1.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs1, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs1.Namespace))) rootReconcilerName2 := core.RootReconcilerName(rs2.Name) rootReconcilerName3 := core.RootReconcilerName(rs3.Name) @@ -1696,9 +1744,11 @@ func TestMultipleRootSyncs(t *testing.T) { ) crb.Subjects = addSubjectByName(crb.Subjects, rootReconcilerName) rootContainerEnv1 := testReconciler.populateContainerEnvs(ctx, rs1, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment1 := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv1), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1744,6 +1794,7 @@ func TestMultipleRootSyncs(t *testing.T) { rootDeployment2 := rootSyncDeployment(rootReconcilerName2, setServiceAccountName(rootReconcilerName2), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv2), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1800,6 +1851,7 @@ func TestMultipleRootSyncs(t *testing.T) { rootDeployment3 := rootSyncDeployment(rootReconcilerName3, setServiceAccountName(rootReconcilerName3), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv3), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -1860,6 +1912,7 @@ func TestMultipleRootSyncs(t *testing.T) { rootDeployment4 := rootSyncDeployment(rootReconcilerName4, setServiceAccountName(rootReconcilerName4), secretMutator(reposyncCookie), + containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", reposyncCookie, "https_proxy"), containerEnvMutator(rootContainerEnvs4), setUID("1"), setResourceVersion("1"), setGeneration(1), @@ -1920,6 +1973,7 @@ func TestMultipleRootSyncs(t *testing.T) { rootDeployment5 := rootSyncDeployment(rootReconcilerName5, setServiceAccountName(rootReconcilerName5), secretMutator(secretName), + 
containerResourcesMutator(resourceOverrides), envVarMutator("HTTPS_PROXY", secretName, "https_proxy"), envVarMutator(gitSyncName, secretName, GitSecretConfigKeyTokenUsername), envVarMutator(gitSyncPassword, secretName, GitSecretConfigKeyToken), @@ -1978,6 +2032,7 @@ func TestMultipleRootSyncs(t *testing.T) { rootDeployment1 = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv1), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2015,6 +2070,7 @@ func TestMultipleRootSyncs(t *testing.T) { rootDeployment2 = rootSyncDeployment(rootReconcilerName2, setServiceAccountName(rootReconcilerName2), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv2), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2053,6 +2109,7 @@ func TestMultipleRootSyncs(t *testing.T) { rootDeployment3 = rootSyncDeployment(rootReconcilerName3, setServiceAccountName(rootReconcilerName3), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnv3), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2268,7 +2325,7 @@ func TestMapSecretToRootSyncs(t *testing.T) { objs = append(objs, rs) } } - _, _, testReconciler := setupRootReconciler(t, objs...) + _, _, testReconciler := setupRootReconciler(t, core.Scheme, objs...) result := testReconciler.mapSecretToRootSyncs(tc.secret) if len(tc.want) != len(result) { @@ -2296,7 +2353,7 @@ func TestInjectFleetWorkloadIdentityCredentialsToRootSync(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(configsync.AuthGCPServiceAccount), rootsyncGCPSAEmail(gcpSAEmail)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // The membership doesn't have WorkloadIdentityPool and IdentityProvider specified, so FWI creds won't be injected. 
testReconciler.membership = &hubv1.Membership{ Spec: hubv1.MembershipSpec{ @@ -2311,10 +2368,12 @@ func TestInjectFleetWorkloadIdentityCredentialsToRootSync(t *testing.T) { t.Fatalf("unexpected reconciliation error, got error: %q, want error: nil", err) } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), gceNodeMutator(), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -2348,6 +2407,7 @@ func TestInjectFleetWorkloadIdentityCredentialsToRootSync(t *testing.T) { }), setServiceAccountName(rootReconcilerName), fleetWorkloadIdentityMutator(workloadIdentityPool), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2384,6 +2444,7 @@ func TestInjectFleetWorkloadIdentityCredentialsToRootSync(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -2414,6 +2475,7 @@ func TestInjectFleetWorkloadIdentityCredentialsToRootSync(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), containersWithRepoVolumeMutator(noneGitContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -2437,7 +2499,7 @@ func TestRootSyncWithHelm(t *testing.T) { rootsyncHelmAuthType(configsync.AuthToken), rootsyncHelmSecretRef(secretName)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) helmSecret := secretObj(t, secretName, configsync.AuthToken, v1beta1.HelmSource, core.Namespace(rs.Namespace)) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, helmSecret) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, helmSecret) // Test creating Deployment resources. 
ctx := context.Background() @@ -2446,10 +2508,12 @@ func TestRootSyncWithHelm(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), helmSecretMutator(secretName), + containerResourcesMutator(resourceOverrides), envVarMutator(helmSyncName, secretName, HelmSecretKeyUsername), envVarMutator(helmSyncPassword, secretName, HelmSecretKeyPassword), containerEnvMutator(rootContainerEnvs), @@ -2479,6 +2543,7 @@ func TestRootSyncWithHelm(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), containersWithRepoVolumeMutator(noneHelmContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2513,12 +2578,13 @@ func TestRootSyncWithHelm(t *testing.T) { if _, err := testReconciler.Reconcile(ctx, reqNamespacedName); err != nil { t.Fatalf("unexpected reconciliation error upon request update, got error: %q, want error: nil", err) } + resourceOverrides = setContainerResourceDefaults(overrideHelmSyncResources, ReconcilerContainerResourceDefaults()) rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), containersWithRepoVolumeMutator(noneHelmContainers()), - containerResourcesMutator(overrideHelmSyncResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -2538,7 +2604,7 @@ func TestRootSyncWithOCI(t *testing.T) { rs := rootSyncWithOCI(rootsyncName, rootsyncOCIAuthType(configsync.AuthNone)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs) // Test creating Deployment resources with GCPServiceAccount auth type. 
ctx := context.Background() @@ -2560,10 +2626,12 @@ func TestRootSyncWithOCI(t *testing.T) { ) rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), containersWithRepoVolumeMutator(noneOciContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -2605,6 +2673,7 @@ func TestRootSyncWithOCI(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), containersWithRepoVolumeMutator(noneOciContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -2646,6 +2715,7 @@ func TestRootSyncWithOCI(t *testing.T) { rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), containersWithRepoVolumeMutator(noneOciContainers()), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("3"), setGeneration(3), ) @@ -2680,6 +2750,7 @@ func TestRootSyncWithOCI(t *testing.T) { }), setServiceAccountName(rootReconcilerName), fwiOciMutator(workloadIdentityPool), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("4"), setGeneration(4), ) @@ -2717,13 +2788,14 @@ func TestRootSyncWithOCI(t *testing.T) { } rootContainerEnvs = testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) + resourceOverrides = setContainerResourceDefaults(overrideOciSyncResources, ReconcilerContainerResourceDefaults()) rootDeployment = rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), setAnnotations(map[string]string{ metadata.FleetWorkloadIdentityCredentials: `{"audience":"identitynamespace:test-gke-dev.svc.id.goog:https://container.googleapis.com/v1/projects/test-gke-dev/locations/us-central1-c/clusters/fleet-workload-identity-test-cluster","credential_source":{"file":"/var/run/secrets/tokens/gcp-ksa/token"},"service_account_impersonation_url":"https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/config-sync@cs-project.iam.gserviceaccount.com:generateAccessToken","subject_token_type":"urn:ietf:params:oauth:token-type:jwt","token_url":"https://sts.googleapis.com/v1/token","type":"external_account"}`, }), fwiOciMutator(workloadIdentityPool), - containerResourcesMutator(overrideOciSyncResources), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("5"), setGeneration(5), ) @@ -2743,7 +2815,7 @@ func TestRootSyncSpecValidation(t *testing.T) { rs := fake.RootSyncObjectV1Beta1(rootsyncName) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, _, testReconciler := setupRootReconciler(t, rs) + fakeClient, _, testReconciler := setupRootReconciler(t, core.Scheme, rs) // Verify unsupported source type ctx := context.Background() @@ -2936,7 +3008,7 @@ func TestRootSyncSpecValidation(t *testing.T) { func TestRootSyncReconcileStaleClientCache(t *testing.T) { rs := fake.RootSyncObjectV1Beta1(rootsyncName) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, _, testReconciler := setupRootReconciler(t, rs) + fakeClient, _, testReconciler 
:= setupRootReconciler(t, core.Scheme, rs) ctx := context.Background() rs.ResourceVersion = "1" @@ -3108,7 +3180,7 @@ func TestPopulateRootContainerEnvs(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, _, testReconciler := setupRootReconciler(t, tc.rootSync, secretObj(t, reposyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(tc.rootSync.Namespace))) + _, _, testReconciler := setupRootReconciler(t, core.Scheme, tc.rootSync, secretObj(t, reposyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(tc.rootSync.Namespace))) env := testReconciler.populateContainerEnvs(ctx, tc.rootSync, rootReconcilerName) @@ -3127,7 +3199,7 @@ func TestUpdateRootReconcilerLogLevelWithOverride(t *testing.T) { rs := rootSyncWithGit(rootsyncName, rootsyncRef(gitRevision), rootsyncBranch(branch), rootsyncSecretType(GitSecretConfigKeySSH), rootsyncSecretRef(rootsyncSSHKey)) reqNamespacedName := namespacedName(rs.Name, rs.Namespace) - fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) + fakeClient, fakeDynamicClient, testReconciler := setupRootReconciler(t, core.Scheme, rs, secretObj(t, rootsyncSSHKey, configsync.AuthSSH, v1beta1.GitSource, core.Namespace(rs.Namespace))) // Test creating Deployment resources. ctx := context.Background() @@ -3136,10 +3208,11 @@ func TestUpdateRootReconcilerLogLevelWithOverride(t *testing.T) { } rootContainerEnvs := testReconciler.populateContainerEnvs(ctx, rs, rootReconcilerName) - + resourceOverrides := setContainerResourceDefaults(nil, ReconcilerContainerResourceDefaults()) rootDeployment := rootSyncDeployment(rootReconcilerName, setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("1"), setGeneration(1), ) @@ -3199,6 +3272,7 @@ func TestUpdateRootReconcilerLogLevelWithOverride(t *testing.T) { setServiceAccountName(rootReconcilerName), secretMutator(rootsyncSSHKey), containerArgsMutator(containerArgs), + containerResourcesMutator(resourceOverrides), containerEnvMutator(rootContainerEnvs), setUID("1"), setResourceVersion("2"), setGeneration(2), ) @@ -3407,19 +3481,16 @@ func fwiOciMutator(workloadIdentityPool string) depMutator { func fwiOciContainers() []corev1.Container { return []corev1.Container{ { - Name: reconcilermanager.Reconciler, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.Reconciler, + Args: defaultArgs(), }, { - Name: reconcilermanager.HydrationController, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.HydrationController, + Args: defaultArgs(), }, { - Name: reconcilermanager.OciSync, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.OciSync, + Args: defaultArgs(), Env: []corev1.EnvVar{{ Name: googleApplicationCredentialsEnvKey, Value: filepath.Join(gcpKSATokenDir, googleApplicationCredentialsFile), @@ -3450,15 +3521,11 @@ func setAnnotations(annotations map[string]string) depMutator { func containerResourcesMutator(overrides []v1beta1.ContainerResourcesSpec) depMutator { return func(dep *appsv1.Deployment) { - for _, container := range dep.Spec.Template.Spec.Containers { - switch container.Name { - case reconcilermanager.Reconciler, reconcilermanager.GitSync, - reconcilermanager.HydrationController, 
reconcilermanager.OciSync, - reconcilermanager.HelmSync: - for _, override := range overrides { - if override.ContainerName == container.Name { - mutateContainerResourceRequestsLimits(&container, override) - } + for i, container := range dep.Spec.Template.Spec.Containers { + for _, override := range overrides { + if override.ContainerName == container.Name { + mutateContainerResourceRequestsLimits(&container, override) + dep.Spec.Template.Spec.Containers[i] = container } } } @@ -3467,40 +3534,39 @@ func containerResourcesMutator(overrides []v1beta1.ContainerResourcesSpec) depMu func mutateContainerResourceRequestsLimits(container *corev1.Container, resourcesSpec v1beta1.ContainerResourcesSpec) { if !resourcesSpec.CPURequest.IsZero() { + if container.Resources.Requests == nil { + container.Resources.Requests = make(corev1.ResourceList) + } container.Resources.Requests[corev1.ResourceCPU] = resourcesSpec.CPURequest } else { - container.Resources.Requests[corev1.ResourceCPU] = resource.MustParse("100m") + delete(container.Resources.Requests, corev1.ResourceCPU) } if !resourcesSpec.CPULimit.IsZero() { + if container.Resources.Limits == nil { + container.Resources.Limits = make(corev1.ResourceList) + } container.Resources.Limits[corev1.ResourceCPU] = resourcesSpec.CPULimit } else { - container.Resources.Limits[corev1.ResourceCPU] = resource.MustParse("100m") + delete(container.Resources.Limits, corev1.ResourceCPU) } if !resourcesSpec.MemoryRequest.IsZero() { + if container.Resources.Requests == nil { + container.Resources.Requests = make(corev1.ResourceList) + } container.Resources.Requests[corev1.ResourceMemory] = resourcesSpec.MemoryRequest } else { - container.Resources.Requests[corev1.ResourceMemory] = resource.MustParse("100Mi") + delete(container.Resources.Requests, corev1.ResourceMemory) } if !resourcesSpec.MemoryLimit.IsZero() { + if container.Resources.Limits == nil { + container.Resources.Limits = make(corev1.ResourceList) + } container.Resources.Limits[corev1.ResourceMemory] = resourcesSpec.MemoryLimit } else { - container.Resources.Limits[corev1.ResourceMemory] = resource.MustParse("100Mi") - } -} - -func defaultResourceRequirements() corev1.ResourceRequirements { - return corev1.ResourceRequirements{ - Limits: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }, - Requests: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }, + delete(container.Resources.Limits, corev1.ResourceMemory) } } @@ -3519,38 +3585,38 @@ func defaultGitSyncArgs() []string { func defaultContainers() []corev1.Container { return []corev1.Container{ { - Name: reconcilermanager.Reconciler, - Resources: defaultResourceRequirements(), + Name: reconcilermanager.Reconciler, + Args: defaultArgs(), }, { - Name: reconcilermanager.HydrationController, - Resources: defaultResourceRequirements(), + Name: reconcilermanager.HydrationController, + Args: defaultArgs(), }, { - Name: reconcilermanager.GitSync, - Resources: defaultResourceRequirements(), + Name: reconcilermanager.GitSync, VolumeMounts: []corev1.VolumeMount{ {Name: "repo", MountPath: "/repo"}, {Name: "git-creds", MountPath: "/etc/git-secret", ReadOnly: true}, }, + Args: defaultGitSyncArgs(), }, { Name: reconcilermanager.GCENodeAskpassSidecar, }, { - Name: reconcilermanager.OciSync, - Resources: defaultResourceRequirements(), + Name: reconcilermanager.OciSync, 
VolumeMounts: []corev1.VolumeMount{ {Name: "repo", MountPath: "/repo"}, }, + Args: defaultArgs(), }, { - Name: reconcilermanager.HelmSync, - Resources: defaultResourceRequirements(), + Name: reconcilermanager.HelmSync, VolumeMounts: []corev1.VolumeMount{ {Name: "repo", MountPath: "/repo"}, {Name: "helm-creds", MountPath: "/etc/helm-secret", ReadOnly: true}, }, + Args: defaultArgs(), }, } } @@ -3567,18 +3633,15 @@ func secretMountContainers(caCertSecret string) []corev1.Container { } return []corev1.Container{ { - Name: reconcilermanager.Reconciler, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.Reconciler, + Args: defaultArgs(), }, { - Name: reconcilermanager.HydrationController, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.HydrationController, + Args: defaultArgs(), }, { Name: reconcilermanager.GitSync, - Resources: defaultResourceRequirements(), VolumeMounts: gitSyncVolumeMounts, Args: defaultGitSyncArgs(), }, @@ -3592,18 +3655,15 @@ func helmSecretMountContainers() []corev1.Container { } return []corev1.Container{ { - Name: reconcilermanager.Reconciler, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.Reconciler, + Args: defaultArgs(), }, { - Name: reconcilermanager.HydrationController, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.HydrationController, + Args: defaultArgs(), }, { Name: reconcilermanager.HelmSync, - Resources: defaultResourceRequirements(), VolumeMounts: helmSyncVolumeMounts, Args: defaultArgs(), }, @@ -3613,19 +3673,16 @@ func helmSecretMountContainers() []corev1.Container { func noneGitContainers() []corev1.Container { return []corev1.Container{ { - Name: reconcilermanager.Reconciler, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.Reconciler, + Args: defaultArgs(), }, { - Name: reconcilermanager.HydrationController, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.HydrationController, + Args: defaultArgs(), }, { - Name: reconcilermanager.GitSync, - Resources: defaultResourceRequirements(), - Args: defaultGitSyncArgs(), + Name: reconcilermanager.GitSync, + Args: defaultGitSyncArgs(), VolumeMounts: []corev1.VolumeMount{ {Name: "repo", MountPath: "/repo"}, }}, @@ -3635,19 +3692,16 @@ func noneGitContainers() []corev1.Container { func noneOciContainers() []corev1.Container { return []corev1.Container{ { - Name: reconcilermanager.Reconciler, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.Reconciler, + Args: defaultArgs(), }, { - Name: reconcilermanager.HydrationController, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.HydrationController, + Args: defaultArgs(), }, { - Name: reconcilermanager.OciSync, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.OciSync, + Args: defaultArgs(), VolumeMounts: []corev1.VolumeMount{ {Name: "repo", MountPath: "/repo"}, }}, @@ -3657,19 +3711,16 @@ func noneOciContainers() []corev1.Container { func noneHelmContainers() []corev1.Container { return []corev1.Container{ { - Name: reconcilermanager.Reconciler, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.Reconciler, + Args: defaultArgs(), }, { - Name: reconcilermanager.HydrationController, - Resources: defaultResourceRequirements(), - Args: 
defaultArgs(), + Name: reconcilermanager.HydrationController, + Args: defaultArgs(), }, { - Name: reconcilermanager.HelmSync, - Resources: defaultResourceRequirements(), - Args: defaultArgs(), + Name: reconcilermanager.HelmSync, + Args: defaultArgs(), VolumeMounts: []corev1.VolumeMount{ {Name: "repo", MountPath: "/repo"}, }}, diff --git a/pkg/util/autopilot.go b/pkg/util/autopilot.go index 5c8f608aa0..d31d1bff75 100644 --- a/pkg/util/autopilot.go +++ b/pkg/util/autopilot.go @@ -132,3 +132,11 @@ func AutopilotResourceMutation(annotation string) (map[string]corev1.ResourceReq } return input, output, nil } + +// FakeAutopilotWebhookObject returns a fake empty MutatingWebhookConfiguration +// that satisfies IsGKEAutopilotCluster, for testing. +func FakeAutopilotWebhookObject() client.Object { + webhook := &admissionregistrationv1.MutatingWebhookConfiguration{} + webhook.Name = autopilotWebhooks[0] + return webhook +} diff --git a/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/LICENSE b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/doc.go b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/doc.go new file mode 100644 index 0000000000..8b81690810 --- /dev/null +++ b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package,register + +// Package v1 contains definitions of Vertical Pod Autoscaler related objects. +// +groupName=autoscaling.k8s.io +// +kubebuilder:object:generate=true +package v1 diff --git a/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/register.go b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/register.go new file mode 100644 index 0000000000..70214933be --- /dev/null +++ b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: "autoscaling.k8s.io", Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VerticalPodAutoscaler{}, + &VerticalPodAutoscalerList{}, + &VerticalPodAutoscalerCheckpoint{}, + &VerticalPodAutoscalerCheckpointList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go new file mode 100644 index 0000000000..3842bcd719 --- /dev/null +++ b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go @@ -0,0 +1,393 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains definitions of Vertical Pod Autoscaler related objects. +package v1 + +import ( + autoscaling "k8s.io/api/autoscaling/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects. +type VerticalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of vertical pod autoscaler objects. + Items []VerticalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion +// +kubebuilder:resource:shortName=vpa +// +kubebuilder:printcolumn:name="Mode",type="string",JSONPath=".spec.updatePolicy.updateMode" +// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".status.recommendation.containerRecommendations[0].target.cpu" +// +kubebuilder:printcolumn:name="Mem",type="string",JSONPath=".status.recommendation.containerRecommendations[0].target.memory" +// +kubebuilder:printcolumn:name="Provided",type="string",JSONPath=".status.conditions[?(@.type=='RecommendationProvided')].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// VerticalPodAutoscaler is the configuration for a vertical pod +// autoscaler, which automatically manages pod resources based on historical and +// real time resource utilization. +type VerticalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the behavior of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + Spec VerticalPodAutoscalerSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Current information about the autoscaler. + // +optional + Status VerticalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// VerticalPodAutoscalerRecommenderSelector points to a specific Vertical Pod Autoscaler recommender. +// In the future it might pass parameters to the recommender. +type VerticalPodAutoscalerRecommenderSelector struct { + // Name of the recommender responsible for generating recommendation for this object. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler. +type VerticalPodAutoscalerSpec struct { + + // TargetRef points to the controller managing the set of pods for the + // autoscaler to control - e.g. Deployment, StatefulSet. VerticalPodAutoscaler + // can be targeted at controller implementing scale subresource (the pod set is + // retrieved from the controller's ScaleStatus) or some well known controllers + // (e.g. for DaemonSet the pod set is read from the controller's spec). + // If VerticalPodAutoscaler cannot use specified target it will report + // ConfigUnsupported condition. + // Note that VerticalPodAutoscaler does not require full implementation + // of scale subresource - it will not use it to modify the replica count. 
+ // The only thing retrieved is a label selector matching pods grouped by + // the target resource. + TargetRef *autoscaling.CrossVersionObjectReference `json:"targetRef" protobuf:"bytes,1,name=targetRef"` + + // Describes the rules on how changes are applied to the pods. + // If not specified, all fields in the `PodUpdatePolicy` are set to their + // default values. + // +optional + UpdatePolicy *PodUpdatePolicy `json:"updatePolicy,omitempty" protobuf:"bytes,2,opt,name=updatePolicy"` + + // Controls how the autoscaler computes recommended resources. + // The resource policy may be used to set constraints on the recommendations + // for individual containers. If not specified, the autoscaler computes recommended + // resources for all containers in the pod, without additional constraints. + // +optional + ResourcePolicy *PodResourcePolicy `json:"resourcePolicy,omitempty" protobuf:"bytes,3,opt,name=resourcePolicy"` + + // Recommender responsible for generating recommendation for this object. + // List should be empty (then the default recommender will generate the + // recommendation) or contain exactly one recommender. + // +optional + Recommenders []*VerticalPodAutoscalerRecommenderSelector `json:"recommenders,omitempty" protobuf:"bytes,4,opt,name=recommenders"` +} + +// PodUpdatePolicy describes the rules on how changes are applied to the pods. +type PodUpdatePolicy struct { + // Controls when autoscaler applies changes to the pod resources. + // The default is 'Auto'. + // +optional + UpdateMode *UpdateMode `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"` + + // Minimal number of replicas which need to be alive for Updater to attempt + // pod eviction (pending other checks like PDB). Only positive values are + // allowed. Overrides global '--min-replicas' flag. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` +} + +// UpdateMode controls when autoscaler applies changes to the pod resoures. +// +kubebuilder:validation:Enum=Off;Initial;Recreate;Auto +type UpdateMode string + +const ( + // UpdateModeOff means that autoscaler never changes Pod resources. + // The recommender still sets the recommended resources in the + // VerticalPodAutoscaler object. This can be used for a "dry run". + UpdateModeOff UpdateMode = "Off" + // UpdateModeInitial means that autoscaler only assigns resources on pod + // creation and does not change them during the lifetime of the pod. + UpdateModeInitial UpdateMode = "Initial" + // UpdateModeRecreate means that autoscaler assigns resources on pod + // creation and additionally can update them during the lifetime of the + // pod by deleting and recreating the pod. + UpdateModeRecreate UpdateMode = "Recreate" + // UpdateModeAuto means that autoscaler assigns resources on pod creation + // and additionally can update them during the lifetime of the pod, + // using any available update method. Currently this is equivalent to + // Recreate, which is the only available update method. + UpdateModeAuto UpdateMode = "Auto" +) + +// PodResourcePolicy controls how autoscaler computes the recommended resources +// for containers belonging to the pod. There can be at most one entry for every +// named container and optionally a single wildcard entry with `containerName` = '*', +// which handles all containers that don't have individual policies. +type PodResourcePolicy struct { + // Per-container resource policies. 
+ // +optional + // +patchMergeKey=containerName + // +patchStrategy=merge + ContainerPolicies []ContainerResourcePolicy `json:"containerPolicies,omitempty" patchStrategy:"merge" patchMergeKey:"containerName" protobuf:"bytes,1,rep,name=containerPolicies"` +} + +// ContainerResourcePolicy controls how autoscaler computes the recommended +// resources for a specific container. +type ContainerResourcePolicy struct { + // Name of the container or DefaultContainerResourcePolicy, in which + // case the policy is used by the containers that don't have their own + // policy specified. + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Whether autoscaler is enabled for the container. The default is "Auto". + // +optional + Mode *ContainerScalingMode `json:"mode,omitempty" protobuf:"bytes,2,opt,name=mode"` + // Specifies the minimal amount of resources that will be recommended + // for the container. The default is no minimum. + // +optional + MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` + // Specifies the maximum amount of resources that will be recommended + // for the container. The default is no maximum. + // +optional + MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` + + // Specifies the type of recommendations that will be computed + // (and possibly applied) by VPA. + // If not specified, the default of [ResourceCPU, ResourceMemory] will be used. + ControlledResources *[]v1.ResourceName `json:"controlledResources,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=controlledResources"` + + // Specifies which resource values should be controlled. + // The default is "RequestsAndLimits". + // +optional + ControlledValues *ContainerControlledValues `json:"controlledValues,omitempty" protobuf:"bytes,6,rep,name=controlledValues"` +} + +const ( + // DefaultContainerResourcePolicy can be passed as + // ContainerResourcePolicy.ContainerName to specify the default policy. + DefaultContainerResourcePolicy = "*" +) + +// ContainerScalingMode controls whether autoscaler is enabled for a specific +// container. +// +kubebuilder:validation:Enum=Auto;Off +type ContainerScalingMode string + +const ( + // ContainerScalingModeAuto means autoscaling is enabled for a container. + ContainerScalingModeAuto ContainerScalingMode = "Auto" + // ContainerScalingModeOff means autoscaling is disabled for a container. + ContainerScalingModeOff ContainerScalingMode = "Off" +) + +// ContainerControlledValues controls which resource value should be autoscaled. +// +kubebuilder:validation:Enum=RequestsAndLimits;RequestsOnly +type ContainerControlledValues string + +const ( + // ContainerControlledValuesRequestsAndLimits means resource request and limits + // are scaled automatically. The limit is scaled proportionally to the request. + ContainerControlledValuesRequestsAndLimits ContainerControlledValues = "RequestsAndLimits" + // ContainerControlledValuesRequestsOnly means only requested resource is autoscaled. + ContainerControlledValuesRequestsOnly ContainerControlledValues = "RequestsOnly" +) + +// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler. +type VerticalPodAutoscalerStatus struct { + // The most recently computed amount of resources recommended by the + // autoscaler for the controlled pods. 
+ // +optional + Recommendation *RecommendedPodResources `json:"recommendation,omitempty" protobuf:"bytes,1,opt,name=recommendation"` + + // Conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []VerticalPodAutoscalerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// RecommendedPodResources is the recommendation of resources computed by +// autoscaler. It contains a recommendation for each container in the pod +// (except for those with `ContainerScalingMode` set to 'Off'). +type RecommendedPodResources struct { + // Resources recommended by the autoscaler for each container. + // +optional + ContainerRecommendations []RecommendedContainerResources `json:"containerRecommendations,omitempty" protobuf:"bytes,1,rep,name=containerRecommendations"` +} + +// RecommendedContainerResources is the recommendation of resources computed by +// autoscaler for a specific container. Respects the container resource policy +// if present in the spec. In particular the recommendation is not produced for +// containers with `ContainerScalingMode` set to 'Off'. +type RecommendedContainerResources struct { + // Name of the container. + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Recommended amount of resources. Observes ContainerResourcePolicy. + Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` + // Minimum recommended amount of resources. Observes ContainerResourcePolicy. + // This amount is not guaranteed to be sufficient for the application to operate in a stable way, however + // running with less resources is likely to have significant impact on performance/availability. + // +optional + LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` + // Maximum recommended amount of resources. Observes ContainerResourcePolicy. + // Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum + // amount of application is actually capable of consuming. + // +optional + UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` + // The most recent recommended resources target computed by the autoscaler + // for the controlled pods, based only on actual resource usage, not taking + // into account the ContainerResourcePolicy. + // May differ from the Recommendation if the actual resource usage causes + // the target to violate the ContainerResourcePolicy (lower than MinAllowed + // or higher that MaxAllowed). + // Used only as status indication, will not affect actual resource assignment. + // +optional + UncappedTarget v1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"` +} + +// VerticalPodAutoscalerConditionType are the valid conditions of +// a VerticalPodAutoscaler. +type VerticalPodAutoscalerConditionType string + +var ( + // RecommendationProvided indicates whether the VPA recommender was able to calculate a recommendation. 
+ RecommendationProvided VerticalPodAutoscalerConditionType = "RecommendationProvided" + // LowConfidence indicates whether the VPA recommender has low confidence in the recommendation for + // some of containers. + LowConfidence VerticalPodAutoscalerConditionType = "LowConfidence" + // NoPodsMatched indicates that label selector used with VPA object didn't match any pods. + NoPodsMatched VerticalPodAutoscalerConditionType = "NoPodsMatched" + // FetchingHistory indicates that VPA recommender is in the process of loading additional history samples. + FetchingHistory VerticalPodAutoscalerConditionType = "FetchingHistory" + // ConfigDeprecated indicates that this VPA configuration is deprecated + // and will stop being supported soon. + ConfigDeprecated VerticalPodAutoscalerConditionType = "ConfigDeprecated" + // ConfigUnsupported indicates that this VPA configuration is unsupported + // and recommendations will not be provided for it. + ConfigUnsupported VerticalPodAutoscalerConditionType = "ConfigUnsupported" +) + +// VerticalPodAutoscalerCondition describes the state of +// a VerticalPodAutoscaler at a certain point. +type VerticalPodAutoscalerCondition struct { + // type describes the current condition + Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion +// +kubebuilder:resource:shortName=vpacheckpoint + +// VerticalPodAutoscalerCheckpoint is the checkpoint of the internal state of VPA that +// is used for recovery after recommender's restart. +type VerticalPodAutoscalerCheckpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the checkpoint. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec VerticalPodAutoscalerCheckpointSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Data of the checkpoint. + // +optional + Status VerticalPodAutoscalerCheckpointStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VerticalPodAutoscalerCheckpointList is a list of VerticalPodAutoscalerCheckpoint objects. +type VerticalPodAutoscalerCheckpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []VerticalPodAutoscalerCheckpoint `json:"items"` +} + +// VerticalPodAutoscalerCheckpointSpec is the specification of the checkpoint object. +type VerticalPodAutoscalerCheckpointSpec struct { + // Name of the VPA object that stored VerticalPodAutoscalerCheckpoint object. 
+ VPAObjectName string `json:"vpaObjectName,omitempty" protobuf:"bytes,1,opt,name=vpaObjectName"` + + // Name of the checkpointed container. + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,2,opt,name=containerName"` +} + +// VerticalPodAutoscalerCheckpointStatus contains data of the checkpoint. +type VerticalPodAutoscalerCheckpointStatus struct { + // The time when the status was last refreshed. + // +nullable + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,1,opt,name=lastUpdateTime"` + + // Version of the format of the stored data. + Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + + // Checkpoint of histogram for consumption of CPU. + CPUHistogram HistogramCheckpoint `json:"cpuHistogram,omitempty" protobuf:"bytes,3,rep,name=cpuHistograms"` + + // Checkpoint of histogram for consumption of memory. + MemoryHistogram HistogramCheckpoint `json:"memoryHistogram,omitempty" protobuf:"bytes,4,rep,name=memoryHistogram"` + + // Timestamp of the fist sample from the histograms. + // +nullable + FirstSampleStart metav1.Time `json:"firstSampleStart,omitempty" protobuf:"bytes,5,opt,name=firstSampleStart"` + + // Timestamp of the last sample from the histograms. + // +nullable + LastSampleStart metav1.Time `json:"lastSampleStart,omitempty" protobuf:"bytes,6,opt,name=lastSampleStart"` + + // Total number of samples in the histograms. + TotalSamplesCount int `json:"totalSamplesCount,omitempty" protobuf:"bytes,7,opt,name=totalSamplesCount"` +} + +// HistogramCheckpoint contains data needed to reconstruct the histogram. +type HistogramCheckpoint struct { + // Reference timestamp for samples collected within this histogram. + // +nullable + ReferenceTimestamp metav1.Time `json:"referenceTimestamp,omitempty" protobuf:"bytes,1,opt,name=referenceTimestamp"` + + // Map from bucket index to bucket weight. + // +kubebuilder:validation:Type=object + // +kubebuilder:validation:XPreserveUnknownFields + BucketWeights map[int]uint32 `json:"bucketWeights,omitempty" protobuf:"bytes,2,opt,name=bucketWeights"` + + // Sum of samples to be used as denominator for weights from BucketWeights. + TotalWeight float64 `json:"totalWeight,omitempty" protobuf:"bytes,3,opt,name=totalWeight"` +} diff --git a/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/zz_generated.deepcopy.go b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9cea0376dd --- /dev/null +++ b/vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/zz_generated.deepcopy.go @@ -0,0 +1,479 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1 + +import ( + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourcePolicy) DeepCopyInto(out *ContainerResourcePolicy) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(ContainerScalingMode) + **out = **in + } + if in.MinAllowed != nil { + in, out := &in.MinAllowed, &out.MinAllowed + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxAllowed != nil { + in, out := &in.MaxAllowed, &out.MaxAllowed + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.ControlledResources != nil { + in, out := &in.ControlledResources, &out.ControlledResources + *out = new([]corev1.ResourceName) + if **in != nil { + in, out := *in, *out + *out = make([]corev1.ResourceName, len(*in)) + copy(*out, *in) + } + } + if in.ControlledValues != nil { + in, out := &in.ControlledValues, &out.ControlledValues + *out = new(ContainerControlledValues) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourcePolicy. +func (in *ContainerResourcePolicy) DeepCopy() *ContainerResourcePolicy { + if in == nil { + return nil + } + out := new(ContainerResourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HistogramCheckpoint) DeepCopyInto(out *HistogramCheckpoint) { + *out = *in + in.ReferenceTimestamp.DeepCopyInto(&out.ReferenceTimestamp) + if in.BucketWeights != nil { + in, out := &in.BucketWeights, &out.BucketWeights + *out = make(map[int]uint32, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HistogramCheckpoint. +func (in *HistogramCheckpoint) DeepCopy() *HistogramCheckpoint { + if in == nil { + return nil + } + out := new(HistogramCheckpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodResourcePolicy) DeepCopyInto(out *PodResourcePolicy) { + *out = *in + if in.ContainerPolicies != nil { + in, out := &in.ContainerPolicies, &out.ContainerPolicies + *out = make([]ContainerResourcePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourcePolicy. +func (in *PodResourcePolicy) DeepCopy() *PodResourcePolicy { + if in == nil { + return nil + } + out := new(PodResourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodUpdatePolicy) DeepCopyInto(out *PodUpdatePolicy) { + *out = *in + if in.UpdateMode != nil { + in, out := &in.UpdateMode, &out.UpdateMode + *out = new(UpdateMode) + **out = **in + } + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodUpdatePolicy. 
+func (in *PodUpdatePolicy) DeepCopy() *PodUpdatePolicy { + if in == nil { + return nil + } + out := new(PodUpdatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendedContainerResources) DeepCopyInto(out *RecommendedContainerResources) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.LowerBound != nil { + in, out := &in.LowerBound, &out.LowerBound + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.UpperBound != nil { + in, out := &in.UpperBound, &out.UpperBound + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.UncappedTarget != nil { + in, out := &in.UncappedTarget, &out.UncappedTarget + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedContainerResources. +func (in *RecommendedContainerResources) DeepCopy() *RecommendedContainerResources { + if in == nil { + return nil + } + out := new(RecommendedContainerResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendedPodResources) DeepCopyInto(out *RecommendedPodResources) { + *out = *in + if in.ContainerRecommendations != nil { + in, out := &in.ContainerRecommendations, &out.ContainerRecommendations + *out = make([]RecommendedContainerResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedPodResources. +func (in *RecommendedPodResources) DeepCopy() *RecommendedPodResources { + if in == nil { + return nil + } + out := new(RecommendedPodResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler. +func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(VerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VerticalPodAutoscalerCheckpoint) DeepCopyInto(out *VerticalPodAutoscalerCheckpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpoint. +func (in *VerticalPodAutoscalerCheckpoint) DeepCopy() *VerticalPodAutoscalerCheckpoint { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscalerCheckpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCheckpointList) DeepCopyInto(out *VerticalPodAutoscalerCheckpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VerticalPodAutoscalerCheckpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpointList. +func (in *VerticalPodAutoscalerCheckpointList) DeepCopy() *VerticalPodAutoscalerCheckpointList { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscalerCheckpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCheckpointSpec) DeepCopyInto(out *VerticalPodAutoscalerCheckpointSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpointSpec. +func (in *VerticalPodAutoscalerCheckpointSpec) DeepCopy() *VerticalPodAutoscalerCheckpointSpec { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCheckpointStatus) DeepCopyInto(out *VerticalPodAutoscalerCheckpointStatus) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.CPUHistogram.DeepCopyInto(&out.CPUHistogram) + in.MemoryHistogram.DeepCopyInto(&out.MemoryHistogram) + in.FirstSampleStart.DeepCopyInto(&out.FirstSampleStart) + in.LastSampleStart.DeepCopyInto(&out.LastSampleStart) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpointStatus. +func (in *VerticalPodAutoscalerCheckpointStatus) DeepCopy() *VerticalPodAutoscalerCheckpointStatus { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *VerticalPodAutoscalerCondition) DeepCopyInto(out *VerticalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCondition. +func (in *VerticalPodAutoscalerCondition) DeepCopy() *VerticalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerList) DeepCopyInto(out *VerticalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VerticalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerList. +func (in *VerticalPodAutoscalerList) DeepCopy() *VerticalPodAutoscalerList { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerRecommenderSelector) DeepCopyInto(out *VerticalPodAutoscalerRecommenderSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerRecommenderSelector. +func (in *VerticalPodAutoscalerRecommenderSelector) DeepCopy() *VerticalPodAutoscalerRecommenderSelector { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerRecommenderSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerSpec) DeepCopyInto(out *VerticalPodAutoscalerSpec) { + *out = *in + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(autoscalingv1.CrossVersionObjectReference) + **out = **in + } + if in.UpdatePolicy != nil { + in, out := &in.UpdatePolicy, &out.UpdatePolicy + *out = new(PodUpdatePolicy) + (*in).DeepCopyInto(*out) + } + if in.ResourcePolicy != nil { + in, out := &in.ResourcePolicy, &out.ResourcePolicy + *out = new(PodResourcePolicy) + (*in).DeepCopyInto(*out) + } + if in.Recommenders != nil { + in, out := &in.Recommenders, &out.Recommenders + *out = make([]*VerticalPodAutoscalerRecommenderSelector, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(VerticalPodAutoscalerRecommenderSelector) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerSpec. 
+func (in *VerticalPodAutoscalerSpec) DeepCopy() *VerticalPodAutoscalerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VerticalPodAutoscalerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VerticalPodAutoscalerStatus) DeepCopyInto(out *VerticalPodAutoscalerStatus) {
+	*out = *in
+	if in.Recommendation != nil {
+		in, out := &in.Recommendation, &out.Recommendation
+		*out = new(RecommendedPodResources)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]VerticalPodAutoscalerCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerStatus.
+func (in *VerticalPodAutoscalerStatus) DeepCopy() *VerticalPodAutoscalerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VerticalPodAutoscalerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 721ca5c6b6..8cc95981d4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -748,6 +748,9 @@ k8s.io/apimachinery/third_party/forked/golang/reflect
 ## explicit; go 1.19
 k8s.io/apiserver/pkg/features
 k8s.io/apiserver/pkg/util/feature
+# k8s.io/autoscaler/vertical-pod-autoscaler v0.13.0
+## explicit; go 1.19
+k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1
 # k8s.io/cli-runtime v0.26.7
 ## explicit; go 1.19
 k8s.io/cli-runtime/pkg/genericclioptions

From 93218c5e64e5a92670745039dc7200fb2bf3188b Mon Sep 17 00:00:00 2001
From: Karl Isenberg
Date: Sat, 29 Jul 2023 11:26:56 -0700
Subject: [PATCH 2/5] Reduce number of kind clusters to test in parallel

Hopefully this leads to fewer OOMKills
---
 Makefile.e2e | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile.e2e b/Makefile.e2e
index a81d15af41..d5d360970c 100644
--- a/Makefile.e2e
+++ b/Makefile.e2e
@@ -68,7 +68,7 @@ test-e2e-kind-multi-repo: config-sync-manifest-local
 		--share-test-env \
 		--timeout $(KIND_E2E_TIMEOUT) \
 		--test.v -v \
-		--num-clusters 15 \
+		--num-clusters 10 \
 		$(E2E_ARGS)
 
 # This target runs the first group of e2e tests with the multi-repo mode.

From 54050d3b8dc61b41536b53ec27a11a6af587b051 Mon Sep 17 00:00:00 2001
From: Karl Isenberg
Date: Sun, 30 Jul 2023 22:13:05 -0700
Subject: [PATCH 3/5] test: Disable otel-agent metrics buffering

---
 e2e/nomostest/config_sync.go | 48 ++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/e2e/nomostest/config_sync.go b/e2e/nomostest/config_sync.go
index 841893e981..01994dd31d 100644
--- a/e2e/nomostest/config_sync.go
+++ b/e2e/nomostest/config_sync.go
@@ -59,6 +59,7 @@ import (
 	kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
 	"sigs.k8s.io/cli-utils/pkg/object/dependson"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/yaml"
 )
 
 const (
@@ -114,6 +115,14 @@ func isOtelCollectorDeployment(obj client.Object) bool {
 		obj.GetObjectKind().GroupVersionKind() == kinds.Deployment()
 }
 
+// isOtelAgentConfigMap returns true if passed obj is the
+// otel-agent ConfigMap in the config-management-system (controller) namespace.
+func isOtelAgentConfigMap(obj client.Object) bool {
+	return obj.GetName() == ocmetrics.OtelAgentName &&
+		obj.GetNamespace() == configmanagement.ControllerNamespace &&
+		obj.GetObjectKind().GroupVersionKind() == kinds.ConfigMap()
+}
+
 // ResetReconcilerManagerConfigMap resets the reconciler manager config map
 // to what is defined in the manifest
 func ResetReconcilerManagerConfigMap(nt *NT) error {
@@ -151,6 +160,7 @@ func parseConfigSyncManifests(nt *NT) ([]client.Object, error) {
 	objs, err = multiRepoObjects(objs,
 		setReconcilerDebugMode,
 		setPollingPeriods,
+		setOtelAgentBatchDisabled,
 		setOtelCollectorPrometheusAnnotations)
 	if err != nil {
 		return nil, err
@@ -579,6 +589,44 @@ func setPollingPeriods(obj client.Object) error {
 	return nil
 }
 
+// setOtelAgentBatchDisabled updates the otel-agent ConfigMap config to disable
+// metrics batching.
+func setOtelAgentBatchDisabled(obj client.Object) error {
+	if obj == nil {
+		return testpredicates.ErrObjectNotFound
+	}
+	if !isOtelAgentConfigMap(obj) {
+		return nil
+	}
+	cm, ok := obj.(*corev1.ConfigMap)
+	if !ok {
+		return fmt.Errorf("failed to cast %T to *corev1.ConfigMap", obj)
+	}
+	yamlString := cm.Data["otel-agent-config.yaml"]
+	var dataMap map[string]interface{}
+	if err := yaml.Unmarshal([]byte(yamlString), &dataMap); err != nil {
+		return errors.Wrapf(err, "unmarshaling yaml data from ConfigMap %s",
+			client.ObjectKeyFromObject(obj))
+	}
+	processors, found, err := unstructured.NestedMap(dataMap, "processors")
+	if err != nil {
+		return errors.Wrapf(err, "invalid processors field in ConfigMap %s",
+			client.ObjectKeyFromObject(obj))
+	}
+	if !found {
+		return errors.Errorf("ConfigMap %s missing processors field",
+			client.ObjectKeyFromObject(obj))
+	}
+	delete(processors, "batch")
+	yamlBytes, err := yaml.Marshal(dataMap)
+	if err != nil {
+		return errors.Wrapf(err, "marshaling yaml data for ConfigMap %s",
+			client.ObjectKeyFromObject(obj))
+	}
+	cm.Data["otel-agent-config.yaml"] = string(yamlBytes)
+	return nil
+}
+
 // setOtelCollectorPrometheusAnnotations updates the otel-collector Deployment
 // to add pod annotations to enable metrics scraping.
 func setOtelCollectorPrometheusAnnotations(obj client.Object) error {

From 9810d28291ccd7b540b1232bc25e0e6ebcf7cdb9 Mon Sep 17 00:00:00 2001
From: Karl Isenberg
Date: Mon, 31 Jul 2023 10:20:33 -0700
Subject: [PATCH 4/5] Always log recorded metrics

---
 pkg/metrics/record.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/metrics/record.go b/pkg/metrics/record.go
index ed8b9fe0af..f0e18870c4 100644
--- a/pkg/metrics/record.go
+++ b/pkg/metrics/record.go
@@ -29,7 +29,7 @@ import (
 func record(ctx context.Context, ms ...stats.Measurement) {
 	stats.Record(ctx, ms...)
 
-	if klog.V(5).Enabled() {
+	if klog.V(0).Enabled() {
 		for _, m := range ms {
 			klog.Infof("Metric recorded: { \"Name\": %q, \"Value\": %#v, \"Tags\": %s }", m.Measure().Name(), m.Value(), tag.FromContext(ctx))
 		}

From 945403c5257a721d90d672260d91cf6b036bd0a5 Mon Sep 17 00:00:00 2001
From: Karl Isenberg
Date: Mon, 31 Jul 2023 12:38:42 -0700
Subject: [PATCH 5/5] fix: TestCRDDeleteBeforeRemoveCustomResourceV1

Wait for management conflict metric after manual change, with the
current sync labels.
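The conflict metric from the first commit is now validated before the
discovery client is renewed and the second commit is pushed; after the
second commit the conflict is no longer expected.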
--- e2e/testcases/custom_resources_test.go | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/e2e/testcases/custom_resources_test.go b/e2e/testcases/custom_resources_test.go index 8342a720a8..66c5de6075 100644 --- a/e2e/testcases/custom_resources_test.go +++ b/e2e/testcases/custom_resources_test.go @@ -218,6 +218,22 @@ func TestCRDDeleteBeforeRemoveCustomResourceV1(t *testing.T) { nt.T.Fatal(err) } + rootSyncLabels, err := nomostest.MetricLabelsForRootSync(nt, rootSyncNN) + if err != nil { + nt.T.Fatal(err) + } + + err = nomostest.ValidateMetrics(nt, + nomostest.ReconcilerErrorMetrics(nt, rootSyncLabels, firstCommitHash, metrics.ErrorSummary{ + // Remediator conflict after the first commit, because the declared + // Anvil was deleted by another client after successful sync. + // TODO: distinguish between management conflict (spec/generation drift) and concurrent status update conflict (resource version change) + Conflicts: 1, + })) + if err != nil { + nt.T.Fatal(err) + } + // Reset discovery client to invalidate the cached Anvil CRD nt.RenewClient() @@ -230,17 +246,12 @@ func TestCRDDeleteBeforeRemoveCustomResourceV1(t *testing.T) { nt.WaitForRootSyncSourceError(configsync.RootSyncName, status.UnknownKindErrorCode, "") - rootSyncLabels, err := nomostest.MetricLabelsForRootSync(nt, rootSyncNN) + rootSyncLabels, err = nomostest.MetricLabelsForRootSync(nt, rootSyncNN) if err != nil { nt.T.Fatal(err) } err = nomostest.ValidateMetrics(nt, - nomostest.ReconcilerErrorMetrics(nt, rootSyncLabels, firstCommitHash, metrics.ErrorSummary{ - // Remediator conflict after the first commit, because the declared - // Anvil was deleted by another client after successful sync. - Conflicts: 1, - }), nomostest.ReconcilerErrorMetrics(nt, rootSyncLabels, secondCommitHash, metrics.ErrorSummary{ // No remediator conflict after the second commit, because the // reconciler hasn't been updated with the latest declared resources,