diff --git a/bootstrap/kubeadm/controllers/alias.go b/bootstrap/kubeadm/controllers/alias.go index 199c1a655957..7c9a2aff9b0f 100644 --- a/bootstrap/kubeadm/controllers/alias.go +++ b/bootstrap/kubeadm/controllers/alias.go @@ -38,7 +38,8 @@ const ( // KubeadmConfigReconciler reconciles a KubeadmConfig object. type KubeadmConfigReconciler struct { - Client client.Client + Client client.Client + SecretCachingClient client.Client Tracker *remote.ClusterCacheTracker @@ -52,9 +53,10 @@ type KubeadmConfigReconciler struct { // SetupWithManager sets up the reconciler with the Manager. func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{ - Client: r.Client, - Tracker: r.Tracker, - WatchFilterValue: r.WatchFilterValue, - TokenTTL: r.TokenTTL, + Client: r.Client, + SecretCachingClient: r.SecretCachingClient, + Tracker: r.Tracker, + WatchFilterValue: r.WatchFilterValue, + TokenTTL: r.TokenTTL, }).SetupWithManager(ctx, mgr, options) } diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 200885257264..18e1fce5716d 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -76,9 +76,10 @@ type InitLocker interface { // KubeadmConfigReconciler reconciles a KubeadmConfig object. type KubeadmConfigReconciler struct { - Client client.Client - Tracker *remote.ClusterCacheTracker - KubeadmInitLock InitLocker + Client client.Client + SecretCachingClient client.Client + Tracker *remote.ClusterCacheTracker + KubeadmInitLock InitLocker // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -453,13 +454,15 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // Otherwise rely on certificates generated by the ControlPlane controller. // Note: A cluster does not have a ControlPlane reference when using standalone CP machines. if scope.Cluster.Spec.ControlPlaneRef == nil { - err = certificates.LookupOrGenerate( + err = certificates.LookupOrGenerateCached( ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster), *metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"))) } else { - err = certificates.Lookup(ctx, + err = certificates.LookupCached(ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster)) } @@ -531,8 +534,9 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) scope.Info("Creating BootstrapData for the worker node") certificates := secret.NewCertificatesForWorker(scope.Config.Spec.JoinConfiguration.CACertPath) - err := certificates.Lookup( + err := certificates.LookupCached( ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster), ) @@ -645,8 +649,9 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S } certificates := secret.NewControlPlaneJoinCerts(scope.Config.Spec.ClusterConfiguration) - err := certificates.Lookup( + err := certificates.LookupCached( ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster), ) @@ -1055,7 +1060,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope // Ensure the bootstrap secret has the KubeadmConfig as a controller OwnerReference. 
func (r *KubeadmConfigReconciler) ensureBootstrapSecretOwnersRef(ctx context.Context, scope *Scope) error { secret := &corev1.Secret{} - err := r.Client.Get(ctx, client.ObjectKey{Namespace: scope.Config.Namespace, Name: scope.Config.Name}, secret) + err := r.SecretCachingClient.Get(ctx, client.ObjectKey{Namespace: scope.Config.Namespace, Name: scope.Config.Name}, secret) if err != nil { // If the secret has not been created yet return early. if apierrors.IsNotFound(err) { diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go index 7e855189197e..41f991f08fae 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go @@ -47,7 +47,8 @@ func TestKubeadmConfigReconciler(t *testing.T) { }(cluster, machine, config, ns) reconciler := KubeadmConfigReconciler{ - Client: env, + Client: env, + SecretCachingClient: secretCachingClient, } t.Log("Calling reconcile should requeue") result, err := reconciler.Reconcile(ctx, ctrl.Request{ diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 77deb624f067..fda11b1ba845 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -78,7 +78,8 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) { } fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build() reconciler := &KubeadmConfigReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, } for i := 0; i < 3; i++ { o := machineObjs[i] @@ -110,7 +111,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t * myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -163,7 +165,8 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -241,7 +244,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfReferencedMachineIsNotFoun myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -277,7 +281,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -320,7 +325,8 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -354,7 +360,8 @@ 
func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasNoCluster(t *t myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -388,7 +395,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfAssociatedClusterIsNotFoun myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -455,8 +463,9 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI myclient := fake.NewClientBuilder().WithObjects(tc.objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } result, err := k.Reconcile(ctx, tc.request) @@ -497,9 +506,10 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -558,9 +568,10 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -602,8 +613,9 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -679,9 +691,10 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe objects = append(objects, createSecrets(t, cluster, config)...) 
myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -756,9 +769,10 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { objects = append(objects, createSecrets(t, cluster, config)...) myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -856,9 +870,10 @@ func TestBootstrapDataFormat(t *testing.T) { myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -936,9 +951,10 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { objects = append(objects, createSecrets(t, cluster, initConfig)...) myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -1013,10 +1029,11 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { objects = append(objects, createSecrets(t, cluster, initConfig)...) 
myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}, &clusterv1.Machine{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, - TokenTTL: DefaultTokenTTL, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, + TokenTTL: DefaultTokenTTL, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -1214,10 +1231,11 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { objects = append(objects, createSecrets(t, cluster, initConfig)...) myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}, &expv1.MachinePool{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, - TokenTTL: DefaultTokenTTL, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, + TokenTTL: DefaultTokenTTL, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -1497,9 +1515,10 @@ func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testin fakeClient := fake.NewClientBuilder().Build() k := &KubeadmConfigReconciler{ - Client: fakeClient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: tc.cluster.Name, Namespace: tc.cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: fakeClient, + SecretCachingClient: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: tc.cluster.Name, Namespace: tc.cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } res, err := k.reconcileDiscovery(ctx, tc.cluster, tc.config, secret.Certificates{}) @@ -1713,9 +1732,10 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques myclient := fake.NewClientBuilder().WithObjects(objects...).Build() reconciler := KubeadmConfigReconciler{ - Client: myclient, - Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } wc := newWorkerJoinKubeadmConfig(metav1.NamespaceDefault, "worker-join-cfg") @@ -1767,7 +1787,8 @@ func TestKubeadmConfigReconciler_ClusterToKubeadmConfigs(t *testing.T) { } fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build() reconciler := &KubeadmConfigReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: 
fakeClient, } configs := reconciler.ClusterToKubeadmConfigs(ctx, cluster) names := make([]string, 6) @@ -1806,8 +1827,9 @@ func TestKubeadmConfigReconciler_Reconcile_DoesNotFailIfCASecretsAlreadyExist(t } fakec := fake.NewClientBuilder().WithObjects(cluster, m, c, scrt).Build() reconciler := &KubeadmConfigReconciler{ - Client: fakec, - KubeadmInitLock: &myInitLocker{}, + Client: fakec, + SecretCachingClient: fakec, + KubeadmInitLock: &myInitLocker{}, } req := ctrl.Request{ NamespacedName: client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: configName}, @@ -1840,8 +1862,9 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali } myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -1906,8 +1929,9 @@ func TestKubeadmConfigReconciler_Reconcile_PatchWhenErrorOccurred(t *testing.T) myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -2042,8 +2066,9 @@ func TestKubeadmConfigReconciler_ResolveFiles(t *testing.T) { myclient := fake.NewClientBuilder().WithObjects(tc.objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } // make a list of files we expect to be sourced from secrets @@ -2168,8 +2193,9 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { myclient := fake.NewClientBuilder().WithObjects(tc.objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } // make a list of password we expect to be sourced from secrets diff --git a/bootstrap/kubeadm/internal/controllers/suite_test.go b/bootstrap/kubeadm/internal/controllers/suite_test.go index 865f39ef5fcc..5b1afdb40b52 100644 --- a/bootstrap/kubeadm/internal/controllers/suite_test.go +++ b/bootstrap/kubeadm/internal/controllers/suite_test.go @@ -17,22 +17,45 @@ limitations under the License. 
package controllers import ( + "context" + "fmt" "os" "testing" + corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api/internal/test/envtest" ) var ( - env *envtest.Environment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() + secretCachingClient client.Client ) func TestMain(m *testing.M) { + setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) { + var err error + secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + panic(fmt.Sprintf("unable to create secretCachingClient: %v", err)) + } + } + os.Exit(envtest.Run(ctx, envtest.RunInput{ - M: m, - SetupEnv: func(e *envtest.Environment) { env = e }, + M: m, + ManagerUncachedObjs: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + SetupEnv: func(e *envtest.Environment) { env = e }, + SetupReconcilers: setupReconcilers, })) } diff --git a/bootstrap/kubeadm/main.go b/bootstrap/kubeadm/main.go index a3f90eb887ea..624afdf7d333 100644 --- a/bootstrap/kubeadm/main.go +++ b/bootstrap/kubeadm/main.go @@ -29,7 +29,9 @@ import ( // +kubebuilder:scaffold:imports "github.com/spf13/pflag" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/leaderelection/resourcelock" cliflag "k8s.io/component-base/cli/flag" @@ -193,6 +195,9 @@ func main() { goruntime.SetBlockProfileRate(1) } + req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil) + clusterSecretCacheSelector := labels.NewSelector().Add(*req) + ctrlOptions := ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsBindAddr, @@ -207,6 +212,14 @@ func main() { Cache: cache.Options{ Namespaces: watchNamespaces, SyncPeriod: &syncPeriod, + ByObject: map[client.Object]cache.ByObject{ + // Note: Only Secrets with the cluster name label are cached. + // The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor). + // The cached secrets will only be used by the secretCachingClient we create below. 
+ &corev1.Secret{}: { + Label: clusterSecretCacheSelector, + }, + }, }, Client: client.Options{ Cache: &client.CacheOptions{ @@ -259,14 +272,26 @@ func setupChecks(mgr ctrl.Manager) { } func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { + secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + setupLog.Error(err, "unable to create secret caching client") + os.Exit(1) + } + // Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers // requiring a connection to a remote cluster log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker") tracker, err := remote.NewClusterCacheTracker( mgr, remote.ClusterCacheTrackerOptions{ - ControllerName: controllerName, - Log: &log, + SecretCachingClient: secretCachingClient, + ControllerName: controllerName, + Log: &log, }, ) if err != nil { @@ -283,10 +308,11 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { } if err := (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{ - Client: mgr.GetClient(), - Tracker: tracker, - WatchFilterValue: watchFilterValue, - TokenTTL: tokenTTL, + Client: mgr.GetClient(), + SecretCachingClient: secretCachingClient, + Tracker: tracker, + WatchFilterValue: watchFilterValue, + TokenTTL: tokenTTL, }).SetupWithManager(ctx, mgr, concurrency(kubeadmConfigConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "KubeadmConfig") os.Exit(1) diff --git a/controllers/remote/cluster_cache_tracker.go b/controllers/remote/cluster_cache_tracker.go index 7ebf4efa8a83..191e09db85eb 100644 --- a/controllers/remote/cluster_cache_tracker.go +++ b/controllers/remote/cluster_cache_tracker.go @@ -68,8 +68,15 @@ var ErrClusterLocked = errors.New("cluster is locked already") type ClusterCacheTracker struct { log logr.Logger clientUncachedObjects []client.Object - client client.Client - scheme *runtime.Scheme + + client client.Client + + // SecretCachingClient is a client which caches secrets. + // If set it will be used to read the kubeconfig secret. + // Otherwise the default client from the manager will be used. + secretCachingClient client.Client + + scheme *runtime.Scheme // clusterAccessorsLock is used to lock the access to the clusterAccessors map. clusterAccessorsLock sync.RWMutex @@ -95,6 +102,11 @@ type ClusterCacheTracker struct { // ClusterCacheTrackerOptions defines options to configure // a ClusterCacheTracker. type ClusterCacheTrackerOptions struct { + // SecretCachingClient is a client which caches secrets. + // If set it will be used to read the kubeconfig secret. + // Otherwise the default client from the manager will be used. + SecretCachingClient client.Client + // Log is the logger used throughout the lifecycle of caches. // Defaults to a no-op logger if it's not set. 
Log *logr.Logger @@ -155,6 +167,7 @@ func NewClusterCacheTracker(manager ctrl.Manager, options ClusterCacheTrackerOpt log: *options.Log, clientUncachedObjects: options.ClientUncachedObjects, client: manager.GetClient(), + secretCachingClient: options.SecretCachingClient, scheme: manager.GetScheme(), clusterAccessors: make(map[client.ObjectKey]*clusterAccessor), clusterLock: newKeyedMutex(), @@ -271,8 +284,13 @@ func (t *ClusterCacheTracker) getClusterAccessor(ctx context.Context, cluster cl func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster client.ObjectKey, indexes ...Index) (*clusterAccessor, error) { log := ctrl.LoggerFrom(ctx) - // Get a rest config for the remote cluster - config, err := RESTConfig(ctx, t.controllerName, t.client, cluster) + // Get a rest config for the remote cluster. + // Use the secretCachingClient if set. + secretClient := t.client + if t.secretCachingClient != nil { + secretClient = t.secretCachingClient + } + config, err := RESTConfig(ctx, t.controllerName, secretClient, cluster) if err != nil { return nil, errors.Wrapf(err, "error fetching REST client config for remote cluster %q", cluster.String()) } diff --git a/controlplane/kubeadm/controllers/alias.go b/controlplane/kubeadm/controllers/alias.go index 0f6a3f02fca4..7b03abff618a 100644 --- a/controlplane/kubeadm/controllers/alias.go +++ b/controlplane/kubeadm/controllers/alias.go @@ -30,8 +30,9 @@ import ( // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. type KubeadmControlPlaneReconciler struct { - Client client.Client - Tracker *remote.ClusterCacheTracker + Client client.Client + SecretCachingClient client.Client + Tracker *remote.ClusterCacheTracker EtcdDialTimeout time.Duration EtcdCallTimeout time.Duration @@ -43,10 +44,11 @@ type KubeadmControlPlaneReconciler struct { // SetupWithManager sets up the reconciler with the Manager. func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ - Client: r.Client, - Tracker: r.Tracker, - EtcdDialTimeout: r.EtcdDialTimeout, - EtcdCallTimeout: r.EtcdCallTimeout, - WatchFilterValue: r.WatchFilterValue, + Client: r.Client, + SecretCachingClient: r.SecretCachingClient, + Tracker: r.Tracker, + EtcdDialTimeout: r.EtcdDialTimeout, + EtcdCallTimeout: r.EtcdCallTimeout, + WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) } diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go index 521e2df2af10..3f4b7721ccba 100644 --- a/controlplane/kubeadm/internal/cluster.go +++ b/controlplane/kubeadm/internal/cluster.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -51,10 +52,11 @@ type ManagementCluster interface { // Management holds operations on the management cluster. type Management struct { - Client client.Reader - Tracker *remote.ClusterCacheTracker - EtcdDialTimeout time.Duration - EtcdCallTimeout time.Duration + Client client.Reader + SecretCachingClient client.Reader + Tracker *remote.ClusterCacheTracker + EtcdDialTimeout time.Duration + EtcdCallTimeout time.Duration } // RemoteClusterConnectionError represents a failure to connect to a remote cluster. 
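Editor's note (not part of the diff): the ClusterCacheTracker change above keeps the caching client optional; when SecretCachingClient is unset the tracker still reads kubeconfig secrets through the manager's default client. A minimal sketch of how a controller binary could wire this up, using the same client.New and remote.NewClusterCacheTracker calls this PR adds to the main.go files; the function name setupTracker and the controller name are illustrative only.

	package main

	import (
		ctrl "sigs.k8s.io/controller-runtime"
		"sigs.k8s.io/controller-runtime/pkg/client"

		"sigs.k8s.io/cluster-api/controllers/remote"
	)

	func setupTracker(mgr ctrl.Manager) (*remote.ClusterCacheTracker, error) {
		// Client that reads Secrets through the manager's cache instead of issuing live API calls.
		secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{
			HTTPClient: mgr.GetHTTPClient(),
			Cache: &client.CacheOptions{
				Reader: mgr.GetCache(),
			},
		})
		if err != nil {
			return nil, err
		}

		log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker")
		return remote.NewClusterCacheTracker(mgr, remote.ClusterCacheTrackerOptions{
			// If left nil, the tracker falls back to the manager's default client for kubeconfig secrets.
			SecretCachingClient: secretCachingClient,
			ControllerName:      "example-controller",
			Log:                 &log,
		})
	}
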
@@ -181,9 +183,21 @@ func (m *Management) getEtcdCAKeyPair(ctx context.Context, clusterKey client.Obj Namespace: clusterKey.Namespace, Name: fmt.Sprintf("%s-etcd", clusterKey.Name), } - if err := m.Client.Get(ctx, etcdCAObjectKey, etcdCASecret); err != nil { - return nil, nil, errors.Wrapf(err, "failed to get secret; etcd CA bundle %s/%s", etcdCAObjectKey.Namespace, etcdCAObjectKey.Name) + + // Try to get the certificate via the cached client. + err := m.SecretCachingClient.Get(ctx, etcdCAObjectKey, etcdCASecret) + if err != nil { + if !apierrors.IsNotFound(err) { + // Return the error if it is not a NotFound error. + return nil, nil, errors.Wrapf(err, "failed to get secret; etcd CA bundle %s/%s", etcdCAObjectKey.Namespace, etcdCAObjectKey.Name) + } + + // Try to get the certificate via the uncached client. + if err := m.Client.Get(ctx, etcdCAObjectKey, etcdCASecret); err != nil { + return nil, nil, errors.Wrapf(err, "failed to get secret; etcd CA bundle %s/%s", etcdCAObjectKey.Namespace, etcdCAObjectKey.Name) + } } + crtData, ok := etcdCASecret.Data[secret.TLSCrtDataName] if !ok { return nil, nil, errors.Errorf("etcd tls crt does not exist for cluster %s/%s", clusterKey.Namespace, clusterKey.Name) diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go index 4f2c65aed16b..d932ff5e7671 100644 --- a/controlplane/kubeadm/internal/cluster_test.go +++ b/controlplane/kubeadm/internal/cluster_test.go @@ -202,8 +202,9 @@ func TestGetWorkloadCluster(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) m := Management{ - Client: env.GetClient(), - Tracker: tracker, + Client: env.GetClient(), + SecretCachingClient: secretCachingClient, + Tracker: tracker, } workloadCluster, err := m.GetWorkloadCluster(ctx, tt.clusterKey) diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 57bde7db4b07..7e8a20a6e355 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -70,10 +70,11 @@ const ( // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object.
type KubeadmControlPlaneReconciler struct { - Client client.Client - controller controller.Controller - recorder record.EventRecorder - Tracker *remote.ClusterCacheTracker + Client client.Client + SecretCachingClient client.Client + controller controller.Controller + recorder record.EventRecorder + Tracker *remote.ClusterCacheTracker EtcdDialTimeout time.Duration EtcdCallTimeout time.Duration @@ -121,10 +122,11 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg return errors.New("cluster cache tracker is nil, cannot create the internal management cluster resource") } r.managementCluster = &internal.Management{ - Client: r.Client, - Tracker: r.Tracker, - EtcdDialTimeout: r.EtcdDialTimeout, - EtcdCallTimeout: r.EtcdCallTimeout, + Client: r.Client, + SecretCachingClient: r.SecretCachingClient, + Tracker: r.Tracker, + EtcdDialTimeout: r.EtcdDialTimeout, + EtcdCallTimeout: r.EtcdCallTimeout, } } @@ -482,7 +484,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context } certificates := secret.NewCertificatesForInitialControlPlane(config.ClusterConfiguration) controllerRef := metav1.NewControllerRef(controlPlane.KCP, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)) - if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(controlPlane.Cluster), *controllerRef); err != nil { + if err := certificates.LookupOrGenerateCached(ctx, r.SecretCachingClient, r.Client, util.ObjectKey(controlPlane.Cluster), *controllerRef); err != nil { log.Error(err, "unable to lookup or create cluster certificates") conditions.MarkFalse(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err @@ -626,35 +628,40 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro {"f:metadata", "f:annotations"}, {"f:metadata", "f:labels"}, } - infraMachine := controlPlane.InfraResources[machineName] - // Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations - // from "manager". We do this so that InfrastructureMachines that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { - return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) - } - // Update in-place mutating fields on InfrastructureMachine. - if err := r.updateExternalObject(ctx, infraMachine, controlPlane.KCP, controlPlane.Cluster); err != nil { - return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) - } - - kubeadmConfig, ok := controlPlane.GetKubeadmConfig(machineName) - if !ok || kubeadmConfig == nil { - return errors.Wrapf(err, "failed to retrieve KubeadmConfig for machine %s", machineName) - } - // Note: Set the GroupVersionKind because updateExternalObject depends on it. - kubeadmConfig.SetGroupVersionKind(m.Spec.Bootstrap.ConfigRef.GroupVersionKind()) - // Cleanup managed fields of all KubeadmConfigs to drop ownership of labels and annotations - // from "manager". We do this so that KubeadmConfigs that are created using the Create method - // can also work with SSA. 
Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { - return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig)) - } - // Update in-place mutating fields on BootstrapConfig. - if err := r.updateExternalObject(ctx, kubeadmConfig, controlPlane.KCP, controlPlane.Cluster); err != nil { - return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig)) + infraMachine, infraMachineFound := controlPlane.InfraResources[machineName] + // Only update the InfraMachine if it is already found, otherwise just skip it. + // This could happen e.g. if the cache is not up-to-date yet. + if infraMachineFound { + // Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations + // from "manager". We do this so that InfrastructureMachines that are created using the Create method + // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" + // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. + if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) + } + // Update in-place mutating fields on InfrastructureMachine. + if err := r.updateExternalObject(ctx, infraMachine, controlPlane.KCP, controlPlane.Cluster); err != nil { + return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) + } + } + + kubeadmConfig, kubeadmConfigFound := controlPlane.KubeadmConfigs[machineName] + // Only update the KubeadmConfig if it is already found, otherwise just skip it. + // This could happen e.g. if the cache is not up-to-date yet. + if kubeadmConfigFound { + // Note: Set the GroupVersionKind because updateExternalObject depends on it. + kubeadmConfig.SetGroupVersionKind(m.Spec.Bootstrap.ConfigRef.GroupVersionKind()) + // Cleanup managed fields of all KubeadmConfigs to drop ownership of labels and annotations + // from "manager". We do this so that KubeadmConfigs that are created using the Create method + // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" + // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. + if err := ssa.DropManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig)) + } + // Update in-place mutating fields on BootstrapConfig. + if err := r.updateExternalObject(ctx, kubeadmConfig, controlPlane.KCP, controlPlane.Cluster); err != nil { + return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig)) + } } } // Update the patch helpers. 
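Editor's note (not part of the diff): the getEtcdCAKeyPair change earlier in this file follows a read-through pattern: try the label-filtered, cache-backed client first and fall back to an uncached read only when the cache reports NotFound (for example because the Secret does not carry the cluster name label that scopes the cache, or the cache is not yet synced). A small, self-contained sketch of that pattern under the same assumptions; the package and helper name getSecretWithFallback are illustrative and not part of this PR.

	package example

	import (
		"context"

		corev1 "k8s.io/api/core/v1"
		apierrors "k8s.io/apimachinery/pkg/api/errors"
		"sigs.k8s.io/controller-runtime/pkg/client"
	)

	// getSecretWithFallback reads a Secret via the cached client and retries
	// with the live client only when the cached read returns NotFound.
	func getSecretWithFallback(ctx context.Context, cachedClient, liveClient client.Reader, key client.ObjectKey) (*corev1.Secret, error) {
		s := &corev1.Secret{}
		if err := cachedClient.Get(ctx, key, s); err != nil {
			if !apierrors.IsNotFound(err) {
				return nil, err
			}
			// Cache miss: fall back to an uncached read against the API server.
			if err := liveClient.Get(ctx, key, s); err != nil {
				return nil, err
			}
		}
		return s, nil
	}
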
diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 53c9ac29c0fa..276258cc25a9 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -87,8 +87,9 @@ func TestClusterToKubeadmControlPlane(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } got := r.ClusterToKubeadmControlPlane(ctx, cluster) @@ -102,8 +103,9 @@ func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) { cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault}) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } got := r.ClusterToKubeadmControlPlane(ctx, cluster) @@ -125,8 +127,9 @@ func TestClusterToKubeadmControlPlaneOtherControlPlane(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } got := r.ClusterToKubeadmControlPlane(ctx, cluster) @@ -147,8 +150,9 @@ func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) { }(kcp, ns) r := &KubeadmControlPlaneReconciler{ - Client: env, - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) @@ -169,9 +173,10 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { g := NewWithT(t) r := &KubeadmControlPlaneReconciler{ - Client: env, - recorder: record.NewFakeRecorder(32), - managementCluster: &internal.Management{Client: env.Client, Tracker: nil}, + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), + managementCluster: &internal.Management{Client: env.Client, Tracker: nil}, } ns, err := env.CreateNamespace(ctx, "test-reconcile-upd-og") @@ -251,8 +256,9 @@ func TestReconcileNoClusterOwnerRef(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) @@ -287,8 +293,9 @@ func TestReconcileNoKCP(t *testing.T) { fakeClient := newFakeClient() r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) @@ -328,8 +335,9 @@ func TestReconcileNoCluster(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) @@ -377,8 +385,9 @@ func TestReconcilePaused(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) fakeClient := 
newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) @@ -432,8 +441,9 @@ func TestReconcileClusterNoEndpoints(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -515,6 +525,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } @@ -610,6 +621,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } @@ -695,6 +707,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } @@ -747,6 +760,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { recorder := record.NewFakeRecorder(32) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, + SecretCachingClient: fakeClient, recorder: recorder, managementCluster: fmc, managementClusterUncached: fmc, @@ -823,7 +837,10 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { fakeClient := newFakeClient(objs...) - r := KubeadmControlPlaneReconciler{Client: fakeClient} + r := KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + } err = r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner) g.Expect(err).To(BeNil()) @@ -869,7 +886,10 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { fakeClient := newFakeClient(objs...) - r := KubeadmControlPlaneReconciler{Client: fakeClient} + r := KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + } err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner) g.Expect(err).To(BeNil()) @@ -917,7 +937,10 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { fakeClient := newFakeClient(objs...) 
- r := KubeadmControlPlaneReconciler{Client: fakeClient} + r := KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + } err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner) g.Expect(err).To(BeNil()) @@ -1112,8 +1135,9 @@ func TestReconcileCertificateExpiries(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - managementCluster: managementCluster, + Client: fakeClient, + SecretCachingClient: fakeClient, + managementCluster: managementCluster, } controlPlane, err := internal.NewControlPlane(ctx, managementCluster, fakeClient, cluster, kcp, ownedMachines) @@ -1287,8 +1311,9 @@ kubernetesVersion: metav1.16.1`, expectedLabels := map[string]string{clusterv1.ClusterNameLabel: "foo"} r := &KubeadmControlPlaneReconciler{ - Client: env, - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: env}, Workload: fakeWorkloadCluster{ @@ -1599,7 +1624,11 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Run syncMachines to clean up managed fields and have proper field ownership // for Machines, InfrastructureMachines and KubeadmConfigs. - reconciler := &KubeadmControlPlaneReconciler{Client: env, ssaCache: ssa.NewCache()} + reconciler := &KubeadmControlPlaneReconciler{ + Client: env, + SecretCachingClient: secretCachingClient, + ssaCache: ssa.NewCache(), + } g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed()) // The inPlaceMutatingMachine should have cleaned up managed fields. @@ -2042,7 +2071,8 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { fakeClient := newFakeClient(initObjs...) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -2105,7 +2135,8 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { fakeClient := newFakeClient(initObjs...) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -2162,7 +2193,8 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { fakeClient := newFakeClient(initObjs...) 
r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -2199,7 +2231,8 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index 5083c04c55fc..49431f90a729 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -57,12 +57,12 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, controllerOwnerRef := *metav1.NewControllerRef(controlPlane.KCP, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)) clusterName := util.ObjectKey(controlPlane.Cluster) - configSecret, err := secret.GetFromNamespacedName(ctx, r.Client, clusterName, secret.Kubeconfig) + configSecret, err := secret.GetFromNamespacedName(ctx, r.SecretCachingClient, clusterName, secret.Kubeconfig) switch { case apierrors.IsNotFound(err): createErr := kubeconfig.CreateSecretWithOwner( ctx, - r.Client, + r.SecretCachingClient, clusterName, endpoint.String(), controllerOwnerRef, diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index 6ed3ad61df1f..057ea3a35871 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -74,8 +74,9 @@ func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } controlPlane := &internal.ControlPlane{ @@ -128,8 +129,9 @@ func TestReconcileKubeconfigMissingCACertificate(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } controlPlane := &internal.ControlPlane{ @@ -199,8 +201,9 @@ func TestReconcileKubeconfigSecretDoesNotAdoptsUserSecrets(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy(), existingKubeconfigSecret.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } controlPlane := &internal.ControlPlane{ @@ -264,8 +267,9 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy(), existingCACertSecret.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } controlPlane := &internal.ControlPlane{ @@ -356,8 +360,9 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { } r := 
&KubeadmControlPlaneReconciler{ - Client: env, - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), } bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ @@ -440,8 +445,9 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ @@ -677,8 +683,9 @@ func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } got, err := r.generateKubeadmConfig(ctx, kcp, cluster, spec.DeepCopy()) @@ -784,7 +791,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { t.Run(tt.name, func(t *testing.T) { fakeClient := newFakeClient(kcp, tt.configSecret) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, } g.Expect(r.adoptKubeconfigSecret(ctx, tt.configSecret, kcp)).To(Succeed()) actualSecret := &corev1.Secret{} diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index 40367172a9e6..82f05d263694 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -209,6 +209,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { r := &KubeadmControlPlaneReconciler{ Client: env, + SecretCachingClient: secretCachingClient, managementCluster: fmc, managementClusterUncached: fmc, recorder: record.NewFakeRecorder(32), @@ -251,8 +252,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. fakeClient := newFakeClient(machines["one"]) r := &KubeadmControlPlaneReconciler{ - recorder: record.NewFakeRecorder(32), - Client: fakeClient, + recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{}, }, @@ -293,8 +295,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. fakeClient := newFakeClient(machines["one"], machines["two"], machines["three"]) r := &KubeadmControlPlaneReconciler{ - recorder: record.NewFakeRecorder(32), - Client: fakeClient, + recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{}, }, @@ -334,8 +337,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. 
fakeClient := newFakeClient(machines["one"], machines["two"], machines["three"]) r := &KubeadmControlPlaneReconciler{ - recorder: record.NewFakeRecorder(32), - Client: fakeClient, + recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{}, }, diff --git a/controlplane/kubeadm/internal/controllers/suite_test.go b/controlplane/kubeadm/internal/controllers/suite_test.go index c3d10ccb2a23..40e89d39a7b5 100644 --- a/controlplane/kubeadm/internal/controllers/suite_test.go +++ b/controlplane/kubeadm/internal/controllers/suite_test.go @@ -17,6 +17,8 @@ limitations under the License. package controllers import ( + "context" + "fmt" "os" "testing" @@ -28,17 +30,31 @@ import ( ) var ( - env *envtest.Environment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() + secretCachingClient client.Client ) func TestMain(m *testing.M) { + setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) { + var err error + secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + panic(fmt.Sprintf("unable to create secretCachingClient: %v", err)) + } + } os.Exit(envtest.Run(ctx, envtest.RunInput{ M: m, ManagerUncachedObjs: []client.Object{ &corev1.ConfigMap{}, &corev1.Secret{}, }, - SetupEnv: func(e *envtest.Environment) { env = e }, + SetupEnv: func(e *envtest.Environment) { env = e }, + SetupReconcilers: setupReconcilers, })) } diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go index 8977d8b0f86f..37e5a22979ea 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade_test.go +++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go @@ -78,8 +78,9 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { setKCPHealthy(kcp) r := &KubeadmControlPlaneReconciler{ - Client: env, - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: env}, Workload: fakeWorkloadCluster{ @@ -227,6 +228,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } diff --git a/controlplane/kubeadm/internal/suite_test.go b/controlplane/kubeadm/internal/suite_test.go index e76fd92af79a..7bdd48b5ae72 100644 --- a/controlplane/kubeadm/internal/suite_test.go +++ b/controlplane/kubeadm/internal/suite_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package internal import ( + "context" + "fmt" "os" "testing" @@ -28,17 +30,31 @@ import ( ) var ( - env *envtest.Environment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() + secretCachingClient client.Client ) func TestMain(m *testing.M) { + setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) { + var err error + secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + panic(fmt.Sprintf("unable to create secretCachingClient: %v", err)) + } + } os.Exit(envtest.Run(ctx, envtest.RunInput{ M: m, ManagerUncachedObjs: []client.Object{ &corev1.ConfigMap{}, &corev1.Secret{}, }, - SetupEnv: func(e *envtest.Environment) { env = e }, + SetupEnv: func(e *envtest.Environment) { env = e }, + SetupReconcilers: setupReconcilers, })) } diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index f7b038a85252..4b4e40670acb 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -31,7 +31,9 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/leaderelection/resourcelock" cliflag "k8s.io/component-base/cli/flag" @@ -197,6 +199,9 @@ func main() { goruntime.SetBlockProfileRate(1) } + req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil) + clusterSecretCacheSelector := labels.NewSelector().Add(*req) + ctrlOptions := ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsBindAddr, @@ -211,6 +216,14 @@ func main() { Cache: cache.Options{ Namespaces: watchNamespaces, SyncPeriod: &syncPeriod, + ByObject: map[client.Object]cache.ByObject{ + // Note: Only Secrets with the cluster name label are cached. + // The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor). + // The cached secrets will only be used by the secretCachingClient we create below. 
+				&corev1.Secret{}: {
+					Label: clusterSecretCacheSelector,
+				},
+			},
 		},
 		Client: client.Options{
 			Cache: &client.CacheOptions{
@@ -268,12 +281,24 @@ func setupChecks(mgr ctrl.Manager) {
 }
 
 func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
+	secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{
+		HTTPClient: mgr.GetHTTPClient(),
+		Cache: &client.CacheOptions{
+			Reader: mgr.GetCache(),
+		},
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to create secret caching client")
+		os.Exit(1)
+	}
+
 	// Set up a ClusterCacheTracker to provide to controllers
 	// requiring a connection to a remote cluster
 	log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker")
 	tracker, err := remote.NewClusterCacheTracker(mgr, remote.ClusterCacheTrackerOptions{
-		ControllerName: controllerName,
-		Log:            &log,
+		SecretCachingClient: secretCachingClient,
+		ControllerName:      controllerName,
+		Log:                 &log,
 		ClientUncachedObjects: []client.Object{
 			&corev1.ConfigMap{},
 			&corev1.Secret{},
@@ -296,11 +321,12 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 	}
 
 	if err := (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{
-		Client:           mgr.GetClient(),
-		Tracker:          tracker,
-		WatchFilterValue: watchFilterValue,
-		EtcdDialTimeout:  etcdDialTimeout,
-		EtcdCallTimeout:  etcdCallTimeout,
+		Client:              mgr.GetClient(),
+		SecretCachingClient: secretCachingClient,
+		Tracker:             tracker,
+		WatchFilterValue:    watchFilterValue,
+		EtcdDialTimeout:     etcdDialTimeout,
+		EtcdCallTimeout:     etcdCallTimeout,
 	}).SetupWithManager(ctx, mgr, concurrency(kubeadmControlPlaneConcurrency)); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "KubeadmControlPlane")
 		os.Exit(1)
diff --git a/main.go b/main.go
index 52efce438a7c..55d42c9774fa 100644
--- a/main.go
+++ b/main.go
@@ -29,7 +29,9 @@ import (
 	"github.com/spf13/pflag"
 	corev1 "k8s.io/api/core/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/selection"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	cliflag "k8s.io/component-base/cli/flag"
@@ -261,6 +263,9 @@ func main() {
 		goruntime.SetBlockProfileRate(1)
 	}
 
+	req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil)
+	clusterSecretCacheSelector := labels.NewSelector().Add(*req)
+
 	ctrlOptions := ctrl.Options{
 		Scheme:             scheme,
 		MetricsBindAddress: metricsBindAddr,
@@ -275,6 +280,14 @@ func main() {
 		Cache: cache.Options{
 			Namespaces: watchNamespaces,
 			SyncPeriod: &syncPeriod,
+			ByObject: map[client.Object]cache.ByObject{
+				// Note: Only Secrets with the cluster name label are cached.
+				// The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor).
+				// The cached secrets will only be used by the secretCachingClient we create below.
+				&corev1.Secret{}: {
+					Label: clusterSecretCacheSelector,
+				},
+			},
 		},
 		Client: client.Options{
 			Cache: &client.CacheOptions{
@@ -335,15 +348,27 @@ func setupIndexes(ctx context.Context, mgr ctrl.Manager) {
 }
 
 func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
+	secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{
+		HTTPClient: mgr.GetHTTPClient(),
+		Cache: &client.CacheOptions{
+			Reader: mgr.GetCache(),
+		},
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to create secret caching client")
+		os.Exit(1)
+	}
+
 	// Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers
 	// requiring a connection to a remote cluster
 	log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker")
 	tracker, err := remote.NewClusterCacheTracker(
 		mgr,
 		remote.ClusterCacheTrackerOptions{
-			ControllerName: controllerName,
-			Log:            &log,
-			Indexes:        []remote.Index{remote.NodeProviderIDIndex},
+			SecretCachingClient: secretCachingClient,
+			ControllerName:      controllerName,
+			Log:                 &log,
+			Indexes:             []remote.Index{remote.NodeProviderIDIndex},
 		},
 	)
 	if err != nil {
diff --git a/test/infrastructure/docker/main.go b/test/infrastructure/docker/main.go
index 111bcaeb4faf..ac1ab3e3ce8c 100644
--- a/test/infrastructure/docker/main.go
+++ b/test/infrastructure/docker/main.go
@@ -28,7 +28,9 @@ import (
 	// +kubebuilder:scaffold:imports
 	"github.com/spf13/pflag"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/selection"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -195,6 +197,9 @@ func main() {
 		goruntime.SetBlockProfileRate(1)
 	}
 
+	req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil)
+	clusterSecretCacheSelector := labels.NewSelector().Add(*req)
+
 	ctrlOptions := ctrl.Options{
 		Scheme:             scheme,
 		MetricsBindAddress: metricsBindAddr,
@@ -209,6 +214,14 @@ func main() {
 		Cache: cache.Options{
 			Namespaces: watchNamespaces,
 			SyncPeriod: &syncPeriod,
+			ByObject: map[client.Object]cache.ByObject{
+				// Note: Only Secrets with the cluster name label are cached.
+				// The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor).
+				// The cached secrets will only be used by the secretCachingClient we create below.
+				&corev1.Secret{}: {
+					Label: clusterSecretCacheSelector,
+				},
+			},
 		},
 		Client: client.Options{
 			Cache: &client.CacheOptions{
@@ -261,6 +274,17 @@ func setupChecks(mgr ctrl.Manager) {
 }
 
 func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
+	secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{
+		HTTPClient: mgr.GetHTTPClient(),
+		Cache: &client.CacheOptions{
+			Reader: mgr.GetCache(),
+		},
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to create secret caching client")
+		os.Exit(1)
+	}
+
 	// Set our runtime client into the context for later use
 	runtimeClient, err := container.NewDockerClient()
 	if err != nil {
@@ -272,8 +296,9 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 	tracker, err := remote.NewClusterCacheTracker(
 		mgr,
 		remote.ClusterCacheTrackerOptions{
-			ControllerName: controllerName,
-			Log:            &log,
+			SecretCachingClient: secretCachingClient,
+			ControllerName:      controllerName,
+			Log:                 &log,
 		},
 	)
 	if err != nil {
diff --git a/util/secret/certificates.go b/util/secret/certificates.go
index c97031ced03c..bf438d647ced 100644
--- a/util/secret/certificates.go
+++ b/util/secret/certificates.go
@@ -34,6 +34,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/util/cert"
+	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -190,26 +191,33 @@ func (c Certificates) GetByPurpose(purpose Purpose) *Certificate {
 // Lookup looks up each certificate from secrets and populates the certificate with the secret data.
 func (c Certificates) Lookup(ctx context.Context, ctrlclient client.Client, clusterName client.ObjectKey) error {
+	return c.LookupCached(ctx, nil, ctrlclient, clusterName)
+}
+
+// LookupCached looks up each certificate from secrets and populates the certificate with the secret data.
+// First we try to lookup the certificate secret via the secretCachingClient. If we get a NotFound error
+// we fall back to the regular uncached client.
+func (c Certificates) LookupCached(ctx context.Context, secretCachingClient, ctrlclient client.Client, clusterName client.ObjectKey) error {
 	// Look up each certificate as a secret and populate the certificate/key
 	for _, certificate := range c {
-		s := &corev1.Secret{}
 		key := client.ObjectKey{
 			Name:      Name(clusterName.Name, certificate.Purpose),
 			Namespace: clusterName.Namespace,
 		}
-		if err := ctrlclient.Get(ctx, key, s); err != nil {
+		s, err := getCertificateSecret(ctx, secretCachingClient, ctrlclient, key)
+		if err != nil {
 			if apierrors.IsNotFound(err) {
 				if certificate.External {
-					return errors.WithMessage(err, "external certificate not found")
+					return errors.Wrap(err, "external certificate not found")
 				}
 				continue
 			}
-			return errors.WithStack(err)
+			return err
 		}
 		// If a user has a badly formatted secret it will prevent the cluster from working.
 		kp, err := secretToKeyPair(s)
 		if err != nil {
-			return err
+			return errors.Wrapf(err, "failed to read keypair from certificate %s", klog.KObj(s))
 		}
 		certificate.KeyPair = kp
 		certificate.Secret = s
@@ -217,6 +225,28 @@ func (c Certificates) Lookup(ctx context.Context, ctrlclient client.Client, clus
 	return nil
 }
 
+func getCertificateSecret(ctx context.Context, secretCachingClient, ctrlclient client.Client, key client.ObjectKey) (*corev1.Secret, error) {
+	secret := &corev1.Secret{}
+
+	if secretCachingClient != nil {
+		// Try to get the certificate via the cached client.
+		err := secretCachingClient.Get(ctx, key, secret)
+		if err != nil && !apierrors.IsNotFound(err) {
+			// Return error if we got an error which is not a NotFound error.
+			return nil, errors.Wrapf(err, "failed to get certificate %s", klog.KObj(secret))
+		}
+		if err == nil {
+			return secret, nil
+		}
+	}
+
+	// Try to get the certificate via the uncached client.
+	if err := ctrlclient.Get(ctx, key, secret); err != nil {
+		return nil, errors.Wrapf(err, "failed to get certificate %s", klog.KObj(secret))
+	}
+	return secret, nil
+}
+
 // EnsureAllExist ensure that there is some data present for every certificate.
 func (c Certificates) EnsureAllExist() error {
 	for _, certificate := range c {
@@ -265,8 +295,15 @@ func (c Certificates) SaveGenerated(ctx context.Context, ctrlclient client.Clien
 // LookupOrGenerate is a convenience function that wraps cluster bootstrap certificate behavior.
 func (c Certificates) LookupOrGenerate(ctx context.Context, ctrlclient client.Client, clusterName client.ObjectKey, owner metav1.OwnerReference) error {
+	return c.LookupOrGenerateCached(ctx, nil, ctrlclient, clusterName, owner)
+}
+
+// LookupOrGenerateCached is a convenience function that wraps cluster bootstrap certificate behavior.
+// During lookup we first try to lookup the certificate secret via the secretCachingClient. If we get a NotFound error
+// we fall back to the regular uncached client.
+func (c Certificates) LookupOrGenerateCached(ctx context.Context, secretCachingClient, ctrlclient client.Client, clusterName client.ObjectKey, owner metav1.OwnerReference) error {
 	// Find the certificates that exist
-	if err := c.Lookup(ctx, ctrlclient, clusterName); err != nil {
+	if err := c.LookupCached(ctx, secretCachingClient, ctrlclient, clusterName); err != nil {
 		return err
 	}
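For reviewers who want the pattern from this patch in isolation, here is a minimal, self-contained Go sketch of the two building blocks the diff relies on: constructing a cache-backed client from the manager, and a "cache first, live read on NotFound" secret lookup. It is not part of the diff; the package name and the helper names newSecretCachingClient and getSecret are illustrative only, and it assumes a controller-runtime version whose client.Options exposes HTTPClient and CacheOptions (as the patched code does).

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newSecretCachingClient builds a client that reads from the manager's cache,
// mirroring how the patch constructs secretCachingClient in the main packages.
func newSecretCachingClient(mgr ctrl.Manager) (client.Client, error) {
	return client.New(mgr.GetConfig(), client.Options{
		HTTPClient: mgr.GetHTTPClient(),
		Cache: &client.CacheOptions{
			Reader: mgr.GetCache(),
		},
	})
}

// getSecret prefers the cache-backed client and falls back to a live API read
// when the Secret is not in the cache, mirroring getCertificateSecret above.
func getSecret(ctx context.Context, cachedClient, liveClient client.Client, key client.ObjectKey) (*corev1.Secret, error) {
	s := &corev1.Secret{}
	if cachedClient != nil {
		err := cachedClient.Get(ctx, key, s)
		if err == nil {
			return s, nil
		}
		if !apierrors.IsNotFound(err) {
			return nil, err
		}
		// NotFound in the cache: fall through to the uncached read below.
	}
	if err := liveClient.Get(ctx, key, s); err != nil {
		return nil, err
	}
	return s, nil
}

The fallback read matters because, per the ByObject selector configured above, the cache only holds Secrets carrying the cluster name label, so a cache miss is not authoritative; the uncached read keeps behavior correct for unlabeled or not-yet-synced Secrets.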