From 99cbdf86414c585a055a2e2728d84140aaf4fd59 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakob=20M=C3=B6ller?=
Date: Wed, 23 Aug 2023 14:41:17 +0200
Subject: [PATCH] feat: switch to SSA for resource reconciliation
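
Previously, the resource managers reconciled their objects through
controllerutil.CreateOrUpdate, which fetches the live object and then
hand-picks the mutable fields to copy over in a mutate callback. This
patch switches the managers that went through CreateOrUpdate over to
server-side apply (SSA): each manager builds the complete desired
object and submits it with client.Apply, client.ForceOwnership and a
fixed client.FieldOwner, letting the API server merge the manifest and
track field ownership instead of the controller maintaining
per-resource field lists.

Because an apply patch is serialized from the object itself, its
apiVersion and kind must be populated. The resource managers therefore
carry the *runtime.Scheme and pass the desired object through
Scheme.ConvertToVersion before patching, which fills in the TypeMeta.

The LVMCluster status is applied through the status subresource in the
same way; managedFields are cleared on the in-memory object first,
since an apply request must not carry metadata.managedFields.

controller-runtime's fake client does not support apply patches yet
(see https://github.com/kubernetes-sigs/controller-runtime/issues/2341),
so the tests register a Patch interceptor that creates the object
before applying whenever an apply patch targets an object that does
not exist.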
---
 controllers/lvm_volumegroup.go       | 26 +++++-----
 controllers/lvmcluster_controller.go | 21 ++++----
 controllers/scc.go                   |  3 +-
 controllers/topolvm_controller.go    | 36 +++++++-------
 controllers/topolvm_csi_driver.go    | 26 ++++++----
 controllers/topolvm_node.go          | 47 +++++-------------
 controllers/topolvm_snapshotclass.go | 14 ++++--
 controllers/topolvm_storageclass.go  | 14 ++++--
 controllers/vgmanager.go             | 71 +++++-----------------
 controllers/vgmanager_daemonset.go   |  4 +-
 controllers/vgmanager_test.go        | 34 +++++++++++--
 11 files changed, 135 insertions(+), 161 deletions(-)

diff --git a/controllers/lvm_volumegroup.go b/controllers/lvm_volumegroup.go
index 58a1d5ec0..6ee0413c4 100644
--- a/controllers/lvm_volumegroup.go
+++ b/controllers/lvm_volumegroup.go
@@ -23,7 +23,10 @@ import (
 	lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
 	k8serror "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
@@ -32,7 +35,7 @@ const (
 	lvmvgFinalizer = "lvm.openshift.io/lvmvolumegroup"
 )
 
-type lvmVG struct{}
+type lvmVG struct{ *runtime.Scheme }
 
 func (c lvmVG) getName() string {
 	return lvmVGName
@@ -43,30 +46,23 @@ func (c lvmVG) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCl
 	lvmVolumeGroups := lvmVolumeGroups(r.Namespace, lvmCluster.Spec.Storage.DeviceClasses)
 
 	for _, volumeGroup := range lvmVolumeGroups {
-		existingVolumeGroup := &lvmv1alpha1.LVMVolumeGroup{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      volumeGroup.Name,
-				Namespace: volumeGroup.Namespace,
-			},
+		if versioned, err := c.Scheme.ConvertToVersion(volumeGroup,
+			runtime.GroupVersioner(schema.GroupVersions(c.Scheme.PrioritizedVersionsAllGroups()))); err == nil {
+			volumeGroup = versioned.(*lvmv1alpha1.LVMVolumeGroup)
 		}
 
-		err := cutil.SetControllerReference(lvmCluster, existingVolumeGroup, r.Scheme)
+		err := cutil.SetControllerReference(lvmCluster, volumeGroup, r.Scheme)
 		if err != nil {
 			r.Log.Error(err, "failed to set controller reference to LVMVolumeGroup with name", volumeGroup.Name)
 			return err
 		}
 
-		result, err := cutil.CreateOrUpdate(ctx, r.Client, existingVolumeGroup, func() error {
-			existingVolumeGroup.Finalizers = volumeGroup.Finalizers
-			existingVolumeGroup.Spec = volumeGroup.Spec
-			return nil
-		})
-
+		err = r.Client.Patch(ctx, volumeGroup, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName))
 		if err != nil {
 			r.Log.Error(err, "failed to reconcile LVMVolumeGroup", "name", volumeGroup.Name)
 			return err
 		}
 
-		r.Log.Info("successfully reconciled LVMVolumeGroup", "operation", result, "name", volumeGroup.Name)
+		r.Log.Info("successfully reconciled LVMVolumeGroup", "name", volumeGroup.Name)
 	}
 	return nil
 }
@@ -138,7 +134,7 @@ func lvmVolumeGroups(namespace string, deviceClasses []lvmv1alpha1.DeviceClass)
 			NodeSelector:   deviceClass.NodeSelector,
 			DeviceSelector: deviceClass.DeviceSelector,
 			ThinPoolConfig: deviceClass.ThinPoolConfig,
-			Default:        len(deviceClasses) == 1 || deviceClass.Default, //True if there is only one device class or default is explicitly set.
+			Default:        len(deviceClasses) == 1 || deviceClass.Default, // True if there is only one device class or default is explicitly set.
 		},
 	}
 	lvmVolumeGroups = append(lvmVolumeGroups, lvmVolumeGroup)
diff --git a/controllers/lvmcluster_controller.go b/controllers/lvmcluster_controller.go
index 08c26903f..520b1e953 100644
--- a/controllers/lvmcluster_controller.go
+++ b/controllers/lvmcluster_controller.go
@@ -165,7 +165,7 @@ func (r *LVMClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 // errors returned by this will be updated in the reconcileSucceeded condition of the LVMCluster
 func (r *LVMClusterReconciler) reconcile(ctx context.Context, instance *lvmv1alpha1.LVMCluster) (ctrl.Result, error) {
 
-	//The resource was deleted
+	// The resource was deleted
 	if !instance.DeletionTimestamp.IsZero() {
 		// Check for existing LogicalVolumes
 		lvsExist, err := r.logicalVolumesExist(ctx, instance)
@@ -198,14 +198,14 @@ func (r *LVMClusterReconciler) reconcile(ctx context.Context, instance *lvmv1alp
 	}
 
 	resourceCreationList := []resourceManager{
-		&csiDriver{},
-		&topolvmController{r.TopoLVMLeaderElectionPassthrough},
-		&openshiftSccs{},
-		&topolvmNode{},
-		&vgManager{},
-		&lvmVG{},
-		&topolvmStorageClass{},
-		&topolvmVolumeSnapshotClass{},
+		&csiDriver{r.Scheme},
+		&topolvmController{r.Scheme, r.TopoLVMLeaderElectionPassthrough},
+		&openshiftSccs{r.Scheme},
+		&topolvmNode{r.Scheme},
+		&vgManager{r.Scheme},
+		&lvmVG{r.Scheme},
+		&topolvmStorageClass{r.Scheme},
+		&topolvmVolumeSnapshotClass{r.Scheme},
 	}
 
 	// handle create/update
@@ -290,7 +290,8 @@ func (r *LVMClusterReconciler) updateLVMClusterStatus(ctx context.Context, insta
 	instance.Status.DeviceClassStatuses = allVgStatuses
 	// Apply status changes
-	err = r.Client.Status().Update(ctx, instance)
+	instance.SetManagedFields(nil)
+	err = r.Client.Status().Patch(ctx, instance, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName))
 	if err != nil {
 		if errors.IsNotFound(err) {
 			r.Log.Error(err, "failed to update status")
diff --git a/controllers/scc.go b/controllers/scc.go
index 8e68fc2dd..1466be201 100644
--- a/controllers/scc.go
+++ b/controllers/scc.go
@@ -25,13 +25,14 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 const (
 	sccName = "topolvm-scc"
 )
 
-type openshiftSccs struct{}
+type openshiftSccs struct{ *runtime.Scheme }
 
 // openshiftSccs unit satisfies resourceManager interface
 var _ resourceManager = openshiftSccs{}
diff --git a/controllers/topolvm_controller.go b/controllers/topolvm_controller.go
index d7e801748..f8c0047b5 100644
--- a/controllers/topolvm_controller.go
+++ b/controllers/topolvm_controller.go
@@ -19,9 +19,13 @@ package controllers
 import (
 	"context"
 	"fmt"
-	v1 "github.com/openshift/api/config/v1"
 	"path/filepath"
 
+	v1 "github.com/openshift/api/config/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
 	lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
 	"github.com/pkg/errors"
 	appsv1 "k8s.io/api/apps/v1"
@@ -39,6 +43,7 @@ const (
 )
 
 type topolvmController struct {
+	*runtime.Scheme
 	topoLVMLeaderElectionPassthrough v1.LeaderElection
 }
@@ -54,30 +59,25 @@ func (c topolvmController) getName() string {
 
 func (c topolvmController) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
 	// get the desired state of topolvm controller deployment
-	desiredDeployment := getControllerDeployment(lvmCluster, r.Namespace, r.ImageName, c.topoLVMLeaderElectionPassthrough)
-	existingDeployment := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      desiredDeployment.Name,
-			Namespace: desiredDeployment.Namespace,
-		},
+	deploy := getControllerDeployment(lvmCluster, r.Namespace, r.ImageName, c.topoLVMLeaderElectionPassthrough)
+
+	if versioned, err := c.Scheme.ConvertToVersion(deploy,
+		runtime.GroupVersioner(schema.GroupVersions(c.Scheme.PrioritizedVersionsAllGroups()))); err == nil {
+		deploy = versioned.(*appsv1.Deployment)
 	}
 
-	err := cutil.SetControllerReference(lvmCluster, existingDeployment, r.Scheme)
+	err := cutil.SetControllerReference(lvmCluster, deploy, r.Scheme)
 	if err != nil {
-		r.Log.Error(err, "failed to set controller reference to topolvm controller deployment with name", existingDeployment.Name)
+		r.Log.Error(err, "failed to set controller reference to topolvm controller deployment", "name", deploy.Name)
 		return err
 	}
 
-	result, err := cutil.CreateOrUpdate(ctx, r.Client, existingDeployment, func() error {
-		return c.setTopolvmControllerDesiredState(existingDeployment, desiredDeployment)
-	})
-
-	if err != nil {
-		r.Log.Error(err, "csi controller reconcile failure", "name", desiredDeployment.Name)
+	if err := r.Client.Patch(ctx, deploy, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName)); err != nil {
+		r.Log.Error(err, "csi controller reconcile failure", "name", deploy.Name)
 		return err
 	}
 
-	r.Log.Info("csi controller", "operation", result, "name", desiredDeployment.Name)
+	r.Log.Info("csi controller", "name", deploy.Name)
 	return nil
 }
@@ -159,7 +159,7 @@ func getControllerDeployment(lvmCluster *lvmv1alpha1.LVMCluster, namespace strin
 		AppKubernetesComponentLabel: TopolvmControllerLabelVal,
 	}
 
-	return &appsv1.Deployment{
+	deploy := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      TopolvmControllerDeploymentName,
 			Namespace: namespace,
@@ -186,6 +186,8 @@ func getControllerDeployment(lvmCluster *lvmv1alpha1.LVMCluster, namespace strin
 			},
 		},
 	}
+
+	return deploy
 }
 
 func initContainer(initImage string) corev1.Container {
diff --git a/controllers/topolvm_csi_driver.go b/controllers/topolvm_csi_driver.go
index d7ca169be..fe003fb1b 100644
--- a/controllers/topolvm_csi_driver.go
+++ b/controllers/topolvm_csi_driver.go
@@ -24,15 +24,19 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	k8serror "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
-	cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
 	driverName = "topolvm-csi-driver"
 )
 
-type csiDriver struct{}
+type csiDriver struct {
+	*runtime.Scheme
+}
 
 // csiDriver unit satisfies resourceManager interface
 var _ resourceManager = csiDriver{}
@@ -43,20 +47,22 @@ func (c csiDriver) getName() string {
 
 //+kubebuilder:rbac:groups=storage.k8s.io,resources=csidrivers,verbs=get;create;delete;watch;list
 
-func (c csiDriver) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
-	csiDriverResource := getCSIDriverResource()
+func (c csiDriver) ensureCreated(r *LVMClusterReconciler, ctx context.Context, _ *lvmv1alpha1.LVMCluster) error {
+	driver := getCSIDriverResource()
+
+	if versioned, err := c.Scheme.ConvertToVersion(driver,
+		runtime.GroupVersioner(schema.GroupVersions(c.Scheme.PrioritizedVersionsAllGroups()))); err == nil {
+		driver = versioned.(*storagev1.CSIDriver)
+	}
 
-	result, err := cutil.CreateOrUpdate(ctx, r.Client, csiDriverResource, func() error {
-		// no need to mutate any field
-		return nil
-	})
+	err := r.Client.Patch(ctx, driver, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName))
 	if err != nil {
-		r.Log.Error(err, "csi driver reconcile failure", "name", csiDriverResource.Name)
+		r.Log.Error(err, "csi driver reconcile failure", "name", driver.Name)
 		return err
 	}
 
-	r.Log.Info("csi driver", "operation", result, "name", csiDriverResource.Name)
+	r.Log.Info("csi driver", "name", driver.Name)
 	return nil
 }
diff --git a/controllers/topolvm_node.go b/controllers/topolvm_node.go
index c3168cba8..b336e2664 100644
--- a/controllers/topolvm_node.go
+++ b/controllers/topolvm_node.go
@@ -28,8 +28,11 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
@@ -37,7 +40,7 @@ const (
 	topolvmNodeName = "topolvm-node"
 )
 
-type topolvmNode struct{}
+type topolvmNode struct{ *runtime.Scheme }
 
 func (n topolvmNode) getName() string {
 	return topolvmNodeName
@@ -49,14 +52,10 @@ func (n topolvmNode) ensureCreated(r *LVMClusterReconciler, ctx context.Context,
 	unitLogger := r.Log.WithValues("topolvmNode", n.getName())
 
 	// get desired daemonSet spec
-	dsTemplate := getNodeDaemonSet(lvmCluster, r.Namespace, r.ImageName)
-
-	// create desired daemonSet or update mutable fields on existing one
-	ds := &appsv1.DaemonSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      dsTemplate.Name,
-			Namespace: dsTemplate.Namespace,
-		},
+	ds := getNodeDaemonSet(lvmCluster, r.Namespace, r.ImageName)
+	if versioned, err := n.Scheme.ConvertToVersion(ds,
+		runtime.GroupVersioner(schema.GroupVersions(n.Scheme.PrioritizedVersionsAllGroups()))); err == nil {
+		ds = versioned.(*appsv1.DaemonSet)
 	}
 
 	err := cutil.SetControllerReference(lvmCluster, ds, r.Scheme)
@@ -65,37 +64,13 @@ func (n topolvmNode) ensureCreated(r *LVMClusterReconciler, ctx context.Context,
 		return err
 	}
 
-	unitLogger.Info("running CreateOrUpdate")
-	result, err := cutil.CreateOrUpdate(ctx, r.Client, ds, func() error {
-		// at creation, deep copy the whole daemonSet
-		if ds.CreationTimestamp.IsZero() {
-			dsTemplate.DeepCopyInto(ds)
-			return nil
-		}
-		// if update, update only mutable fields
-		// For topolvm Node, we have containers, initcontainers, node selector and toleration terms
-
-		// containers
-		ds.Spec.Template.Spec.Containers = dsTemplate.Spec.Template.Spec.Containers
-
-		// initcontainers
-		ds.Spec.Template.Spec.InitContainers = dsTemplate.Spec.Template.Spec.InitContainers
-
-		// tolerations
-		ds.Spec.Template.Spec.Tolerations = dsTemplate.Spec.Template.Spec.Tolerations
-
-		// nodeSelector if non-nil
-		if dsTemplate.Spec.Template.Spec.Affinity != nil {
-			setDaemonsetNodeSelector(dsTemplate.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, ds)
-		}
-
-		return nil
-	})
+	unitLogger.Info("Apply")
+	err = r.Client.Patch(ctx, ds, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName))
 	if err != nil {
 		r.Log.Error(err, fmt.Sprintf("%s reconcile failure", topolvmNodeName), "name", ds.Name)
 	} else {
-		r.Log.Info(topolvmNodeName, "operation", result, "name", ds.Name)
+		r.Log.Info(topolvmNodeName, "name", ds.Name)
 	}
 	return err
 }
diff --git a/controllers/topolvm_snapshotclass.go b/controllers/topolvm_snapshotclass.go
index dff54718f..7dce700b5 100644
--- a/controllers/topolvm_snapshotclass.go
+++ b/controllers/topolvm_snapshotclass.go
@@ -25,16 +25,18 @@ import (
 	lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/discovery"
-	cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
 	vscName = "topolvm-volumeSnapshotClass"
 )
 
-type topolvmVolumeSnapshotClass struct{}
+type topolvmVolumeSnapshotClass struct{ *runtime.Scheme }
 
 // topolvmVolumeSnapshotClass unit satisfies resourceManager interface
 var _ resourceManager = topolvmVolumeSnapshotClass{}
@@ -50,9 +52,13 @@ func (s topolvmVolumeSnapshotClass) ensureCreated(r *LVMClusterReconciler, ctx c
 	// one volume snapshot class for every deviceClass based on CR is created
 	topolvmSnapshotClasses := getTopolvmSnapshotClasses(lvmCluster)
 	for _, vsc := range topolvmSnapshotClasses {
+		if versioned, err := s.Scheme.ConvertToVersion(vsc,
+			runtime.GroupVersioner(schema.GroupVersions(s.Scheme.PrioritizedVersionsAllGroups()))); err == nil {
+			vsc = versioned.(*snapapi.VolumeSnapshotClass)
+		}
 
 		// we anticipate no edits to volume snapshot class
-		result, err := cutil.CreateOrUpdate(ctx, r.Client, vsc, func() error { return nil })
+		err := r.Client.Patch(ctx, vsc, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName))
 		if err != nil {
 			// this is necessary in case the VolumeSnapshotClass CRDs are not registered in the Distro, e.g. for OpenShift Local
 			if discovery.IsGroupDiscoveryFailedError(errors.Unwrap(err)) {
@@ -62,7 +68,7 @@ func (s topolvmVolumeSnapshotClass) ensureCreated(r *LVMClusterReconciler, ctx c
 			r.Log.Error(err, "topolvm volume snapshot class reconcile failure", "name", vsc.Name)
 			return err
 		} else {
-			r.Log.Info("topolvm volume snapshot class", "operation", result, "name", vsc.Name)
+			r.Log.Info("topolvm volume snapshot class", "name", vsc.Name)
 		}
 	}
 	return nil
diff --git a/controllers/topolvm_storageclass.go b/controllers/topolvm_storageclass.go
index 7d9215f0a..f99d1f698 100644
--- a/controllers/topolvm_storageclass.go
+++ b/controllers/topolvm_storageclass.go
@@ -24,15 +24,17 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
-	cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
 	scName = "topolvm-storageclass"
 )
 
-type topolvmStorageClass struct{}
+type topolvmStorageClass struct{ *runtime.Scheme }
 
 // topolvmStorageClass unit satisfies resourceManager interface
 var _ resourceManager = topolvmStorageClass{}
@@ -48,14 +50,18 @@ func (s topolvmStorageClass) ensureCreated(r *LVMClusterReconciler, ctx context.
 	// one storage class for every deviceClass based on CR is created
 	topolvmStorageClasses := getTopolvmStorageClasses(r, ctx, lvmCluster)
 	for _, sc := range topolvmStorageClasses {
+		if versioned, err := s.Scheme.ConvertToVersion(sc,
+			runtime.GroupVersioner(schema.GroupVersions(s.Scheme.PrioritizedVersionsAllGroups()))); err == nil {
+			sc = versioned.(*storagev1.StorageClass)
+		}
 
 		// we anticipate no edits to storage class
-		result, err := cutil.CreateOrUpdate(ctx, r.Client, sc, func() error { return nil })
+		err := r.Client.Patch(ctx, sc, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName))
 		if err != nil {
 			r.Log.Error(err, "topolvm storage class reconcile failure", "name", sc.Name)
 			return err
 		} else {
-			r.Log.Info("topolvm storage class", "operation", result, "name", sc.Name)
+			r.Log.Info("topolvm storage class", "name", sc.Name)
 		}
 	}
 	return nil
diff --git a/controllers/vgmanager.go b/controllers/vgmanager.go
index 78a994304..3cca2c9bf 100644
--- a/controllers/vgmanager.go
+++ b/controllers/vgmanager.go
@@ -22,12 +22,13 @@ import (
 
 	lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
 	appsv1 "k8s.io/api/apps/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-type vgManager struct{}
+type vgManager struct{ *runtime.Scheme }
 
 var _ resourceManager = vgManager{}
 
@@ -43,72 +44,26 @@ func (v vgManager) ensureCreated(r *LVMClusterReconciler, ctx context.Context, l
 	unitLogger := r.Log.WithValues("resourceManager", v.getName())
 
 	// get desired daemonset spec
-	dsTemplate := newVGManagerDaemonset(lvmCluster, r.Namespace, r.ImageName)
+	ds := newVGManagerDaemonset(lvmCluster, r.Namespace, r.ImageName)
+	if versioned, err := v.Scheme.ConvertToVersion(ds,
+		runtime.GroupVersioner(schema.GroupVersions(v.Scheme.PrioritizedVersionsAllGroups()))); err == nil {
+		ds = versioned.(*appsv1.DaemonSet)
+	}
 
 	// controller reference
-	err := ctrl.SetControllerReference(lvmCluster, &dsTemplate, r.Scheme)
+	err := ctrl.SetControllerReference(lvmCluster, ds, r.Scheme)
 	if err != nil {
-		return fmt.Errorf("failed to set controller reference on vgManager daemonset %q. %v", dsTemplate.Name, err)
+		return fmt.Errorf("failed to set controller reference on vgManager daemonset %q. %v", ds.Name, err)
 	}
 
-	// create desired daemonset or update mutable fields on existing one
-	ds := &appsv1.DaemonSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      dsTemplate.Name,
-			Namespace: dsTemplate.Namespace,
-		},
-	}
-	unitLogger.Info("running CreateOrUpdate")
-	// the anonymous mutate function modifies the daemonset object after fetching it.
-	// if the daemonset does not already exist, it creates it, otherwise, it updates it
-	result, err := ctrl.CreateOrUpdate(ctx, r.Client, ds, func() error {
-		// at creation, deep copy the whole daemonset
-		if ds.CreationTimestamp.IsZero() {
-			dsTemplate.DeepCopyInto(ds)
-			return nil
-		}
-		// if update, update only mutable fields
-
-		// copy selector labels to daemonset and template
-		initMapIfNil(&ds.ObjectMeta.Labels)
-		initMapIfNil(&ds.Spec.Template.ObjectMeta.Labels)
-		for key, value := range dsTemplate.Labels {
-			ds.ObjectMeta.Labels[key] = value
-			ds.Spec.Template.ObjectMeta.Labels[key] = value
-		}
-
-		// containers
-		ds.Spec.Template.Spec.Containers = dsTemplate.Spec.Template.Spec.Containers
-
-		// volumes
-		ds.Spec.Template.Spec.Volumes = dsTemplate.Spec.Template.Spec.Volumes
-
-		// service account
-		ds.Spec.Template.Spec.ServiceAccountName = dsTemplate.Spec.Template.Spec.ServiceAccountName
-
-		// controller reference
-		err := ctrl.SetControllerReference(lvmCluster, ds, r.Scheme)
-		if err != nil {
-			return fmt.Errorf("failed to update controller reference on vgManager daemonset %q. %v", ds.Name, err)
-		}
-		// tolerations
-		ds.Spec.Template.Spec.Tolerations = dsTemplate.Spec.Template.Spec.Tolerations
-
-		// nodeSelector if non-nil
-		if dsTemplate.Spec.Template.Spec.Affinity != nil {
-			setDaemonsetNodeSelector(dsTemplate.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, ds)
-		}
-
-		return nil
-	})
+	unitLogger.Info("Apply")
+	// server-side apply creates the daemonset if it does not exist, otherwise it updates it
+	err = r.Client.Patch(ctx, ds, client.Apply, client.ForceOwnership, client.FieldOwner(ControllerName))
 	if err != nil {
 		r.Log.Error(err, "failed to create or update vgManager daemonset", "name", ds.Name)
 		return err
-	} else if result != controllerutil.OperationResultNone {
-		unitLogger.Info("daemonset modified", "operation", result, "name", ds.Name)
-	} else {
-		unitLogger.Info("daemonset unchanged")
 	}
 	return err
 }
diff --git a/controllers/vgmanager_daemonset.go b/controllers/vgmanager_daemonset.go
index e14253867..9d0ae923a 100644
--- a/controllers/vgmanager_daemonset.go
+++ b/controllers/vgmanager_daemonset.go
@@ -113,7 +113,7 @@ var (
 )
 
 // newVGManagerDaemonset returns the desired vgmanager daemonset for a given LVMCluster
-func newVGManagerDaemonset(lvmCluster *lvmv1alpha1.LVMCluster, namespace string, vgImage string) appsv1.DaemonSet {
+func newVGManagerDaemonset(lvmCluster *lvmv1alpha1.LVMCluster, namespace string, vgImage string) *appsv1.DaemonSet {
 	// aggregate nodeSelector and tolerations from all deviceClasses
 	nodeSelector, tolerations := extractNodeSelectorAndTolerations(lvmCluster)
 	volumes := []corev1.Volume{LVMDConfVol, DevHostDirVol, UDevHostDirVol, SysHostDirVol}
@@ -207,5 +207,5 @@ func newVGManagerDaemonset(lvmCluster *lvmv1alpha1.LVMCluster, namespace string,
 	// set nodeSelector
 	setDaemonsetNodeSelector(nodeSelector, &ds)
-	return ds
+	return &ds
 }
diff --git a/controllers/vgmanager_test.go b/controllers/vgmanager_test.go
index be25bee46..958cec97f 100644
--- a/controllers/vgmanager_test.go
+++ b/controllers/vgmanager_test.go
@@ -18,18 +18,23 @@ package controllers
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"testing"
 
+	"gotest.tools/v3/assert"
+
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
 	lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
-	"gotest.tools/v3/assert"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	k8serror "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" logf "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -53,10 +58,31 @@ func newFakeLVMClusterReconciler(t *testing.T, objs ...client.Object) *LVMCluste err = snapapi.AddToScheme(scheme) assert.NilError(t, err, "adding snapshot api to scheme") - client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() + // needed due to lack of apply fake support in controller-runtime https://github.com/kubernetes-sigs/controller-runtime/issues/2341 + interceptorForApplyWorkaround := func(ctx context.Context, clnt client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + // Apply patches are supposed to upsert, but fake client fails if the object doesn't exist, + // if an apply patch occurs for a deployment that doesn't yet exist, create it. + // However, we already hold the fakeclient lock, so we can't use the front door. + if patch.Type() != types.ApplyPatchType { + return clnt.Patch(ctx, obj, patch, opts...) + } + check, ok := obj.DeepCopyObject().(client.Object) + if !ok { + return errors.New("could not check for object in fake client") + } + if err := clnt.Get(ctx, client.ObjectKeyFromObject(obj), check); k8serror.IsNotFound(err) { + if err := clnt.Create(ctx, check); err != nil { + return fmt.Errorf("could not inject object creation for fake: %w", err) + } + } + return clnt.Patch(ctx, obj, patch, opts...) + } + + clnt := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...). + WithInterceptorFuncs(interceptor.Funcs{Patch: interceptorForApplyWorkaround}).Build() return &LVMClusterReconciler{ - Client: client, + Client: clnt, Scheme: scheme, Log: logf.Log.WithName("LVMCLusterTest"), Namespace: "default", @@ -94,7 +120,7 @@ func TestVGManagerEnsureCreated(t *testing.T) { Spec: testCase.lvmclusterSpec, } r := newFakeLVMClusterReconciler(t, lvmcluster) - var unit resourceManager = vgManager{} + var unit resourceManager = vgManager{Scheme: r.Scheme} err := unit.ensureCreated(r, context.Background(), lvmcluster) assert.NilError(t, err, "running ensureCreated") ds := &appsv1.DaemonSet{}