diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go
index 0614607b9b40..2da2cd26b435 100644
--- a/exp/topology/desiredstate/desired_state.go
+++ b/exp/topology/desiredstate/desired_state.go
@@ -82,7 +82,7 @@ type generator struct {
 // NOTE: We are assuming all the required objects are provided as input; also, in case of any error,
 // the entire compute operation will fail. This might be improved in the future if support for reconciling
 // subset of a topology will be implemented.
-func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
+func (g *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
 	var err error
 	desiredState := &scope.ClusterState{
 		ControlPlane: &scope.ControlPlaneState{},
@@ -105,7 +105,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 	// - Building the TopologyReconciled condition.
 	// - Make upgrade decisions on the control plane.
 	// - Making upgrade decisions on machine deployments.
-	mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, e.Client)
+	mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, g.Client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to check if any MachineDeployment is upgrading")
 	}
@@ -117,7 +117,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 	// - Make upgrade decisions on the control plane.
 	// - Making upgrade decisions on machine pools.
 	if len(s.Current.MachinePools) > 0 {
-		client, err := e.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
+		client, err := g.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
 		}
@@ -131,7 +131,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 
 	// Compute the desired state of the ControlPlane object, eventually adding a reference to the
 	// InfrastructureMachineTemplate generated by the previous step.
-	if desiredState.ControlPlane.Object, err = e.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
+	if desiredState.ControlPlane.Object, err = g.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
 		return nil, errors.Wrapf(err, "failed to compute ControlPlane")
 	}
 
@@ -156,7 +156,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 	// If required, compute the desired state of the MachineDeployments from the list of MachineDeploymentTopologies
 	// defined in the cluster.
 	if s.Blueprint.HasMachineDeployments() {
-		desiredState.MachineDeployments, err = e.computeMachineDeployments(ctx, s)
+		desiredState.MachineDeployments, err = g.computeMachineDeployments(ctx, s)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to compute MachineDeployments")
 		}
@@ -165,7 +165,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 	// If required, compute the desired state of the MachinePools from the list of MachinePoolTopologies
 	// defined in the cluster.
 	if s.Blueprint.HasMachinePools() {
-		desiredState.MachinePools, err = e.computeMachinePools(ctx, s)
+		desiredState.MachinePools, err = g.computeMachinePools(ctx, s)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to compute MachinePools")
 		}
@@ -177,7 +177,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 	// are preserved during patching. When desired objects are computed their spec is copied from a template, in some cases
 	// further modifications to the spec are made afterwards. In those cases we have to make sure those fields are not overwritten
 	// in apply patches. Some examples are .spec.machineTemplate and .spec.version in control planes.
-	if err := e.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
+	if err := g.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
 		return nil, errors.Wrap(err, "failed to apply patches")
 	}
 
@@ -249,7 +249,7 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop
 
 // computeControlPlane computes the desired state for the ControlPlane object starting from the
 // corresponding template defined in the blueprint.
-func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
 	template := s.Blueprint.ControlPlane.Template
 	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref
 	cluster := s.Current.Cluster
@@ -384,7 +384,7 @@ func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
 	}
 
 	// Sets the desired Kubernetes version for the control plane.
-	version, err := e.computeControlPlaneVersion(ctx, s)
+	version, err := g.computeControlPlaneVersion(ctx, s)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to compute version of control plane")
 	}
@@ -398,7 +398,7 @@ func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
 // computeControlPlaneVersion calculates the version of the desired control plane.
 // The version is calculated using the state of the current machine deployments, the current control plane
 // and the version defined in the topology.
-func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
+func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
 	log := tlog.LoggerFrom(ctx)
 	desiredVersion := s.Blueprint.Topology.Version
 	// If we are creating the control plane object (current control plane is nil), use version from topology.
@@ -467,7 +467,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 			KubernetesVersion: desiredVersion,
 		}
 		hookResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{}
-		if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+		if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
 			return "", err
 		}
 		// Add the response to the tracker so we can later update condition or requeue when required.
@@ -479,7 +479,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 		if hookResponse.RetryAfterSeconds != 0 {
 			log.Infof("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
 		} else {
-			if err := hooks.MarkAsDone(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
+			if err := hooks.MarkAsDone(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
 				return "", err
 			}
 		}
@@ -520,7 +520,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 			ToKubernetesVersion: desiredVersion,
 		}
 		hookResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{}
-		if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+		if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
 			return "", err
 		}
 		// Add the response to the tracker so we can later update condition or requeue when required.
@@ -533,7 +533,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 
 		// We are picking up the new version here.
 		// Track the intent of calling the AfterControlPlaneUpgrade and the AfterClusterUpgrade hooks once we are done with the upgrade.
-		if err := hooks.MarkAsPending(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
+		if err := hooks.MarkAsPending(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
 			return "", err
 		}
 	}
@@ -605,10 +605,10 @@ func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredRe
 }
 
 // computeMachineDeployments computes the desired state of the list of MachineDeployments.
-func (e *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
+func (g *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
 	machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap)
 	for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
-		desiredMachineDeployment, err := e.computeMachineDeployment(ctx, s, mdTopology)
+		desiredMachineDeployment, err := g.computeMachineDeployment(ctx, s, mdTopology)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to compute MachineDepoyment for topology %q", mdTopology.Name)
 		}
@@ -620,7 +620,7 @@ func (e *generator) computeMachineDeployments(ctx context.Context, s *scope.Scop
 // computeMachineDeployment computes the desired state for a MachineDeploymentTopology.
 // The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and
 // the machineDeployment class.
-func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
+func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
 	desiredMachineDeployment := &scope.MachineDeploymentState{}
 
 	// Gets the blueprint for the MachineDeployment class.
@@ -699,7 +699,7 @@ func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
 	// Add ClusterTopologyMachineDeploymentLabel to the generated InfrastructureMachine template
 	infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
 	desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels)
-	version := e.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
+	version := g.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
 
 	// Compute values that can be set both in the MachineDeploymentClass and in the MachineDeploymentTopology
 	minReadySeconds := machineDeploymentClass.MinReadySeconds
@@ -840,7 +840,7 @@ func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
 // computeMachineDeploymentVersion calculates the version of the desired machine deployment.
 // The version is calculated using the state of the current machine deployments,
 // the current control plane and the version defined in the topology.
-func (e *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
+func (g *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
 	desiredVersion := s.Blueprint.Topology.Version
 	// If creating a new machine deployment, mark it as pending if the control plane is not
 	// yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions.
@@ -931,10 +931,10 @@ func isMachineDeploymentDeferred(clusterTopology *clusterv1.Topology, mdTopology
 }
 
 // computeMachinePools computes the desired state of the list of MachinePools.
-func (e *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
+func (g *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
 	machinePoolsStateMap := make(scope.MachinePoolsStateMap)
 	for _, mpTopology := range s.Blueprint.Topology.Workers.MachinePools {
-		desiredMachinePool, err := e.computeMachinePool(ctx, s, mpTopology)
+		desiredMachinePool, err := g.computeMachinePool(ctx, s, mpTopology)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to compute MachinePool for topology %q", mpTopology.Name)
 		}
@@ -946,7 +946,7 @@ func (e *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (sc
 // computeMachinePool computes the desired state for a MachinePoolTopology.
 // The generated machinePool object is calculated using the values from the machinePoolTopology and
 // the machinePool class.
-func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
+func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
 	desiredMachinePool := &scope.MachinePoolState{}
 
 	// Gets the blueprint for the MachinePool class.
@@ -1025,7 +1025,7 @@ func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
 	// Add ClusterTopologyMachinePoolLabel to the generated InfrastructureMachinePool object
 	infraMachinePoolObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
 	desiredMachinePool.InfrastructureMachinePoolObject.SetLabels(infraMachinePoolObjectLabels)
-	version := e.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
+	version := g.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
 
 	// Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology
 	minReadySeconds := machinePoolClass.MinReadySeconds
@@ -1142,7 +1142,7 @@ func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
 // computeMachinePoolVersion calculates the version of the desired machine pool.
 // The version is calculated using the state of the current machine pools,
 // the current control plane and the version defined in the topology.
-func (e *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
+func (g *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
 	desiredVersion := s.Blueprint.Topology.Version
 	// If creating a new machine pool, mark it as pending if the control plane is not
 	// yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions.
diff --git a/exp/topology/desiredstate/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go
index ff7c68ddebc9..7ef777cdf1aa 100644
--- a/exp/topology/desiredstate/desired_state_test.go
+++ b/exp/topology/desiredstate/desired_state_test.go
@@ -338,9 +338,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(cluster)
 		scope.Blueprint = blueprint
 
-		r := &generator{}
-
-		obj, err := r.computeControlPlane(ctx, scope, nil)
+		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
 		g.Expect(obj).ToNot(BeNil())
 
@@ -399,9 +397,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(cluster)
 		scope.Blueprint = blueprint
 
-		r := &generator{}
-
-		obj, err := r.computeControlPlane(ctx, scope, nil)
+		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
 		g.Expect(obj).ToNot(BeNil())
 
@@ -429,9 +425,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(clusterWithoutReplicas)
 		scope.Blueprint = blueprint
 
-		r := &generator{}
-
-		obj, err := r.computeControlPlane(ctx, scope, nil)
+		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
 		g.Expect(obj).ToNot(BeNil())
 
@@ -474,9 +468,7 @@ func TestComputeControlPlane(t *testing.T) {
 		s.Blueprint = blueprint
 		s.Current.ControlPlane = &scope.ControlPlaneState{}
 
-		r := &generator{}
-
-		obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate)
+		obj, err := (&generator{}).computeControlPlane(ctx, s, infrastructureMachineTemplate)
 		g.Expect(err).ToNot(HaveOccurred())
 		g.Expect(obj).ToNot(BeNil())
 
@@ -535,9 +527,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(clusterWithControlPlaneRef)
 		scope.Blueprint = blueprint
 
-		r := &generator{}
-
-		obj, err := r.computeControlPlane(ctx, scope, nil)
+		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
 		g.Expect(obj).ToNot(BeNil())
 
@@ -605,9 +595,7 @@ func TestComputeControlPlane(t *testing.T) {
 				Object: tt.currentControlPlane,
 			}
 
-			r := &generator{}
-
-			obj, err := r.computeControlPlane(ctx, s, nil)
+			obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
 			g.Expect(err).ToNot(HaveOccurred())
 			g.Expect(obj).NotTo(BeNil())
 			assertNestedField(g, obj, tt.expectedVersion, contract.ControlPlane().Version().Path()...)
@@ -645,9 +633,7 @@ func TestComputeControlPlane(t *testing.T) {
 		s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
 		s.Blueprint = blueprint
 
-		r := &generator{}
-
-		obj, err := r.computeControlPlane(ctx, s, nil)
+		obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
 		g.Expect(err).ToNot(HaveOccurred())
 		g.Expect(obj).ToNot(BeNil())
 		g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue())
diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go
index 4e86297d82f4..4a1391d8430d 100644
--- a/test/extension/handlers/topologymutation/handler_integration_test.go
+++ b/test/extension/handlers/topologymutation/handler_integration_test.go
@@ -223,7 +223,6 @@ func generateCRDForUnstructured(u *unstructured.Unstructured) *apiextensionsv1.C
 }
 
 // getScope gets blueprint (ClusterClass) and current state based on cluster and clusterClassFile.
-// Note: MachinePools have not been implemented as they are not supported by CAPV.
 func getScope(cluster *clusterv1.Cluster, clusterClassFile string) (*scope.Scope, error) {
 	clusterClassYAML, err := os.ReadFile(clusterClassFile) //nolint:gosec // reading a file in tests is not a security issue.
 	if err != nil {