Commit 347290f

Address review comments

sbueringer committed Mar 28, 2024
1 parent 6e4cbab commit 347290f
Showing 3 changed files with 31 additions and 46 deletions.
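Most of the first file below is a mechanical rename of the method receiver on the `generator` type from `e` to `g`, following the Go convention that a receiver name is a short abbreviation of its type name. A minimal sketch of the pattern, using an illustrative type rather than the real CAPI code:

```go
package main

import "fmt"

type generator struct {
	name string
}

// Before this commit the methods used the receiver `e`, which does not
// abbreviate "generator"; after the rename the receiver matches the type.
func (g *generator) describe() string {
	return fmt.Sprintf("generator %q", g.name)
}

func main() {
	fmt.Println((&generator{name: "topology"}).describe())
}
```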
48 changes: 24 additions & 24 deletions exp/topology/desiredstate/desired_state.go
@@ -82,7 +82,7 @@ type generator struct {
// NOTE: We are assuming all the required objects are provided as input; also, in case of any error,
// the entire compute operation will fail. This might be improved in the future if support for reconciling
// a subset of a topology is implemented.
-func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
+func (g *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
var err error
desiredState := &scope.ClusterState{
ControlPlane: &scope.ControlPlaneState{},
@@ -105,7 +105,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
// - Building the TopologyReconciled condition.
// - Making upgrade decisions on the control plane.
// - Making upgrade decisions on machine deployments.
-mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, e.Client)
+mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, g.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to check if any MachineDeployment is upgrading")
}
@@ -117,7 +117,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
// - Making upgrade decisions on the control plane.
// - Making upgrade decisions on machine pools.
if len(s.Current.MachinePools) > 0 {
-client, err := e.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
+client, err := g.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
if err != nil {
return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
}
@@ -131,7 +131,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste

// Compute the desired state of the ControlPlane object, eventually adding a reference to the
// InfrastructureMachineTemplate generated by the previous step.
-if desiredState.ControlPlane.Object, err = e.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
+if desiredState.ControlPlane.Object, err = g.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
return nil, errors.Wrapf(err, "failed to compute ControlPlane")
}

@@ -156,7 +156,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
// If required, compute the desired state of the MachineDeployments from the list of MachineDeploymentTopologies
// defined in the cluster.
if s.Blueprint.HasMachineDeployments() {
-desiredState.MachineDeployments, err = e.computeMachineDeployments(ctx, s)
+desiredState.MachineDeployments, err = g.computeMachineDeployments(ctx, s)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute MachineDeployments")
}
@@ -165,7 +165,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
// If required, compute the desired state of the MachinePools from the list of MachinePoolTopologies
// defined in the cluster.
if s.Blueprint.HasMachinePools() {
-desiredState.MachinePools, err = e.computeMachinePools(ctx, s)
+desiredState.MachinePools, err = g.computeMachinePools(ctx, s)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute MachinePools")
}
@@ -177,7 +177,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
// are preserved during patching. When desired objects are computed their spec is copied from a template; in some cases
// further modifications to the spec are made afterwards. In those cases we have to make sure those fields are not overwritten
// in apply patches. Some examples are .spec.machineTemplate and .spec.version in control planes.
-if err := e.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
+if err := g.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
return nil, errors.Wrap(err, "failed to apply patches")
}

@@ -249,7 +249,7 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop

// computeControlPlane computes the desired state for the ControlPlane object starting from the
// corresponding template defined in the blueprint.
-func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
template := s.Blueprint.ControlPlane.Template
templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref
cluster := s.Current.Cluster
@@ -384,7 +384,7 @@ func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
}

// Sets the desired Kubernetes version for the control plane.
-version, err := e.computeControlPlaneVersion(ctx, s)
+version, err := g.computeControlPlaneVersion(ctx, s)
if err != nil {
return nil, errors.Wrap(err, "failed to compute version of control plane")
}
@@ -398,7 +398,7 @@ func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
// computeControlPlaneVersion calculates the version of the desired control plane.
// The version is calculated using the state of the current machine deployments, the current control plane
// and the version defined in the topology.
-func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
+func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
log := tlog.LoggerFrom(ctx)
desiredVersion := s.Blueprint.Topology.Version
// If we are creating the control plane object (current control plane is nil), use version from topology.
@@ -467,7 +467,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
KubernetesVersion: desiredVersion,
}
hookResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{}
-if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
return "", err
}
// Add the response to the tracker so we can later update condition or requeue when required.
@@ -479,7 +479,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
if hookResponse.RetryAfterSeconds != 0 {
log.Infof("MachineDeployments/MachinePools upgrades to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
} else {
-if err := hooks.MarkAsDone(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
+if err := hooks.MarkAsDone(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
return "", err
}
}
@@ -520,7 +520,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
ToKubernetesVersion: desiredVersion,
}
hookResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{}
-if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
return "", err
}
// Add the response to the tracker so we can later update condition or requeue when required.
@@ -533,7 +533,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco

// We are picking up the new version here.
// Track the intent of calling the AfterControlPlaneUpgrade and the AfterClusterUpgrade hooks once we are done with the upgrade.
-if err := hooks.MarkAsPending(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
+if err := hooks.MarkAsPending(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
return "", err
}
}
@@ -605,10 +605,10 @@ func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredRe
}

// computeMachineDeployments computes the desired state of the list of MachineDeployments.
-func (e *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
+func (g *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap)
for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
-desiredMachineDeployment, err := e.computeMachineDeployment(ctx, s, mdTopology)
+desiredMachineDeployment, err := g.computeMachineDeployment(ctx, s, mdTopology)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute MachineDeployment for topology %q", mdTopology.Name)
}
@@ -620,7 +620,7 @@ func (e *generator) computeMachineDeployments(ctx context.Context, s *scope.Scop
// computeMachineDeployment computes the desired state for a MachineDeploymentTopology.
// The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and
// the machineDeployment class.
-func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
+func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
desiredMachineDeployment := &scope.MachineDeploymentState{}

// Gets the blueprint for the MachineDeployment class.
@@ -699,7 +699,7 @@ func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
// Add ClusterTopologyMachineDeploymentLabel to the generated InfrastructureMachine template
infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels)
-version := e.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
+version := g.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)

// Compute values that can be set both in the MachineDeploymentClass and in the MachineDeploymentTopology
minReadySeconds := machineDeploymentClass.MinReadySeconds
@@ -840,7 +840,7 @@ func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
// computeMachineDeploymentVersion calculates the version of the desired machine deployment.
// The version is calculated using the state of the current machine deployments,
// the current control plane and the version defined in the topology.
-func (e *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
+func (g *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
desiredVersion := s.Blueprint.Topology.Version
// If creating a new machine deployment, mark it as pending if the control plane is not
// yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions.
@@ -931,10 +931,10 @@ func isMachineDeploymentDeferred(clusterTopology *clusterv1.Topology, mdTopology
}

// computeMachinePools computes the desired state of the list of MachinePools.
-func (e *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
+func (g *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
machinePoolsStateMap := make(scope.MachinePoolsStateMap)
for _, mpTopology := range s.Blueprint.Topology.Workers.MachinePools {
-desiredMachinePool, err := e.computeMachinePool(ctx, s, mpTopology)
+desiredMachinePool, err := g.computeMachinePool(ctx, s, mpTopology)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute MachinePool for topology %q", mpTopology.Name)
}
@@ -946,7 +946,7 @@ func (e *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (sc
// computeMachinePool computes the desired state for a MachinePoolTopology.
// The generated machinePool object is calculated using the values from the machinePoolTopology and
// the machinePool class.
-func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
+func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
desiredMachinePool := &scope.MachinePoolState{}

// Gets the blueprint for the MachinePool class.
@@ -1025,7 +1025,7 @@ func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
// Add ClusterTopologyMachinePoolLabel to the generated InfrastructureMachinePool object
infraMachinePoolObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
desiredMachinePool.InfrastructureMachinePoolObject.SetLabels(infraMachinePoolObjectLabels)
-version := e.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
+version := g.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)

// Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology
minReadySeconds := machinePoolClass.MinReadySeconds
@@ -1142,7 +1142,7 @@ func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
// computeMachinePoolVersion calculates the version of the desired machine pool.
// The version is calculated using the state of the current machine pools,
// the current control plane and the version defined in the topology.
-func (e *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
+func (g *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
desiredVersion := s.Blueprint.Topology.Version
// If creating a new machine pool, mark it as pending if the control plane is not
// yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions.
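Several of the renamed call sites above sit in the lifecycle-hook gating of computeControlPlaneVersion: before picking up a new version, the generator records the intent to call the After* hooks (hooks.MarkAsPending), and it clears that intent once a hook response no longer asks for a retry (hooks.MarkAsDone). A minimal sketch of that pending/done bookkeeping, assuming a plain in-memory map rather than CAPI's real annotation-based hooks package (the type and method names below are illustrative):

```go
package main

import "fmt"

// pendingHooks is a toy stand-in for the persisted "hooks still to be
// called" state; the real implementation stores this on the Cluster object.
type pendingHooks map[string]bool

// markAsPending records the intent to call the named hooks later
// (mirrors the hooks.MarkAsPending call in the diff above).
func (p pendingHooks) markAsPending(names ...string) {
	for _, n := range names {
		p[n] = true
	}
}

// markAsDone clears the intent once the hook has run and no retry was
// requested (mirrors the hooks.MarkAsDone call in the diff above).
func (p pendingHooks) markAsDone(name string) {
	delete(p, name)
}

func main() {
	p := pendingHooks{}
	p.markAsPending("AfterControlPlaneUpgrade", "AfterClusterUpgrade")
	fmt.Println(p["AfterControlPlaneUpgrade"]) // true: hook must still be called
	p.markAsDone("AfterControlPlaneUpgrade")
	fmt.Println(p["AfterControlPlaneUpgrade"]) // false: intent cleared
}
```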
28 changes: 7 additions & 21 deletions exp/topology/desiredstate/desired_state_test.go
@@ -338,9 +338,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(cluster)
scope.Blueprint = blueprint

-r := &generator{}
-
-obj, err := r.computeControlPlane(ctx, scope, nil)
+obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())

@@ -399,9 +397,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(cluster)
scope.Blueprint = blueprint

-r := &generator{}
-
-obj, err := r.computeControlPlane(ctx, scope, nil)
+obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())

@@ -429,9 +425,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(clusterWithoutReplicas)
scope.Blueprint = blueprint

-r := &generator{}
-
-obj, err := r.computeControlPlane(ctx, scope, nil)
+obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())

@@ -474,9 +468,7 @@ func TestComputeControlPlane(t *testing.T) {
s.Blueprint = blueprint
s.Current.ControlPlane = &scope.ControlPlaneState{}

-r := &generator{}
-
-obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate)
+obj, err := (&generator{}).computeControlPlane(ctx, s, infrastructureMachineTemplate)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())

@@ -535,9 +527,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(clusterWithControlPlaneRef)
scope.Blueprint = blueprint

-r := &generator{}
-
-obj, err := r.computeControlPlane(ctx, scope, nil)
+obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())

@@ -605,9 +595,7 @@ func TestComputeControlPlane(t *testing.T) {
Object: tt.currentControlPlane,
}

-r := &generator{}
-
-obj, err := r.computeControlPlane(ctx, s, nil)
+obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).NotTo(BeNil())
assertNestedField(g, obj, tt.expectedVersion, contract.ControlPlane().Version().Path()...)
@@ -645,9 +633,7 @@ func TestComputeControlPlane(t *testing.T) {
s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
s.Blueprint = blueprint

-r := &generator{}
-
-obj, err := r.computeControlPlane(ctx, s, nil)
+obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())
g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue())
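The test-file change is also mechanical: each three-line setup (r := &generator{} followed by a call through r) collapses into a single call on the address of a composite literal. A minimal sketch of the same refactor with a toy type, not the real test fixture:

```go
package main

import "fmt"

type generator struct{}

func (g *generator) computeControlPlane() string { return "control plane" }

func main() {
	// Before: bind the zero-value receiver to a temporary variable.
	r := &generator{}
	before := r.computeControlPlane()

	// After: when the receiver is only needed once, the method can be
	// called directly on the address of a composite literal.
	after := (&generator{}).computeControlPlane()

	fmt.Println(before == after) // true
}
```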
1 change: 0 additions & 1 deletion
@@ -223,7 +223,6 @@ func generateCRDForUnstructured(u *unstructured.Unstructured) *apiextensionsv1.C
}

// getScope gets blueprint (ClusterClass) and current state based on cluster and clusterClassFile.
-// Note: MachinePools have not been implemented as they are not supported by CAPV.
func getScope(cluster *clusterv1.Cluster, clusterClassFile string) (*scope.Scope, error) {
clusterClassYAML, err := os.ReadFile(clusterClassFile) //nolint:gosec // reading a file in tests is not a security issue.
if err != nil {
