Merge branch 'kubernetes-sigs:main' into main
ryjogo committed Feb 21, 2024
2 parents b6703d7 + ae2e1e9 commit 68f7cb4
Showing 14 changed files with 1,155 additions and 177 deletions.
24 changes: 24 additions & 0 deletions CHANGELOG/v1.5.6.md
@@ -0,0 +1,24 @@
## 👌 Kubernetes version support

- Management Cluster: v1.24.x -> v1.28.x
- Workload Cluster: v1.22.x -> v1.28.x

[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html)

## Changes since v1.5.5
## :chart_with_upwards_trend: Overview
- 6 new commits merged
- 1 bug fixed 🐛

## :bug: Bug Fixes
- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#10065)

## :seedling: Others
- clusterctl: Bump cert-manager to v1.14.1 (#10121)
- Community meeting: Promote chrischdi to Cluster API maintainer (#10090)
- Dependency: Bump Go to 1.21.5 (#10153)

:book: Additionally, there has been 1 contribution to our documentation and book. (#10117)


_Thanks to all our contributors!_ 😊
34 changes: 34 additions & 0 deletions CHANGELOG/v1.6.2.md
@@ -0,0 +1,34 @@
## 👌 Kubernetes version support

- Management Cluster: v1.25.x -> v1.29.x
- Workload Cluster: v1.23.x -> v1.29.x

[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html)

## Highlights
* :warning: Warning: This release fixes a bug (#10051) that was introduced in v1.6.0, which caused a regression in the conversion of v1alpha3/v1alpha4 objects. It is recommended to upgrade to v1.6.2 to avoid the issue.

## Changes since v1.6.1
## :chart_with_upwards_trend: Overview
- 16 new commits merged
- 3 bugs fixed 🐛

## :bug: Bug Fixes
- [API/e2e]: Restore v1alpha3/v1alpha4 conversion to fix SSA issue & add e2e test coverage (#10151)
- :warning: Warning: This change is a fix for the conversion bug that was introduced in v1.6.0.
- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#10064)
- Machine: Watch external objects for machine before deleting (#10177)

## :seedling: Others
- clusterctl: Bump cert-manager to v1.14.1 (#10120)
- clusterctl: Clarify rules for adding new clusterctl default providers (#10109)
- Community meeting: Promote chrischdi to Cluster API maintainer (#10089)
- Dependency: Bump controller runtime v0.16.5 (#10163)
- Dependency: Bump Go to 1.21.5 (#10152)
- e2e: Use manager in test extension (#10106)
- Testing: Print conformance image used in kubetest (#10081)

:book: Additionally, there have been 4 contributions to our documentation and book. (#10024, #10047, #10105, #10116)


_Thanks to all our contributors!_ 😊
4 changes: 4 additions & 0 deletions api/v1beta1/common_types.go
@@ -68,6 +68,10 @@ const (
// update that disallows a pre-existing Cluster from being populated with Topology information and Class.
ClusterTopologyUnsafeUpdateClassNameAnnotation = "unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check"

// ClusterTopologyUnsafeUpdateVersionAnnotation can be used to disable the webhook checks on
// update that disallow updating the .topology.spec.version under certain conditions.
ClusterTopologyUnsafeUpdateVersionAnnotation = "unsafe.topology.cluster.x-k8s.io/disable-update-version-check"

// ProviderNameLabel is the label set on components in the provider manifest.
// This label allows to easily identify all the components belonging to a provider; the clusterctl
// tool uses this label for implementing provider's lifecycle operations.
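The new annotation follows the same opt-out pattern as the existing class-name check. Below is a minimal sketch of how a user might set it on a Cluster before an otherwise-blocked version update; the helper, the update flow, and the `"true"` value are assumptions, since the webhook is only expected to key off the annotation's presence:

```go
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// optOutOfVersionCheck is a hypothetical helper: it sets the new unsafe
// annotation on a Cluster so the topology webhook skips its version-update
// checks on the next update.
func optOutOfVersionCheck(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) error {
	annotations := cluster.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	// Assumption: the webhook checks only for the key's presence; "true" is a placeholder value.
	annotations[clusterv1.ClusterTopologyUnsafeUpdateVersionAnnotation] = "true"
	cluster.SetAnnotations(annotations)
	return c.Update(ctx, cluster)
}
```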
5 changes: 5 additions & 0 deletions controllers/remote/cluster_cache_tracker.go
@@ -189,6 +189,11 @@ func (t *ClusterCacheTracker) GetClient(ctx context.Context, cluster client.Obje
return accessor.client, nil
}

// GetReader returns a cached read-only client for the given cluster.
func (t *ClusterCacheTracker) GetReader(ctx context.Context, cluster client.ObjectKey) (client.Reader, error) {
return t.GetClient(ctx, cluster)
}

// GetRESTConfig returns a cached REST config for the given cluster.
func (t *ClusterCacheTracker) GetRESTConfig(ctx context.Context, cluster client.ObjectKey) (*rest.Config, error) {
accessor, err := t.getClusterAccessor(ctx, cluster, t.indexes...)
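Since GetReader simply narrows the cached client to controller-runtime's read-only client.Reader interface, callers that only need lookups can request that directly. A brief usage sketch — the helper name and the cluster key are illustrative, not part of the change:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api/controllers/remote"
)

// listWorkloadNodes is a hypothetical helper: it lists the Nodes of a workload
// cluster through the tracker's cached read-only client.
func listWorkloadNodes(ctx context.Context, tracker *remote.ClusterCacheTracker, key client.ObjectKey) (*corev1.NodeList, error) {
	reader, err := tracker.GetReader(ctx, key)
	if err != nil {
		return nil, err
	}
	nodes := &corev1.NodeList{}
	if err := reader.List(ctx, nodes); err != nil {
		return nil, err
	}
	return nodes, nil
}
```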
5 changes: 3 additions & 2 deletions docs/book/src/reference/labels_and_annotations.md
@@ -2,7 +2,7 @@


| Label | Note |
| :---------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|:------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| cluster.x-k8s.io/cluster-name | It is set on machines linked to a cluster and external objects (bootstrap and infrastructure providers). |
| topology.cluster.x-k8s.io/owned | It is set on all the objects which are managed as part of a ClusterTopology. |
| topology.cluster.x-k8s.io/deployment-name | It is set on the generated MachineDeployment objects to track the name of the MachineDeployment topology it represents. |
@@ -21,11 +21,12 @@
**Supported Annotations:**

| Annotation | Note |
| :--------------------------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|:-----------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| clusterctl.cluster.x-k8s.io/skip-crd-name-preflight-check | Can be placed on provider CRDs, so that clusterctl doesn't emit an error if the CRD doesn't comply with Cluster API's naming scheme. Only CRDs that are referenced by core Cluster API CRDs have to comply with the naming scheme. |
| clusterctl.cluster.x-k8s.io/delete-for-move | DeleteForMoveAnnotation is set on objects that are going to be deleted from the source cluster after being moved to the target cluster during the clusterctl move operation. It helps validation webhooks make decisions based on it. |
| clusterctl.cluster.x-k8s.io/block-move | BlockMoveAnnotation prevents the cluster move operation from starting if it is defined on at least one of the objects in scope. Provider controllers are expected to set the annotation on resources that cannot be instantaneously paused and remove the annotation when the resource has actually been paused. |
| unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check | It can be used to disable the webhook check on update that disallows a pre-existing Cluster from being populated with Topology information and Class. |
| unsafe.topology.cluster.x-k8s.io/disable-update-version-check | It can be used to disable the webhook checks on update that disallow updating the `.topology.spec.version` under certain conditions. |
| cluster.x-k8s.io/cluster-name | It is set on nodes identifying the name of the cluster the node belongs to. |
| cluster.x-k8s.io/cluster-namespace | It is set on nodes identifying the namespace of the cluster the node belongs to. |
| cluster.x-k8s.io/machine | It is set on nodes identifying the machine the node belongs to. |
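As an illustration of the block-move contract described in the table above, here is a hypothetical provider-side helper; the constant mirrors the documented annotation key, and the presence-based `"true"` value is an assumption:

```go
package example

import "sigs.k8s.io/controller-runtime/pkg/client"

// blockMoveAnnotation mirrors the documented key; defined locally for this sketch.
const blockMoveAnnotation = "clusterctl.cluster.x-k8s.io/block-move"

// setBlockMove is a hypothetical provider-controller helper: it blocks
// clusterctl move while a resource cannot be paused instantaneously and
// unblocks it once the resource has actually been paused.
func setBlockMove(obj client.Object, blocked bool) {
	annotations := obj.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	if blocked {
		// Assumption: the check is presence-based, so any value works.
		annotations[blockMoveAnnotation] = "true"
	} else {
		delete(annotations, blockMoveAnnotation)
	}
	obj.SetAnnotations(annotations)
}
```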
56 changes: 7 additions & 49 deletions internal/controllers/topology/cluster/scope/state.go
@@ -18,16 +18,14 @@ package scope

import (
"context"
"fmt"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/internal/topology/check"
)

// ClusterState holds all the objects representing the state of a managed Cluster topology.
@@ -68,7 +66,7 @@ type MachineDeploymentsStateMap map[string]*MachineDeploymentState

// Upgrading returns the list of the machine deployments
// that are upgrading.
func (mds MachineDeploymentsStateMap) Upgrading(ctx context.Context, c client.Client) ([]string, error) {
func (mds MachineDeploymentsStateMap) Upgrading(ctx context.Context, c client.Reader) ([]string, error) {
names := []string{}
for _, md := range mds {
upgrading, err := md.IsUpgrading(ctx, c)
@@ -101,40 +99,16 @@ type MachineDeploymentState struct {
// IsUpgrading determines if the MachineDeployment is upgrading.
// A machine deployment is considered upgrading if at least one of the Machines of this
// MachineDeployment has a different version.
func (md *MachineDeploymentState) IsUpgrading(ctx context.Context, c client.Client) (bool, error) {
// If the MachineDeployment has no version there is no definitive way to check if it is upgrading. Therefore, return false.
// Note: This case should not happen.
if md.Object.Spec.Template.Spec.Version == nil {
return false, nil
}
selectorMap, err := metav1.LabelSelectorAsMap(&md.Object.Spec.Selector)
if err != nil {
return false, errors.Wrapf(err, "failed to check if MachineDeployment %s is upgrading: failed to convert label selector to map", md.Object.Name)
}
machines := &clusterv1.MachineList{}
if err := c.List(ctx, machines, client.InNamespace(md.Object.Namespace), client.MatchingLabels(selectorMap)); err != nil {
return false, errors.Wrapf(err, "failed to check if MachineDeployment %s is upgrading: failed to list Machines", md.Object.Name)
}
mdVersion := *md.Object.Spec.Template.Spec.Version
// Check if the versions of the all the Machines match the MachineDeployment version.
for i := range machines.Items {
machine := machines.Items[i]
if machine.Spec.Version == nil {
return false, fmt.Errorf("failed to check if MachineDeployment %s is upgrading: Machine %s has no version", md.Object.Name, machine.Name)
}
if *machine.Spec.Version != mdVersion {
return true, nil
}
}
return false, nil
func (md *MachineDeploymentState) IsUpgrading(ctx context.Context, c client.Reader) (bool, error) {
return check.IsMachineDeploymentUpgrading(ctx, c, md.Object)
}

// MachinePoolsStateMap holds a collection of MachinePool states.
type MachinePoolsStateMap map[string]*MachinePoolState

// Upgrading returns the list of the machine pools
// that are upgrading.
func (mps MachinePoolsStateMap) Upgrading(ctx context.Context, c client.Client) ([]string, error) {
func (mps MachinePoolsStateMap) Upgrading(ctx context.Context, c client.Reader) ([]string, error) {
names := []string{}
for _, mp := range mps {
upgrading, err := mp.IsUpgrading(ctx, c)
@@ -163,22 +137,6 @@ type MachinePoolState struct {
// IsUpgrading determines if the MachinePool is upgrading.
// A machine pool is considered upgrading if at least one of the Machines of this
// MachinePool has a different version.
func (mp *MachinePoolState) IsUpgrading(ctx context.Context, c client.Client) (bool, error) {
// If the MachinePool has no version there is no definitive way to check if it is upgrading. Therefore, return false.
// Note: This case should not happen.
if mp.Object.Spec.Template.Spec.Version == nil {
return false, nil
}
mpVersion := *mp.Object.Spec.Template.Spec.Version
// Check if the kubelet versions of the MachinePool noderefs match the MachinePool version.
for _, nodeRef := range mp.Object.Status.NodeRefs {
node := &corev1.Node{}
if err := c.Get(ctx, client.ObjectKey{Name: nodeRef.Name}, node); err != nil {
return false, fmt.Errorf("failed to check if MachinePool %s is upgrading: failed to get Node %s", mp.Object.Name, nodeRef.Name)
}
if mpVersion != node.Status.NodeInfo.KubeletVersion {
return true, nil
}
}
return false, nil
func (mp *MachinePoolState) IsUpgrading(ctx context.Context, c client.Reader) (bool, error) {
return check.IsMachinePoolUpgrading(ctx, c, mp.Object)
}
86 changes: 0 additions & 86 deletions internal/controllers/topology/cluster/scope/state_test.go
@@ -29,92 +29,6 @@ import (
"sigs.k8s.io/cluster-api/internal/test/builder"
)

func TestIsUpgrading(t *testing.T) {
g := NewWithT(t)
scheme := runtime.NewScheme()
g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())

tests := []struct {
name string
md *clusterv1.MachineDeployment
machines []*clusterv1.Machine
want bool
wantErr bool
}{
{
name: "should return false if all the machines of MachineDeployment have the same version as the MachineDeployment",
md: builder.MachineDeployment("ns", "md1").
WithClusterName("cluster1").
WithVersion("v1.2.3").
Build(),
machines: []*clusterv1.Machine{
builder.Machine("ns", "machine1").
WithClusterName("cluster1").
WithVersion("v1.2.3").
Build(),
builder.Machine("ns", "machine2").
WithClusterName("cluster1").
WithVersion("v1.2.3").
Build(),
},
want: false,
wantErr: false,
},
{
name: "should return true if at least one of the machines of MachineDeployment has a different version",
md: builder.MachineDeployment("ns", "md1").
WithClusterName("cluster1").
WithVersion("v1.2.3").
Build(),
machines: []*clusterv1.Machine{
builder.Machine("ns", "machine1").
WithClusterName("cluster1").
WithVersion("v1.2.3").
Build(),
builder.Machine("ns", "machine2").
WithClusterName("cluster1").
WithVersion("v1.2.2").
Build(),
},
want: true,
wantErr: false,
},
{
name: "should return false if the MachineDeployment has no machines (creation phase)",
md: builder.MachineDeployment("ns", "md1").
WithClusterName("cluster1").
WithVersion("v1.2.3").
Build(),
machines: []*clusterv1.Machine{},
want: false,
wantErr: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
objs := []client.Object{}
objs = append(objs, tt.md)
for _, m := range tt.machines {
objs = append(objs, m)
}
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
mdState := &MachineDeploymentState{
Object: tt.md,
}
got, err := mdState.IsUpgrading(ctx, fakeClient)
if tt.wantErr {
g.Expect(err).To(HaveOccurred())
} else {
g.Expect(err).ToNot(HaveOccurred())
g.Expect(got).To(Equal(tt.want))
}
})
}
}

func TestUpgrading(t *testing.T) {
g := NewWithT(t)
scheme := runtime.NewScheme()