✨ Add Node managed labels support #7173

Merged · 1 commit · Jan 20, 2023
7 changes: 7 additions & 0 deletions api/v1beta1/machine_types.go
@@ -62,6 +62,13 @@ const (
// This annotation can be set on BootstrapConfig or Machine objects. The value set on the Machine object takes precedence.
// This annotation can only be used on Control Plane Machines.
MachineCertificatesExpiryDateAnnotation = "machine.cluster.x-k8s.io/certificates-expiry"

// NodeRoleLabelPrefix is one of the CAPI managed Node label prefixes.
NodeRoleLabelPrefix = "node-role.kubernetes.io"
// NodeRestrictionLabelDomain is one of the CAPI managed Node label domains.
NodeRestrictionLabelDomain = "node-restriction.kubernetes.io"
// ManagedNodeLabelDomain is one of the CAPI managed Node label domains.
ManagedNodeLabelDomain = "node.cluster.x-k8s.io"
)

// ANCHOR: MachineSpec
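To make the matching rules concrete, here is a minimal standalone sketch (not part of the PR) of which label keys the three constants cover. The authoritative logic is getManagedLabels in machine_controller_noderef.go below; example domains such as "acme." are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// isManaged mirrors the PR's matching rules: NodeRoleLabelPrefix must match
// the label's domain exactly, while the two *Domain constants also match
// dot-delimited subdomains.
func isManaged(key string) bool {
	domain := strings.Split(key, "/")[0]
	return domain == "node-role.kubernetes.io" ||
		domain == "node-restriction.kubernetes.io" ||
		strings.HasSuffix(domain, ".node-restriction.kubernetes.io") ||
		domain == "node.cluster.x-k8s.io" ||
		strings.HasSuffix(domain, ".node.cluster.x-k8s.io")
}

func main() {
	for _, key := range []string{
		"node-role.kubernetes.io/worker",           // true: role prefix, exact domain
		"acme.node-restriction.kubernetes.io/team", // true: subdomain of the restriction domain
		"node.cluster.x-k8s.io/tier",               // true: managed domain
		"gpu-node.cluster.x-k8s.io",                // false: suffix without a "." separator
		"app.kubernetes.io/name",                   // false: not a CAPI-managed domain
	} {
		fmt.Println(key, "=>", isManaged(key))
	}
}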
1 change: 1 addition & 0 deletions internal/controllers/machine/machine_controller.go
@@ -91,6 +91,7 @@ type Reconciler struct {
// nodeDeletionRetryTimeout determines how long the controller will retry deleting a node
// during a single reconciliation.
nodeDeletionRetryTimeout time.Duration
disableNodeLabelSync bool
Member:
As a follow-up let's open an issue to track the migration of tests to envtest and the removal of this flag (another option might be to split up the func and then unit test the individual parts vs testing the entire controller).

In the meantime, we can use a follow-up PR to add a comment on this field and make it explicit that it is only for allowing partial testing of the controller logic using the fake client (because features based on SSA cannot be tested with it).

cc @ykakarap

Contributor:
Follow-up PR to add the comment: #7965

Issue to migrate tests to envtest and drop the flag: #7964 (also added to the tracking issue).

}

func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
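For context on the review thread above, a minimal sketch of why tests that drive this controller through the fake client need the flag. It assumes a controller-runtime version contemporary with this PR, whose fake client cannot service server-side apply patches:

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	cl := fake.NewClientBuilder().Build()

	node := &unstructured.Unstructured{}
	node.SetAPIVersion("v1")
	node.SetKind("Node")
	node.SetName("machine-test-node")
	node.SetLabels(map[string]string{"node.cluster.x-k8s.io/tier": "backend"})

	// client.Apply requests a server-side apply; the fake client cannot
	// evaluate SSA field ownership, so the call fails rather than patching.
	err := cl.Patch(context.Background(), node, client.Apply, client.FieldOwner("capi-machine"))
	fmt.Println(err) // non-nil with the fake client of this era; envtest succeeds
}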
45 changes: 45 additions & 0 deletions internal/controllers/machine/machine_controller_noderef.go
@@ -19,10 +19,13 @@ package machine
import (
"context"
"fmt"
"strings"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -116,6 +119,17 @@ func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Clust
}
}

if !r.disableNodeLabelSync {
options := []client.PatchOption{
client.FieldOwner("capi-machine"),
client.ForceOwnership,
}
nodePatch := unstructuredNode(node.Name, node.UID, getManagedLabels(machine.Labels))
if err := remoteClient.Patch(ctx, nodePatch, client.Apply, options...); err != nil {
return ctrl.Result{}, errors.Wrap(err, "failed to apply patch label to the node")
}
}

// Do the remaining node health checks, then set the node health to true if all checks pass.
status, message := summarizeNodeConditions(node)
if status == corev1.ConditionFalse {
@@ -131,6 +145,37 @@ func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Clust
return ctrl.Result{}, nil
}

// unstructuredNode returns a raw unstructured from Node input.
func unstructuredNode(name string, uid types.UID, labels map[string]string) *unstructured.Unstructured {
obj := &unstructured.Unstructured{}
obj.SetAPIVersion("v1")
obj.SetKind("Node")
obj.SetName(name)
obj.SetUID(uid)
obj.SetLabels(labels)
return obj
}

// getManagedLabels gets a map[string]string and returns another map[string]string
// filtering out labels not managed by CAPI.
func getManagedLabels(labels map[string]string) map[string]string {
managedLabels := make(map[string]string)
for key, value := range labels {
dnsSubdomainOrName := strings.Split(key, "/")[0]
if dnsSubdomainOrName == clusterv1.NodeRoleLabelPrefix {
managedLabels[key] = value
}
if dnsSubdomainOrName == clusterv1.NodeRestrictionLabelDomain || strings.HasSuffix(dnsSubdomainOrName, "."+clusterv1.NodeRestrictionLabelDomain) {
managedLabels[key] = value
}
if dnsSubdomainOrName == clusterv1.ManagedNodeLabelDomain || strings.HasSuffix(dnsSubdomainOrName, "."+clusterv1.ManagedNodeLabelDomain) {
managedLabels[key] = value
}
}

return managedLabels
}

// summarizeNodeConditions summarizes a Node's conditions and returns the summary of condition statuses and concatenated failed condition messages:
// if there is at least 1 semantically-negative condition, summarized status = False;
// if there is at least 1 semantically-positive condition when there is 0 semantically negative condition, summarized status = True;
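A design note worth making explicit: because the label sync is a server-side apply under a fixed field owner, the "capi-machine" manager owns exactly the set of managed labels it applied last, so removing a managed label from the Machine also removes it from the Node on the next reconcile. A minimal sketch of the flow, reusing the PR's helpers and assuming the package's existing imports; the function name syncNodeLabels is made up for illustration:

// syncNodeLabels is a hypothetical refactoring of the inline block added to
// reconcileNode above; it reuses getManagedLabels and unstructuredNode.
func syncNodeLabels(ctx context.Context, remoteClient client.Client, machine *clusterv1.Machine, node *corev1.Node) error {
	// The apply payload carries only the CAPI-managed subset of Machine labels.
	nodePatch := unstructuredNode(node.Name, node.UID, getManagedLabels(machine.Labels))
	return remoteClient.Patch(ctx, nodePatch, client.Apply,
		client.FieldOwner("capi-machine"), // stable SSA manager identity across reconciles
		client.ForceOwnership,             // take over managed labels set by other field managers
	)
}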
26 changes: 26 additions & 0 deletions internal/controllers/machine/machine_controller_noderef_test.go
@@ -227,3 +227,29 @@ func TestSummarizeNodeConditions(t *testing.T) {
})
}
}

func TestGetManagedLabels(t *testing.T) {
// Create managedLabels map from known managed prefixes.
managedLabels := map[string]string{}
managedLabels[clusterv1.ManagedNodeLabelDomain] = ""
managedLabels["custom-prefix."+clusterv1.NodeRestrictionLabelDomain] = ""
managedLabels["custom-prefix."+clusterv1.NodeRestrictionLabelDomain+"/anything"] = ""
managedLabels[clusterv1.NodeRoleLabelPrefix+"/anything"] = ""

// Append arbitrary labels.
allLabels := map[string]string{
"foo": "",
"bar": "",
"company.xyz/node.cluster.x-k8s.io": "not-managed",
"gpu-node.cluster.x-k8s.io": "not-managed",
"company.xyz/node-restriction.kubernetes.io": "not-managed",
"gpu-node-restriction.kubernetes.io": "not-managed",
}
for k, v := range managedLabels {
allLabels[k] = v
}

g := NewWithT(t)
got := getManagedLabels(allLabels)
g.Expect(got).To(BeEquivalentTo(managedLabels))
}
42 changes: 25 additions & 17 deletions internal/controllers/machine/machine_controller_phases_test.go
@@ -117,6 +117,7 @@ func TestReconcileMachinePhases(t *testing.T) {
infraConfig := defaultInfra.DeepCopy()

r := &Reconciler{
disableNodeLabelSync: true,
Client: fake.NewClientBuilder().
WithScheme(scheme.Scheme).
WithObjects(defaultCluster,
@@ -155,6 +156,7 @@ func TestReconcileMachinePhases(t *testing.T) {
infraConfig := defaultInfra.DeepCopy()

r := &Reconciler{
disableNodeLabelSync: true,
Client: fake.NewClientBuilder().
WithScheme(scheme.Scheme).
WithObjects(defaultCluster,
@@ -198,6 +200,7 @@ func TestReconcileMachinePhases(t *testing.T) {
machine.Status.LastUpdated = &lastUpdated

r := &Reconciler{
disableNodeLabelSync: true,
Client: fake.NewClientBuilder().
WithScheme(scheme.Scheme).
WithObjects(defaultCluster,
@@ -268,8 +271,7 @@ func TestReconcileMachinePhases(t *testing.T) {

node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-test-node",
Namespace: metav1.NamespaceDefault,
Name: "machine-test-node",
},
Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
}
@@ -285,8 +287,9 @@ func TestReconcileMachinePhases(t *testing.T) {
defaultKubeconfigSecret,
).Build()
r := &Reconciler{
- Client: cl,
- Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
+ disableNodeLabelSync: true,
+ Client: cl,
+ Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
}

res, err := r.reconcile(ctx, defaultCluster, machine)
@@ -334,8 +337,7 @@ func TestReconcileMachinePhases(t *testing.T) {

node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-test-node",
Namespace: metav1.NamespaceDefault,
Name: "machine-test-node",
},
Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
}
@@ -351,8 +353,9 @@ func TestReconcileMachinePhases(t *testing.T) {
defaultKubeconfigSecret,
).Build()
r := &Reconciler{
- Client: cl,
- Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
+ disableNodeLabelSync: true,
+ Client: cl,
+ Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
}

res, err := r.reconcile(ctx, defaultCluster, machine)
@@ -410,8 +413,7 @@ func TestReconcileMachinePhases(t *testing.T) {
machine.Status.LastUpdated = &lastUpdated
node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-test-node",
Namespace: metav1.NamespaceDefault,
Name: "machine-test-node",
},
Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
}
@@ -427,8 +429,9 @@ func TestReconcileMachinePhases(t *testing.T) {
defaultKubeconfigSecret,
).Build()
r := &Reconciler{
- Client: cl,
- Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
+ disableNodeLabelSync: true,
+ Client: cl,
+ Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
}

res, err := r.reconcile(ctx, defaultCluster, machine)
@@ -487,8 +490,9 @@ func TestReconcileMachinePhases(t *testing.T) {
).Build()

r := &Reconciler{
- Client: cl,
- Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
+ disableNodeLabelSync: true,
+ Client: cl,
+ Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
}

res, err := r.reconcile(ctx, defaultCluster, machine)
@@ -568,9 +572,10 @@ func TestReconcileMachinePhases(t *testing.T) {
infraConfig,
).Build()
r := &Reconciler{
- Client: cl,
- Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
- recorder: record.NewFakeRecorder(32),
+ disableNodeLabelSync: true,
+ Client: cl,
+ Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), cl, scheme.Scheme, client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}),
+ recorder: record.NewFakeRecorder(32),
}

res, err := r.reconcileDelete(ctx, defaultCluster, machine)
@@ -867,6 +872,7 @@ func TestReconcileBootstrap(t *testing.T) {

bootstrapConfig := &unstructured.Unstructured{Object: tc.bootstrapConfig}
r := &Reconciler{
disableNodeLabelSync: true,
Client: fake.NewClientBuilder().
WithObjects(tc.machine,
builder.GenericBootstrapConfigCRD.DeepCopy(),
@@ -1077,6 +1083,7 @@ func TestReconcileInfrastructure(t *testing.T) {

infraConfig := &unstructured.Unstructured{Object: tc.infraConfig}
r := &Reconciler{
disableNodeLabelSync: true,
Client: fake.NewClientBuilder().
WithObjects(tc.machine,
builder.GenericBootstrapConfigCRD.DeepCopy(),
@@ -1318,6 +1325,7 @@ func TestReconcileCertificateExpiry(t *testing.T) {
g := NewWithT(t)

r := &Reconciler{
disableNodeLabelSync: true,
Client: fake.NewClientBuilder().
WithObjects(
tc.machine,
16 changes: 8 additions & 8 deletions internal/controllers/machine/machine_controller_test.go
@@ -615,8 +615,7 @@ func TestReconcileRequest(t *testing.T) {

node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: metav1.NamespaceDefault,
Name: "test",
},
Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
}
@@ -726,8 +725,9 @@ ).Build()
).Build()

r := &Reconciler{
- Client: clientFake,
- Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
+ disableNodeLabelSync: true,
+ Client: clientFake,
+ Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
}

result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&tc.machine)})
@@ -830,8 +830,7 @@ func TestMachineConditions(t *testing.T) {

node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: metav1.NamespaceDefault,
Name: "test",
},
Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
}
@@ -971,8 +970,9 @@ ).Build()
).Build()

r := &Reconciler{
- Client: clientFake,
- Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
+ disableNodeLabelSync: true,
+ Client: clientFake,
+ Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
}

_, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&machine)})