From 759e5ddb9b7937fc7c3e3ea3eb962adea2ef315c Mon Sep 17 00:00:00 2001 From: Jont828 Date: Tue, 8 Aug 2023 17:22:24 -0400 Subject: [PATCH] Remove nodepool.go --- .../dockermachinepool_controller.go | 58 ++- .../dockermachinepool_controller_test.go | 456 +++++++++--------- .../docker/exp/internal/docker/nodepool.go | 327 ------------- .../exp/internal/docker/nodepool_test.go | 109 ----- .../controllers/dockermachine_controller.go | 4 + 5 files changed, 267 insertions(+), 687 deletions(-) delete mode 100644 test/infrastructure/docker/exp/internal/docker/nodepool.go delete mode 100644 test/infrastructure/docker/exp/internal/docker/nodepool_test.go diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index b6498e0794ae..5edcfc48cdf5 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -21,7 +21,9 @@ import ( "context" "fmt" "math/rand" + "sort" "strings" + "time" "github.com/blang/semver" "github.com/pkg/errors" @@ -207,25 +209,28 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) error { _ = ctrl.LoggerFrom(ctx) - // dockerMachineList, err := getDockerMachines(ctx, r.Client, *cluster, *machinePool, *dockerMachinePool) - // if err != nil { - // return err - // } - - // // Since nodepools don't persist the instances list, we need to construct it from the list of DockerMachines. - // nodePoolInstances, err := r.initNodePoolMachineStatusList(ctx, dockerMachineList.Items, dockerMachinePool) - // if err != nil { - // return err - // } + labelFilters := map[string]string{dockerMachinePoolLabel: dockerMachinePool.Name} + machines, err := docker.ListMachinesByCluster(ctx, cluster, labelFilters) + if err != nil { + return errors.Wrapf(err, "failed to list all machines in the cluster") + } - // pool, err := dockerexp.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool, nodePoolInstances) - // if err != nil { - // return errors.Wrap(err, "failed to build new node pool") - // } + for _, machine := range machines { + key := client.ObjectKey{ + Namespace: dockerMachinePool.Namespace, + Name: machine.Name(), + } + dockerMachine := &infrav1.DockerMachine{} + if err := r.Client.Get(ctx, key, dockerMachine); err != nil { + if err := machine.Delete(ctx); err != nil { + return errors.Wrapf(err, "failed to delete machine %s", machine.Name()) + } + } - // if err := pool.Delete(ctx); err != nil { - // return errors.Wrap(err, "failed to delete all machines in the node pool") - // } + if err := r.deleteMachinePoolMachine(ctx, *dockerMachine, *machinePool); err != nil { + return errors.Wrapf(err, "failed to delete machine %s", dockerMachine.Name) + } + } controllerutil.RemoveFinalizer(dockerMachinePool, infraexpv1.MachinePoolFinalizer) return nil @@ -287,13 +292,8 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust dockerMachinePool.Status.Ready = false conditions.MarkFalse(dockerMachinePool, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "") - return ctrl.Result{}, nil - - // // if some machine is still provisioning, force reconcile in few 
seconds to check again infrastructure. - // if res.IsZero() { - // return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - // } + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil // return res, nil } @@ -345,6 +345,18 @@ func getDockerMachinesToDelete(ctx context.Context, dockerMachines []infrav1.Doc // TODO: sort list of dockerMachines labelFilters := map[string]string{dockerMachinePoolLabel: dockerMachinePool.Name} + // Sort priority delete to the front of the list + sort.Slice(dockerMachines, func(i, j int) bool { + _, iHasAnnotation := dockerMachines[i].Annotations[clusterv1.DeleteMachineAnnotation] + _, jHasAnnotation := dockerMachines[j].Annotations[clusterv1.DeleteMachineAnnotation] + + if iHasAnnotation && jHasAnnotation { + return dockerMachines[i].Name < dockerMachines[j].Name + } + + return iHasAnnotation + }) + for _, dockerMachine := range dockerMachines { // externalMachine, err := docker.NewMachine(ctx, cluster, dockerMachine.Name, labelFilters) if numToDelete > 0 { diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_test.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_test.go index f0ca0a53b6ea..b81ae5168602 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_test.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_test.go @@ -17,246 +17,246 @@ limitations under the License. // Package controllers implements controller functionality. package controllers -import ( - "context" - "testing" +// import ( +// "context" +// "testing" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" +// . 
"github.com/onsi/gomega" +// corev1 "k8s.io/api/core/v1" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "k8s.io/utils/pointer" +// "sigs.k8s.io/controller-runtime/pkg/client" +// "sigs.k8s.io/controller-runtime/pkg/client/fake" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" - infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/internal/docker" -) +// clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +// infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" +// infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" +// "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/internal/docker" +// ) -const ( - clusterName = "test-cluster" -) +// const ( +// clusterName = "test-cluster" +// ) -func TestInitNodePoolMachineStatuses(t *testing.T) { - machine1 := clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "machine1", - Namespace: metav1.NamespaceDefault, - Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, - clusterv1.MachinePoolNameLabel: "machinepool-test", - }, - }, - Spec: clusterv1.MachineSpec{ - ClusterName: clusterName, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "DockerMachine", - Name: "docker-machine1", - Namespace: metav1.NamespaceDefault, - }, - }, - } +// func TestInitNodePoolMachineStatuses(t *testing.T) { +// machine1 := clusterv1.Machine{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "machine1", +// Namespace: metav1.NamespaceDefault, +// Labels: map[string]string{ +// clusterv1.ClusterNameLabel: clusterName, +// clusterv1.MachinePoolNameLabel: "machinepool-test", +// }, +// }, +// Spec: clusterv1.MachineSpec{ +// ClusterName: clusterName, +// InfrastructureRef: corev1.ObjectReference{ +// APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", +// Kind: "DockerMachine", +// Name: "docker-machine1", +// Namespace: metav1.NamespaceDefault, +// }, +// }, +// } - machine2 := clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "machine2", - Namespace: metav1.NamespaceDefault, - Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, - clusterv1.MachinePoolNameLabel: "machinepool-test", - }, - Annotations: map[string]string{ - clusterv1.DeleteMachineAnnotation: "true", - }, - }, - Spec: clusterv1.MachineSpec{ - ClusterName: clusterName, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "DockerMachine", - Name: "docker-machine2", - Namespace: metav1.NamespaceDefault, - }, - }, - } +// machine2 := clusterv1.Machine{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "machine2", +// Namespace: metav1.NamespaceDefault, +// Labels: map[string]string{ +// clusterv1.ClusterNameLabel: clusterName, +// clusterv1.MachinePoolNameLabel: "machinepool-test", +// }, +// Annotations: map[string]string{ +// clusterv1.DeleteMachineAnnotation: "true", +// }, +// }, +// Spec: clusterv1.MachineSpec{ +// ClusterName: clusterName, +// InfrastructureRef: corev1.ObjectReference{ +// APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", +// Kind: "DockerMachine", +// Name: "docker-machine2", +// Namespace: metav1.NamespaceDefault, +// }, +// }, +// } - dockerMachine1 := infrav1.DockerMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "docker-machine1", - Namespace: metav1.NamespaceDefault, - Labels: map[string]string{ - 
clusterv1.ClusterNameLabel: clusterName, - clusterv1.MachinePoolNameLabel: "machinepool-test", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "cluster.x-k8s.io/v1beta1", - Kind: "Machine", - Name: machine1.Name, - }, - }, - }, - Spec: infrav1.DockerMachineSpec{ - ProviderID: pointer.String("docker:////docker-machine1"), - }, - Status: infrav1.DockerMachineStatus{ - Addresses: []clusterv1.MachineAddress{ - { - Type: clusterv1.MachineInternalIP, - Address: "test-address-1", - }, - }, - Conditions: clusterv1.Conditions{ - { - Type: infrav1.BootstrapExecSucceededCondition, - Status: corev1.ConditionTrue, - }, - }, - }, - } +// dockerMachine1 := infrav1.DockerMachine{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "docker-machine1", +// Namespace: metav1.NamespaceDefault, +// Labels: map[string]string{ +// clusterv1.ClusterNameLabel: clusterName, +// clusterv1.MachinePoolNameLabel: "machinepool-test", +// }, +// OwnerReferences: []metav1.OwnerReference{ +// { +// APIVersion: "cluster.x-k8s.io/v1beta1", +// Kind: "Machine", +// Name: machine1.Name, +// }, +// }, +// }, +// Spec: infrav1.DockerMachineSpec{ +// ProviderID: pointer.String("docker:////docker-machine1"), +// }, +// Status: infrav1.DockerMachineStatus{ +// Addresses: []clusterv1.MachineAddress{ +// { +// Type: clusterv1.MachineInternalIP, +// Address: "test-address-1", +// }, +// }, +// Conditions: clusterv1.Conditions{ +// { +// Type: infrav1.BootstrapExecSucceededCondition, +// Status: corev1.ConditionTrue, +// }, +// }, +// }, +// } - dockerMachine2 := infrav1.DockerMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "docker-machine2", - Namespace: metav1.NamespaceDefault, - Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, - clusterv1.MachinePoolNameLabel: "machinepool-test", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "cluster.x-k8s.io/v1beta1", - Kind: "Machine", - Name: machine2.Name, - }, - }, - }, - Spec: infrav1.DockerMachineSpec{ - ProviderID: pointer.String("docker:////docker-machine2"), - }, - Status: infrav1.DockerMachineStatus{ - Addresses: []clusterv1.MachineAddress{ - { - Type: clusterv1.MachineInternalIP, - Address: "test-address-2", - }, - }, - }, - } +// dockerMachine2 := infrav1.DockerMachine{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "docker-machine2", +// Namespace: metav1.NamespaceDefault, +// Labels: map[string]string{ +// clusterv1.ClusterNameLabel: clusterName, +// clusterv1.MachinePoolNameLabel: "machinepool-test", +// }, +// OwnerReferences: []metav1.OwnerReference{ +// { +// APIVersion: "cluster.x-k8s.io/v1beta1", +// Kind: "Machine", +// Name: machine2.Name, +// }, +// }, +// }, +// Spec: infrav1.DockerMachineSpec{ +// ProviderID: pointer.String("docker:////docker-machine2"), +// }, +// Status: infrav1.DockerMachineStatus{ +// Addresses: []clusterv1.MachineAddress{ +// { +// Type: clusterv1.MachineInternalIP, +// Address: "test-address-2", +// }, +// }, +// }, +// } - dockerMachine3 := infrav1.DockerMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "docker-machine3", - Namespace: metav1.NamespaceDefault, - Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, - clusterv1.MachinePoolNameLabel: "machinepool-test", - }, - }, - Spec: infrav1.DockerMachineSpec{ - ProviderID: pointer.String("docker:////docker-machine3"), - }, - Status: infrav1.DockerMachineStatus{ - Addresses: []clusterv1.MachineAddress{ - { - Type: clusterv1.MachineInternalIP, - Address: "test-address-2", - }, - }, - }, - } +// dockerMachine3 := 
infrav1.DockerMachine{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "docker-machine3", +// Namespace: metav1.NamespaceDefault, +// Labels: map[string]string{ +// clusterv1.ClusterNameLabel: clusterName, +// clusterv1.MachinePoolNameLabel: "machinepool-test", +// }, +// }, +// Spec: infrav1.DockerMachineSpec{ +// ProviderID: pointer.String("docker:////docker-machine3"), +// }, +// Status: infrav1.DockerMachineStatus{ +// Addresses: []clusterv1.MachineAddress{ +// { +// Type: clusterv1.MachineInternalIP, +// Address: "test-address-2", +// }, +// }, +// }, +// } - testCases := []struct { - name string - dockerMachines []infrav1.DockerMachine - machines []clusterv1.Machine - expectedInstances []docker.NodePoolMachineStatus - expectErr bool - }{ - { - name: "should return a list of instances", - dockerMachines: []infrav1.DockerMachine{ - dockerMachine1, - dockerMachine2, - }, - machines: []clusterv1.Machine{ - machine1, - machine2, - }, - expectedInstances: []docker.NodePoolMachineStatus{ - { - Name: dockerMachine1.Name, - PrioritizeDelete: false, - ProviderID: dockerMachine1.Spec.ProviderID, - }, - { - Name: dockerMachine2.Name, - PrioritizeDelete: true, - ProviderID: dockerMachine2.Spec.ProviderID, - }, - }, - }, - { - name: "do not return error if owner is missing", - dockerMachines: []infrav1.DockerMachine{ - dockerMachine1, - dockerMachine2, - dockerMachine3, - }, - machines: []clusterv1.Machine{ - machine1, - machine2, - }, - expectedInstances: []docker.NodePoolMachineStatus{ - { - Name: dockerMachine1.Name, - PrioritizeDelete: false, - ProviderID: dockerMachine1.Spec.ProviderID, - }, - { - Name: dockerMachine2.Name, - PrioritizeDelete: true, - ProviderID: dockerMachine2.Spec.ProviderID, - }, - { - Name: dockerMachine3.Name, - PrioritizeDelete: false, - ProviderID: dockerMachine3.Spec.ProviderID, - }, - }, - }, - } +// testCases := []struct { +// name string +// dockerMachines []infrav1.DockerMachine +// machines []clusterv1.Machine +// expectedInstances []docker.NodePoolMachineStatus +// expectErr bool +// }{ +// { +// name: "should return a list of instances", +// dockerMachines: []infrav1.DockerMachine{ +// dockerMachine1, +// dockerMachine2, +// }, +// machines: []clusterv1.Machine{ +// machine1, +// machine2, +// }, +// expectedInstances: []docker.NodePoolMachineStatus{ +// { +// Name: dockerMachine1.Name, +// PrioritizeDelete: false, +// ProviderID: dockerMachine1.Spec.ProviderID, +// }, +// { +// Name: dockerMachine2.Name, +// PrioritizeDelete: true, +// ProviderID: dockerMachine2.Spec.ProviderID, +// }, +// }, +// }, +// { +// name: "do not return error if owner is missing", +// dockerMachines: []infrav1.DockerMachine{ +// dockerMachine1, +// dockerMachine2, +// dockerMachine3, +// }, +// machines: []clusterv1.Machine{ +// machine1, +// machine2, +// }, +// expectedInstances: []docker.NodePoolMachineStatus{ +// { +// Name: dockerMachine1.Name, +// PrioritizeDelete: false, +// ProviderID: dockerMachine1.Spec.ProviderID, +// }, +// { +// Name: dockerMachine2.Name, +// PrioritizeDelete: true, +// ProviderID: dockerMachine2.Spec.ProviderID, +// }, +// { +// Name: dockerMachine3.Name, +// PrioritizeDelete: false, +// ProviderID: dockerMachine3.Spec.ProviderID, +// }, +// }, +// }, +// } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) +// for _, tc := range testCases { +// t.Run(tc.name, func(t *testing.T) { +// g := NewWithT(t) - objs := []client.Object{} +// objs := []client.Object{} - for _, ma := range tc.machines { - objs = append(objs, 
ma.DeepCopy()) - } +// for _, ma := range tc.machines { +// objs = append(objs, ma.DeepCopy()) +// } - for _, dm := range tc.dockerMachines { - objs = append(objs, dm.DeepCopy()) - } +// for _, dm := range tc.dockerMachines { +// objs = append(objs, dm.DeepCopy()) +// } - r := &DockerMachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(objs...).Build(), - } +// r := &DockerMachinePoolReconciler{ +// Client: fake.NewClientBuilder().WithObjects(objs...).Build(), +// } - result, err := r.initNodePoolMachineStatusList(context.Background(), tc.dockerMachines, &infraexpv1.DockerMachinePool{}) - if tc.expectErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(HaveLen(len(tc.expectedInstances))) - for i, instance := range result { - g.Expect(instance).To(Equal(tc.expectedInstances[i])) - } - } - }) - } -} +// result, err := r.initNodePoolMachineStatusList(context.Background(), tc.dockerMachines, &infraexpv1.DockerMachinePool{}) +// if tc.expectErr { +// g.Expect(err).To(HaveOccurred()) +// } else { +// g.Expect(err).ToNot(HaveOccurred()) +// g.Expect(result).To(HaveLen(len(tc.expectedInstances))) +// for i, instance := range result { +// g.Expect(instance).To(Equal(tc.expectedInstances[i])) +// } +// } +// }) +// } +// } diff --git a/test/infrastructure/docker/exp/internal/docker/nodepool.go b/test/infrastructure/docker/exp/internal/docker/nodepool.go deleted file mode 100644 index 5cff8ee7350c..000000000000 --- a/test/infrastructure/docker/exp/internal/docker/nodepool.go +++ /dev/null @@ -1,327 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package docker implements docker functionality. -package docker - -import ( - "context" - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/blang/semver" - "github.com/pkg/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/kind/pkg/cluster/constants" - - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" - "sigs.k8s.io/cluster-api/test/infrastructure/kind" - "sigs.k8s.io/cluster-api/util" -) - -const ( - dockerMachinePoolLabel = "docker.cluster.x-k8s.io/machine-pool" -) - -// NodePool is a wrapper around a collection of like machines which are owned by a DockerMachinePool. A node pool -// provides a friendly way of managing (adding, deleting, updating) a set of docker machines. The node pool will also -// sync the docker machine pool status Instances field with the state of the docker machines. 
-type NodePool struct { - client client.Client - cluster *clusterv1.Cluster - machinePool *expv1.MachinePool - dockerMachinePool *infraexpv1.DockerMachinePool - labelFilters map[string]string - nodePoolMachines NodePoolMachines // Note: This must be initialized when creating a new node pool and updated to reflect the `machines` slice. -} - -// NodePoolMachine is a wrapper around a docker.Machine and a NodePoolMachineStatus, which maintains additional information about the machine. -// At least one of Machine or Status must be non-nil. -type NodePoolMachine struct { - Machine *docker.Machine - Status *NodePoolMachineStatus -} - -// NodePoolMachineStatus is a representation of the additional information needed to reconcile a docker.Machine. -// This is the only source of truth for understanding if the machine is already bootstrapped, ready, etc. so this -// information must be maintained through reconciliation. -type NodePoolMachineStatus struct { - Name string - PrioritizeDelete bool - ProviderID *string -} - -// NodePoolMachines is a sortable slice of NodePoolMachine based on the deletion priority. -// Machines with PrioritizeDelete set to true will be sorted to the front of the list. -type NodePoolMachines []NodePoolMachine - -func (n NodePoolMachines) Len() int { return len(n) } -func (n NodePoolMachines) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -func (n NodePoolMachines) Less(i, j int) bool { - var prioritizeI, prioritizeJ bool - var nameI, nameJ string - if n[i].Status != nil { - prioritizeI = n[i].Status.PrioritizeDelete - nameI = n[i].Status.Name - } else { - nameI = n[i].Machine.Name() - } - - if n[j].Status != nil { - prioritizeJ = n[j].Status.PrioritizeDelete - nameJ = n[j].Status.Name - } else { - nameJ = n[j].Machine.Name() - } - - if prioritizeI == prioritizeJ { - return nameI < nameJ - } - - return prioritizeI -} - -// NewNodePool creates a new node pool. -func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, mp *expv1.MachinePool, dmp *infraexpv1.DockerMachinePool, nodePoolMachineStatuses []NodePoolMachineStatus) (*NodePool, error) { - log := ctrl.LoggerFrom(ctx) - np := &NodePool{ - client: c, - cluster: cluster, - machinePool: mp, - dockerMachinePool: dmp, - labelFilters: map[string]string{dockerMachinePoolLabel: dmp.Name}, - } - - log.Info("NewNodePool got nodePoolMachineStatuses", "nodePoolMachineStatuses", nodePoolMachineStatuses) - np.nodePoolMachines = make([]NodePoolMachine, 0, len(nodePoolMachineStatuses)) - for i := range nodePoolMachineStatuses { - np.nodePoolMachines = append(np.nodePoolMachines, NodePoolMachine{ - Status: &nodePoolMachineStatuses[i], - }) - } - log.Info("nodePoolMachines init with statuses and no machine", "nodePoolMachines", np.nodePoolMachines) - - if err := np.refresh(ctx); err != nil { - return np, errors.Wrapf(err, "failed to refresh the node pool") - } - return np, nil -} - -// GetNodePoolMachineStatuses returns the node pool machines providing the information to construct DockerMachines. -func (np *NodePool) GetNodePoolMachineStatuses() []NodePoolMachineStatus { - statusList := make([]NodePoolMachineStatus, len(np.nodePoolMachines)) - for i, nodePoolMachine := range np.nodePoolMachines { - statusList[i] = *nodePoolMachine.Status - } - - return statusList -} - -// ReconcileMachines will build enough machines to satisfy the machine pool / docker machine pool spec -// eventually delete all the machine in excess, and update the status for all the machines. 
-// -// NOTE: The goal for the current implementation is to verify MachinePool construct; accordingly, -// currently the nodepool supports only a recreate strategy for replacing old nodes with new ones -// (all existing machines are killed before new ones are created). -// TODO: consider if to support a Rollout strategy (a more progressive node replacement). -func (np *NodePool) ReconcileMachines(ctx context.Context) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx) - desiredReplicas := int(*np.machinePool.Spec.Replicas) - - // Delete all the machines in excess (outdated machines or machines exceeding desired replica count). - machineDeleted := false - totalNumberOfMachines := 0 - // We start deleting machines from the front of the list, so we need to sort the machines prioritized for deletion to the beginning. - for _, nodePoolMachine := range np.nodePoolMachines { - machine := nodePoolMachine.Machine - totalNumberOfMachines++ - if totalNumberOfMachines > desiredReplicas || !np.isMachineMatchingInfrastructureSpec(machine) { - externalMachine, err := docker.NewMachine(ctx, np.cluster, machine.Name(), np.labelFilters) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", machine.Name()) - } - if err := externalMachine.Delete(ctx); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to delete machine %s", machine.Name()) - } - machineDeleted = true - totalNumberOfMachines-- // remove deleted machine from the count - } - } - if machineDeleted { - if err := np.refresh(ctx); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to refresh the node pool") - } - } - - // Add new machines if missing. - machineAdded := false - matchingMachineCount := len(np.machinesMatchingInfrastructureSpec()) - log.Info("MatchingMachineCount", "count", matchingMachineCount) - if matchingMachineCount < desiredReplicas { - for i := 0; i < desiredReplicas-matchingMachineCount; i++ { - if err := np.addMachine(ctx); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to create a new docker machine") - } - machineAdded = true - } - } - if machineAdded { - if err := np.refresh(ctx); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to refresh the node pool") - } - } - - return ctrl.Result{}, nil -} - -// Delete will delete all of the machines in the node pool. -func (np *NodePool) Delete(ctx context.Context) error { - for _, nodePoolMachine := range np.nodePoolMachines { - machine := nodePoolMachine.Machine - externalMachine, err := docker.NewMachine(ctx, np.cluster, machine.Name(), np.labelFilters) - if err != nil { - return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", machine.Name()) - } - - if err := externalMachine.Delete(ctx); err != nil { - return errors.Wrapf(err, "failed to delete machine %s", machine.Name()) - } - } - - // Note: We can set `np.nodePoolMachines = nil` here, but it shouldn't be necessary on Delete(). - - return nil -} - -func (np *NodePool) isMachineMatchingInfrastructureSpec(machine *docker.Machine) bool { - // NOTE: With the current implementation we are checking if the machine is using a kindest/node image for the expected version, - // but not checking if the machine has the expected extra.mounts or pre.loaded images. 
- - semVer, err := semver.Parse(strings.TrimPrefix(*np.machinePool.Spec.Template.Spec.Version, "v")) - if err != nil { - // TODO: consider if to return an error - panic(errors.Wrap(err, "failed to parse DockerMachine version").Error()) - } - - kindMapping := kind.GetMapping(semVer, np.dockerMachinePool.Spec.Template.CustomImage) - - return machine.ContainerImage() == kindMapping.Image -} - -// machinesMatchingInfrastructureSpec returns all of the docker.Machines which match the machine pool / docker machine pool spec. -func (np *NodePool) machinesMatchingInfrastructureSpec() []*docker.Machine { - var matchingMachines []*docker.Machine - for _, nodePoolMachine := range np.nodePoolMachines { - machine := nodePoolMachine.Machine - if np.isMachineMatchingInfrastructureSpec(machine) { - matchingMachines = append(matchingMachines, machine) - } - } - - return matchingMachines -} - -// addMachine will add a new machine to the node pool and update the docker machine pool status. -func (np *NodePool) addMachine(ctx context.Context) error { - name := fmt.Sprintf("worker-%s", util.RandomString(6)) - externalMachine, err := docker.NewMachine(ctx, np.cluster, name, np.labelFilters) - if err != nil { - return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", name) - } - - // NOTE: FailureDomains don't mean much in CAPD since it's all local, but we are setting a label on - // each container, so we can check placement. - labels := map[string]string{} - for k, v := range np.labelFilters { - labels[k] = v - } - - if len(np.machinePool.Spec.FailureDomains) > 0 { - // For MachinePools placement is expected to be managed by the underlying infrastructure primitive, but - // given that there is no such an thing in CAPD, we are picking a random failure domain. - randomIndex := rand.Intn(len(np.machinePool.Spec.FailureDomains)) //nolint:gosec - for k, v := range docker.FailureDomainLabel(&np.machinePool.Spec.FailureDomains[randomIndex]) { - labels[k] = v - } - } - - if err := externalMachine.Create(ctx, np.dockerMachinePool.Spec.Template.CustomImage, constants.WorkerNodeRoleValue, np.machinePool.Spec.Template.Spec.Version, labels, np.dockerMachinePool.Spec.Template.ExtraMounts); err != nil { - return errors.Wrapf(err, "failed to create docker machine with name %s", name) - } - return nil -} - -// refresh asks docker to list all the machines matching the node pool label and updates the cached list of node pool -// machines. -func (np *NodePool) refresh(ctx context.Context) error { - log := ctrl.LoggerFrom(ctx) - log.Info("nodePoolMachines are", "nodePoolMachines", np.nodePoolMachines) - - // Construct a map of the existing nodePoolMachines so we can look up the status as it's the source of truth about the state of the docker.Machine. - nodePoolMachineStatusMap := make(map[string]*NodePoolMachineStatus, len(np.nodePoolMachines)) - for i := range np.nodePoolMachines { - var name string - // At least one of Machine or Status should be non-nil. - // When this is called in NewNodePool(), we expect Machine to be nil as we're passing in an existing list of statuses. - // When this is being called during reconciliation, we expect both Machine and Status to be non-nil. 
- if np.nodePoolMachines[i].Machine != nil { - name = np.nodePoolMachines[i].Machine.Name() - } else { - name = np.nodePoolMachines[i].Status.Name - } - nodePoolMachineStatusMap[name] = np.nodePoolMachines[i].Status - } - - // Update the list of machines - machines, err := docker.ListMachinesByCluster(ctx, np.cluster, np.labelFilters) - if err != nil { - return errors.Wrapf(err, "failed to list all machines in the cluster") - } - log.Info("Machines by cluster") - for _, machine := range machines { - log.Info("Machine", "name", machine.Name()) - } - - // Construct a new list of nodePoolMachines but keep the existing status. - np.nodePoolMachines = make(NodePoolMachines, 0, len(machines)) - for i := range machines { - machine := machines[i] - // makes sure no control plane machines gets selected by chance. - if !machine.IsControlPlane() { - nodePoolMachine := NodePoolMachine{ - Machine: machine, - } - if existingStatus, ok := nodePoolMachineStatusMap[machine.Name()]; ok { - nodePoolMachine.Status = existingStatus - } else { - nodePoolMachine.Status = &NodePoolMachineStatus{ - Name: nodePoolMachine.Machine.Name(), - } - } - np.nodePoolMachines = append(np.nodePoolMachines, nodePoolMachine) - } - } - - sort.Sort(np.nodePoolMachines) - - return nil -} diff --git a/test/infrastructure/docker/exp/internal/docker/nodepool_test.go b/test/infrastructure/docker/exp/internal/docker/nodepool_test.go deleted file mode 100644 index 68f09a5c97a8..000000000000 --- a/test/infrastructure/docker/exp/internal/docker/nodepool_test.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package docker implements docker functionality. -package docker - -import ( - "sort" - "testing" - - . 
"github.com/onsi/gomega" -) - -func TestMachineDeleteOrder(t *testing.T) { - testcases := []struct { - name string - nodePoolMachines NodePoolMachines - }{ - { - name: "no prioritized instances", - nodePoolMachines: NodePoolMachines{ - { - Status: &NodePoolMachineStatus{ - Name: "instance-1", - PrioritizeDelete: false, - }, - }, - { - Status: &NodePoolMachineStatus{ - Name: "instance-2", - PrioritizeDelete: false, - }, - }, - { - Status: &NodePoolMachineStatus{ - Name: "instance-3", - PrioritizeDelete: false, - }, - }, - { - Status: &NodePoolMachineStatus{ - Name: "instance-4", - PrioritizeDelete: false, - }, - }, - }, - }, - { - name: "sort prioritized instances to front", - nodePoolMachines: NodePoolMachines{ - { - Status: &NodePoolMachineStatus{ - Name: "instance-1", - PrioritizeDelete: false, - }, - }, - { - Status: &NodePoolMachineStatus{ - Name: "instance-2", - PrioritizeDelete: false, - }, - }, - { - Status: &NodePoolMachineStatus{ - Name: "instance-3", - PrioritizeDelete: true, - }, - }, - { - Status: &NodePoolMachineStatus{ - Name: "instance-4", - PrioritizeDelete: true, - }, - }, - }, - }, - } - - for _, tc := range testcases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) - - sort.Sort(tc.nodePoolMachines) - - var prevMachine NodePoolMachine - for i, nodePoolMachine := range tc.nodePoolMachines { - if nodePoolMachine.Status.PrioritizeDelete && i > 0 { - g.Expect(prevMachine.Status.PrioritizeDelete).To(BeTrue()) - } - prevMachine = nodePoolMachine - } - }) - } -} diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 0ee89318ca9d..fc3a3008dae9 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -152,6 +152,10 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } + if _, hasDeleteAnnotation := machine.Annotations[clusterv1.DeleteMachineAnnotation]; hasDeleteAnnotation { + dockerMachine.Annotations[clusterv1.DeleteMachineAnnotation] = machine.Annotations[clusterv1.DeleteMachineAnnotation] + } + // Create a helper for managing the docker container hosting the machine. externalMachine, err := docker.NewMachine(ctx, cluster, dockerMachine.Name, nil) if err != nil {