Commit 585e908
own

killianmuldoon committed Mar 16, 2023
1 parent a32f66e

Showing 11 changed files with 212 additions and 102 deletions.
97 changes: 68 additions & 29 deletions cmd/clusterctl/client/cluster/ownergraph.go
@@ -20,6 +20,10 @@ import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
)

// OwnerGraph contains a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship
@@ -32,22 +36,6 @@ type OwnerGraphNode struct {
Owners []metav1.OwnerReference
}

func nodeToOwnerRef(n *node, attributes ownerReferenceAttributes) metav1.OwnerReference {
ref := metav1.OwnerReference{
Name: n.identity.Name,
APIVersion: n.identity.APIVersion,
Kind: n.identity.Kind,
UID: n.identity.UID,
}
if attributes.BlockOwnerDeletion != nil {
ref.BlockOwnerDeletion = attributes.BlockOwnerDeletion
}
if attributes.Controller != nil {
ref.Controller = attributes.Controller
}
return ref
}

// GetOwnerGraph returns a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship between those objects as edges.
// NOTE: this data structure is exposed to allow implementation of E2E tests verifying that CAPI can properly rebuild its
// own owner references; there is no guarantee about the stability of this API. Using this test with providers may require
@@ -64,20 +52,71 @@ func GetOwnerGraph(namespace, kubeconfigPath string) (OwnerGraph, error) {
return OwnerGraph{}, errors.Wrap(err, "failed to retrieve discovery types")
}

// Discover the object graph for the selected types:
// - Nodes are defined by the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process.
// - Edges are derived from the OwnerReferences between nodes.
if err := graph.Discovery(namespace); err != nil {
return OwnerGraph{}, errors.Wrap(err, "failed to discover the object graph")
owners, err := discoverOwnerGraph(namespace, graph)
if err != nil {
return OwnerGraph{}, errors.Wrap(err, "failed to discovery ownerGraph types")
}
return owners, nil
}

func discoverOwnerGraph(namespace string, o *objectGraph) (OwnerGraph, error) {
selectors := []client.ListOption{}
if namespace != "" {
selectors = append(selectors, client.InNamespace(namespace))
}
owners := OwnerGraph{}
// Using getMoveNodes here ensures only objects that are part of the Cluster are added to the OwnerGraph.
for _, v := range graph.getMoveNodes() {
n := OwnerGraphNode{Object: v.identity, Owners: []metav1.OwnerReference{}}
for owner, attributes := range v.owners {
n.Owners = append(n.Owners, nodeToOwnerRef(owner, attributes))
ownerGraph := OwnerGraph{}

discoveryBackoff := newReadBackoff()
for _, discoveryType := range o.types {
typeMeta := discoveryType.typeMeta
objList := new(unstructured.UnstructuredList)

if err := retryWithExponentialBackoff(discoveryBackoff, func() error {
return getObjList(o.proxy, typeMeta, selectors, objList)
}); err != nil {
return nil, err
}

// If we are discovering Secrets, Secrets from the provider namespaces should also be included.
if discoveryType.typeMeta.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("SecretList").GroupKind() {
providers, err := o.providerInventory.List()
if err != nil {
return nil, err
}
for _, p := range providers.Items {
if p.Type == string(clusterctlv1.InfrastructureProviderType) {
providerNamespaceSelector := []client.ListOption{client.InNamespace(p.Namespace)}
providerNamespaceSecretList := new(unstructured.UnstructuredList)
if err := retryWithExponentialBackoff(discoveryBackoff, func() error {
return getObjList(o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList)
}); err != nil {
return nil, err
}
objList.Items = append(objList.Items, providerNamespaceSecretList.Items...)
}
}
}
for _, obj := range objList.Items {
// Exclude the kube-root-ca.crt ConfigMap from the owner graph.
if obj.GetKind() == "ConfigMap" && obj.GetName() == "kube-root-ca.crt" {
continue
}
ownerGraph = addNodeToOwnerGraph(ownerGraph, obj)
}
owners[string(v.identity.UID)] = n
}
return owners, nil
return ownerGraph, nil
}

func addNodeToOwnerGraph(graph OwnerGraph, obj unstructured.Unstructured) OwnerGraph {
// Add the object to the OwnerGraph, keyed by its UID.
graph[string(obj.GetUID())] = OwnerGraphNode{
Owners: obj.GetOwnerReferences(),
Object: corev1.ObjectReference{
APIVersion: obj.GetAPIVersion(),
Kind: obj.GetKind(),
Name: obj.GetName(),
Namespace: obj.GetNamespace(),
},
}
return graph
}
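The exported OwnerGraph above exists so E2E tests can verify that owner references survive clusterctl move. A minimal sketch of how a test might consume it, assuming only the signatures shown in this diff; the namespace and kubeconfig path are placeholders:

package main

import (
	"fmt"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
)

func main() {
	// Build the graph for a single namespace; per the code above, an empty
	// namespace would discover objects across all namespaces.
	graph, err := cluster.GetOwnerGraph("test-ns", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	// Nodes are keyed by UID and carry each object's OwnerReferences, so a
	// test can assert who controls what after a move or upgrade.
	for uid, node := range graph {
		for _, ref := range node.Owners {
			if ref.Controller != nil && *ref.Controller {
				fmt.Printf("%s %s (uid %s) is controlled by %s %s\n",
					node.Object.Kind, node.Object.Name, uid, ref.Kind, ref.Name)
			}
		}
	}
}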
28 changes: 12 additions & 16 deletions controlplane/kubeadm/internal/controllers/helpers.go
@@ -104,22 +104,27 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context,
// Ensure the KubeadmConfigSecret has an owner reference to the control plane if it is not a user-provided secret.
func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster, configSecret *corev1.Secret, kcp *controlplanev1.KubeadmControlPlane) error {
log := ctrl.LoggerFrom(ctx)
controller := metav1.GetControllerOf(configSecret)

// If the Type doesn't match the CAPI-created secret type this is a no-op.
if configSecret.Type != clusterv1.ClusterSecretType {
return nil
}
// If the secret is already controlled by KCP this is a no-op.
if controller != nil && controller.Kind == "KubeadmControlPlane" {
return nil
}

log.Info("Adopting KubeConfig secret", "Secret", klog.KObj(configSecret))
patch, err := patch.NewHelper(configSecret, r.Client)
patchHelper, err := patch.NewHelper(configSecret, r.Client)
if err != nil {
return errors.Wrap(err, "failed to create patch helper for the kubeconfig secret")
}

// Remove the current controller if one exists.
controller := metav1.GetControllerOf(configSecret)
if controller != nil {
configSecret.SetOwnerReferences(util.RemoveOwnerRef(configSecret.OwnerReferences, *controller))
}

// Add the KubeadmControlPlane as the controller for this secret.
configSecret.OwnerReferences = util.EnsureOwnerRef(configSecret.OwnerReferences, *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))

// The kubeconfig secret may have been created by v1alpha2 controllers, in which case it has the Cluster as the owner instead of KCP.
// In this case remove the ownerReference to the Cluster.
if util.IsOwnedByObject(configSecret, cluster) {
@@ -131,16 +136,7 @@ func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Contex
}))
}

// Remove the current controller if one exists.
if controller != nil {
configSecret.SetOwnerReferences(util.RemoveOwnerRef(configSecret.OwnerReferences, *controller))
}

// Add the KubeadmControlPlane as the controller for this secret.
configSecret.OwnerReferences = util.EnsureOwnerRef(configSecret.OwnerReferences,
*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))

if err := patch.Patch(ctx, configSecret); err != nil {
if err := patchHelper.Patch(ctx, configSecret); err != nil {
return errors.Wrap(err, "failed to patch the kubeconfig secret")
}
return nil
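One detail worth calling out in the reordering above: the existing controller reference is removed before metav1.NewControllerRef adds KCP, because an object may carry at most one controller reference. A standalone sketch of NewControllerRef's behavior, using a Pod as a stand-in owner:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	owner := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "owner", UID: "1234"}}
	ref := metav1.NewControllerRef(owner, corev1.SchemeGroupVersion.WithKind("Pod"))
	// NewControllerRef sets both Controller and BlockOwnerDeletion to true,
	// which is why any pre-existing controller ref must be dropped first.
	fmt.Println(*ref.Controller, *ref.BlockOwnerDeletion) // true true
}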
25 changes: 14 additions & 11 deletions exp/addons/internal/controllers/clusterresourceset_controller.go
@@ -276,7 +276,12 @@ func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Conte
}()

// Ensure that the owner references are set on the ClusterResourceSetBinding.
clusterResourceSetBinding.OwnerReferences = ensureOwnerRefs(clusterResourceSetBinding, clusterResourceSet)
clusterResourceSetBinding.OwnerReferences = util.EnsureOwnerRef(clusterResourceSetBinding.GetOwnerReferences(), metav1.OwnerReference{
APIVersion: addonsv1.GroupVersion.String(),
Kind: clusterResourceSet.Kind,
Name: clusterResourceSet.Name,
UID: clusterResourceSet.UID,
})
errList := []error{}
resourceSetBinding := clusterResourceSetBinding.GetOrCreateBinding(clusterResourceSet)

@@ -395,21 +400,19 @@ func (r *ClusterResourceSetReconciler) getResource(ctx context.Context, resource

// ensureResourceOwnerRef adds the ClusterResourceSet as a OwnerReference to the resource.
func (r *ClusterResourceSetReconciler) ensureResourceOwnerRef(ctx context.Context, clusterResourceSet *addonsv1.ClusterResourceSet, resource *unstructured.Unstructured) error {
obj := resource.DeepCopy()
patchHelper, err := patch.NewHelper(obj, r.Client)
if err != nil {
return err
}
newRef := metav1.OwnerReference{
APIVersion: clusterResourceSet.GroupVersionKind().GroupVersion().String(),
APIVersion: addonsv1.GroupVersion.String(),
Kind: clusterResourceSet.GroupVersionKind().Kind,
Name: clusterResourceSet.GetName(),
UID: clusterResourceSet.GetUID(),
}

if !util.IsOwnedByObject(resource, clusterResourceSet) {
refs := resource.GetOwnerReferences()
patch := client.MergeFrom(resource.DeepCopy())
refs = append(refs, newRef)
resource.SetOwnerReferences(refs)
return r.Client.Patch(ctx, resource, patch)
}
return nil
obj.SetOwnerReferences(util.EnsureOwnerRef(obj.GetOwnerReferences(), newRef))
return patchHelper.Patch(ctx, obj)
}

// clusterToClusterResourceSet is mapper function that maps clusters to ClusterResourceSet.
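The controller above now calls util.EnsureOwnerRef directly instead of the bespoke ensureOwnerRefs helper, relying on its replace-or-append semantics: a reference to the same owner is updated in place rather than duplicated, so repeated reconciles stay idempotent. A minimal sketch under that assumption:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/cluster-api/util"
)

func main() {
	ref := metav1.OwnerReference{
		APIVersion: "addons.cluster.x-k8s.io/v1beta1",
		Kind:       "ClusterResourceSet",
		Name:       "crs-1",
		UID:        "aaaa-bbbb",
	}
	refs := util.EnsureOwnerRef(nil, ref)
	refs = util.EnsureOwnerRef(refs, ref) // second call does not duplicate
	fmt.Println(len(refs))                // 1
}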
24 changes: 8 additions & 16 deletions exp/addons/internal/controllers/clusterresourceset_helpers.go
@@ -39,7 +39,6 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
utilresource "sigs.k8s.io/cluster-api/util/resource"
utilyaml "sigs.k8s.io/cluster-api/util/yaml"
)
@@ -121,7 +120,14 @@ func (r *ClusterResourceSetReconciler) getOrCreateClusterResourceSetBinding(ctx
}
clusterResourceSetBinding.Name = cluster.Name
clusterResourceSetBinding.Namespace = cluster.Namespace
clusterResourceSetBinding.OwnerReferences = ensureOwnerRefs(clusterResourceSetBinding, clusterResourceSet)
clusterResourceSetBinding.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: addonsv1.GroupVersion.String(),
Kind: clusterResourceSet.GroupVersionKind().Kind,
Name: clusterResourceSet.Name,
UID: clusterResourceSet.UID,
},
}
clusterResourceSetBinding.Spec.Bindings = []*addonsv1.ResourceSetBinding{}
clusterResourceSetBinding.Spec.ClusterName = cluster.Name
if err := r.Client.Create(ctx, clusterResourceSetBinding); err != nil {
@@ -137,20 +143,6 @@ func (r *ClusterResourceSetReconciler) getOrCreateClusterResourceSetBinding(ctx
return clusterResourceSetBinding, nil
}

// ensureOwnerRefs ensures ClusterResourceSet owner references are set on the ClusterResourceSetBinding.
func ensureOwnerRefs(clusterResourceSetBinding *addonsv1.ClusterResourceSetBinding, clusterResourceSet *addonsv1.ClusterResourceSet) []metav1.OwnerReference {
ownerRefs := make([]metav1.OwnerReference, len(clusterResourceSetBinding.GetOwnerReferences()))
copy(ownerRefs, clusterResourceSetBinding.GetOwnerReferences())
ownerRefs = util.EnsureOwnerRef(ownerRefs,
metav1.OwnerReference{
APIVersion: clusterResourceSet.GroupVersionKind().GroupVersion().String(),
Kind: clusterResourceSet.GroupVersionKind().Kind,
Name: clusterResourceSet.Name,
UID: clusterResourceSet.UID,
})
return ownerRefs
}

// getConfigMap retrieves any ConfigMap from the given name and namespace.
func getConfigMap(ctx context.Context, c client.Client, configmapName types.NamespacedName) (*corev1.ConfigMap, error) {
configMap := &corev1.ConfigMap{}
2 changes: 1 addition & 1 deletion exp/internal/controllers/machinepool_controller.go
@@ -195,7 +195,7 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
func (r *MachinePoolReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
// Ensure the MachinePool is owned by the Cluster it belongs to.
mp.OwnerReferences = util.EnsureOwnerRef(mp.OwnerReferences, metav1.OwnerReference{
APIVersion: cluster.APIVersion,
APIVersion: clusterv1.GroupVersion.String(),
Kind: cluster.Kind,
Name: cluster.Name,
UID: cluster.UID,
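The one-line change above prefers the canonical clusterv1.GroupVersion.String() over cluster.APIVersion. A plausible motivation, stated here as an assumption rather than taken from the diff: typed objects fetched through a controller-runtime client usually carry an empty TypeMeta, so cluster.APIVersion can be the empty string. A hypothetical helper illustrating the pitfall:

package machinepool

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// printAPIVersion is illustrative only; c can be any controller-runtime client.
func printAPIVersion(ctx context.Context, c client.Client) {
	cluster := &clusterv1.Cluster{}
	if err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "my-cluster"}, cluster); err != nil {
		return
	}
	// TypeMeta is typically stripped on decode, so this often prints "",
	// while clusterv1.GroupVersion.String() is always "cluster.x-k8s.io/v1beta1".
	fmt.Printf("%q vs %q\n", cluster.APIVersion, clusterv1.GroupVersion.String())
}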
@@ -206,15 +206,12 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,
md.Labels[clusterv1.ClusterNameLabel] = md.Spec.ClusterName

// Set the MachineDeployment as directly owned by the Cluster (if not already present).
if r.shouldAdopt(md) {
md.OwnerReferences = util.EnsureOwnerRef(md.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Cluster",
Name: cluster.Name,
UID: cluster.UID,
})
return ctrl.Result{}, nil
}
md.OwnerReferences = util.EnsureOwnerRef(md.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Cluster",
Name: cluster.Name,
UID: cluster.UID,
})

// Make sure to reconcile the external infrastructure reference.
if err := reconcileExternalTemplateReference(ctx, r.Client, cluster, &md.Spec.Template.Spec.InfrastructureRef); err != nil {
@@ -413,10 +410,6 @@ func (r *Reconciler) MachineSetToDeployments(o client.Object) []ctrl.Request {
return result
}

func (r *Reconciler) shouldAdopt(md *clusterv1.MachineDeployment) bool {
return !util.HasOwner(md.OwnerReferences, clusterv1.GroupVersion.String(), []string{"Cluster"})
}

func reconcileExternalTemplateReference(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, ref *corev1.ObjectReference) error {
if !strings.HasSuffix(ref.Kind, clusterv1.TemplateSuffix) {
return nil
4 changes: 2 additions & 2 deletions test/e2e/clusterctl_upgrade.go
@@ -98,7 +98,7 @@ type ClusterctlUpgradeSpecInput struct {
ControlPlaneWaiters clusterctl.ControlPlaneWaiters
PreInit func(managementClusterProxy framework.ClusterProxy)
PreUpgrade func(managementClusterProxy framework.ClusterProxy)
PostUpgrade func(managementClusterProxy framework.ClusterProxy)
PostUpgrade func(managementClusterProxy framework.ClusterProxy, namespace, clusterName string)
// PreCleanupManagementCluster hook can be used for extra steps that might be required by providers, for example, removing a conflicting service (such as DHCP) running on
// the target management cluster and running it on the bootstrap cluster (before the latter resumes LCM) if both clusters share the same LAN.
PreCleanupManagementCluster func(managementClusterProxy framework.ClusterProxy)
@@ -432,7 +432,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg

if input.PostUpgrade != nil {
By("Running Post-upgrade steps against the management cluster")
input.PostUpgrade(managementClusterProxy)
input.PostUpgrade(managementClusterProxy, testNamespace.Name, managementClusterName)
}

// After the upgrade check that there were no unexpected rollouts.
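Because PostUpgrade now receives the namespace and cluster name, provider E2E suites must adopt the wider signature. A hedged sketch of a conforming hook; the body is illustrative and not part of this commit:

package e2e

import (
	"context"

	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/test/framework"
)

// postUpgradeCheck matches the new PostUpgrade signature; the extra arguments
// identify the workload cluster created by the spec, so checks can target it
// directly instead of rediscovering it.
func postUpgradeCheck(proxy framework.ClusterProxy, namespace, clusterName string) {
	c := proxy.GetClient()
	cluster := &clusterv1.Cluster{}
	Expect(c.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)).To(Succeed())
}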