Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Continue updating deployment manifests when uninstalling #226

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
109 changes: 107 additions & 2 deletions pkg/addonmanager/controllers/agentdeploy/default_hook_sync_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,35 @@ import (
workapiv1 "open-cluster-management.io/api/work/v1"
)

// getDeployWork builds the ManifestWork fixture that represents the default-mode
// deploy work for addon "test" on cluster1. The work carries the addon label and
// a controller owner reference pointing at the ManagedClusterAddOn, and its
// spec/status collections are normalized to empty (non-nil) values so the
// fixture compares equal to what the syncer produces.
func getDeployWork() *workapiv1.ManifestWork {
	deployWork := addontesting.NewManifestWork(
		fmt.Sprintf("%s-%d", constants.DeployWorkNamePrefix("test"), 0),
		"cluster1",
		addontesting.NewUnstructured("v1", "ConfigMap", "default", "test"),
	)
	deployWork.Labels = map[string]string{addonapiv1alpha1.AddonLabelKey: "test"}

	isOwner := true
	deployWork.SetOwnerReferences([]metav1.OwnerReference{{
		APIVersion:         "addon.open-cluster-management.io/v1alpha1",
		Kind:               "ManagedClusterAddOn",
		Name:               "test",
		UID:                "",
		Controller:         &isOwner,
		BlockOwnerDeletion: &isOwner,
	}})

	// Empty (but non-nil) spec/status fields keep deep-equality checks stable.
	deployWork.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{}
	deployWork.Status.Conditions = []metav1.Condition{}
	deployWork.Status.ResourceStatus = workapiv1.ManifestResourceStatus{}

	return deployWork
}

func TestDefaultHookReconcile(t *testing.T) {
cases := []struct {
name string
Expand Down Expand Up @@ -79,8 +108,9 @@ func TestDefaultHookReconcile(t *testing.T) {
cluster: []runtime.Object{addontesting.NewManagedCluster("cluster1")},
testaddon: &testAgent{name: "test", objects: []runtime.Object{
addontesting.NewUnstructured("v1", "ConfigMap", "default", "test"),
addontesting.NewHookJob("default", "test")}},
existingWork: []runtime.Object{},
addontesting.NewHookJob("default", "test"),
}},
existingWork: []runtime.Object{getDeployWork()},
validateWorkActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "create")
actual := actions[0].(clienttesting.CreateActionImpl).Object
Expand All @@ -102,6 +132,80 @@ func TestDefaultHookReconcile(t *testing.T) {
}
},
},
{
name: "deploy hook manifest for a deleting addon with finalizer, not completed, updated deploy work",
key: "cluster1/test",
addon: []runtime.Object{
func() runtime.Object {
addon := addontesting.NewAddonWithConditions("test", "cluster1", registrationAppliedCondition)
addon.SetFinalizers([]string{addonapiv1alpha1.AddonPreDeleteHookFinalizer})
addon.DeletionTimestamp = &metav1.Time{Time: time.Now()}
return addon
}(),
},
cluster: []runtime.Object{addontesting.NewManagedCluster("cluster1")},
testaddon: &testAgent{name: "test", objects: []runtime.Object{
addontesting.NewUnstructured("v1", "ConfigMap", "default", "test"),
addontesting.NewHookJob("default", "test"),
}},
existingWork: []runtime.Object{
func() *workapiv1.ManifestWork {
work := addontesting.NewManifestWork(
fmt.Sprintf("%s-%d", constants.DeployWorkNamePrefix("test"), 0),
"cluster1",
// Setting the wrong name so that the ManifestWork is patched to account for the deployment
// work change during predelete hook.
addontesting.NewUnstructured("v1", "ConfigMap", "default", "test2"),
)
work.Labels = map[string]string{
addonapiv1alpha1.AddonLabelKey: "test",
}

pTrue := true

work.SetOwnerReferences([]metav1.OwnerReference{
{
APIVersion: "addon.open-cluster-management.io/v1alpha1",
Kind: "ManagedClusterAddOn",
Name: "test",
UID: "",
Controller: &pTrue,
BlockOwnerDeletion: &pTrue,
},
})
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{}
work.Status.Conditions = []metav1.Condition{}
work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{}

return work
}(),
},
validateWorkActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "patch", "create")
actual := actions[0].(clienttesting.PatchActionImpl)
if actual.Namespace != "cluster1" || actual.Name != fmt.Sprintf("%s-%d", constants.DeployWorkNamePrefix("test"), 0) {
t.Errorf("the deployWork %v/%v is incorrect.", actual.Namespace, actual.Name)
}

actual2 := actions[1].(clienttesting.CreateActionImpl).Object
hookWork := actual2.(*workapiv1.ManifestWork)
if hookWork.Namespace != "cluster1" || hookWork.Name != constants.PreDeleteHookWorkName("test") {
t.Errorf("the hookWork %v/%v is not the hook job.", hookWork.Namespace, hookWork.Name)
}
},
validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "patch")
patch := actions[0].(clienttesting.PatchActionImpl).Patch
addOn := &addonapiv1alpha1.ManagedClusterAddOn{}
err := json.Unmarshal(patch, addOn)
if err != nil {
t.Fatal(err)
}
if !meta.IsStatusConditionFalse(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted) {
t.Errorf("HookManifestCompleted condition should be false,but got true.")
}
},
},
{
name: "deploy hook manifest for a deleting addon with finalizer, completed",
key: "cluster1/test",
Expand All @@ -117,6 +221,7 @@ func TestDefaultHookReconcile(t *testing.T) {
addontesting.NewUnstructured("v1", "ConfigMap", "default", "test"),
addontesting.NewHookJob("test", "default")}},
existingWork: []runtime.Object{
getDeployWork(),
func() *workapiv1.ManifestWork {
work := addontesting.NewManifestWork(
constants.PreDeleteHookWorkName("test"),
Expand Down
18 changes: 11 additions & 7 deletions pkg/addonmanager/controllers/agentdeploy/default_sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,14 +35,18 @@ func (s *defaultSyncer) sync(ctx context.Context,

var errs []error

if !addon.DeletionTimestamp.IsZero() {
return addon, nil
}
// Don't skip syncing if the addon is deleting and there is a predelete hook, since the deployment manifests may
// need to be updated during the uninstall.
if !addonHasFinalizer(addon, addonapiv1alpha1.AddonPreDeleteHookFinalizer) {
if !addon.DeletionTimestamp.IsZero() {
return addon, nil
}

// waiting for the addon to be deleted when cluster is deleting.
// TODO: consider to delete addon in this scenario.
if !cluster.DeletionTimestamp.IsZero() {
return addon, nil
// waiting for the addon to be deleted when cluster is deleting.
// TODO: consider to delete addon in this scenario.
if !cluster.DeletionTimestamp.IsZero() {
return addon, nil
}
}

currentWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package agentdeploy
import (
"context"
"encoding/json"
"fmt"
"testing"
"time"

Expand All @@ -28,6 +29,23 @@ import (
workapiv1 "open-cluster-management.io/api/work/v1"
)

// getHostedDeployWork builds the ManifestWork fixture that represents the
// hosted-mode deploy work for addon "test" (managed cluster "cluster1",
// hosting cluster "cluster2"). The work carries both the addon name and addon
// namespace labels, and its spec/status collections are normalized to empty
// (non-nil) values so the fixture compares equal to what the syncer produces.
func getHostedDeployWork() *workapiv1.ManifestWork {
	hostedWork := addontesting.NewManifestWork(
		fmt.Sprintf("%s-%d", constants.DeployHostingWorkNamePrefix("cluster1", "test"), 0),
		"cluster2",
		addontesting.NewHostingUnstructured("v1", "ConfigMap", "default", "test"),
	)
	hostedWork.Labels = map[string]string{
		addonapiv1alpha1.AddonLabelKey:          "test",
		addonapiv1alpha1.AddonNamespaceLabelKey: "cluster1",
	}

	// Empty (but non-nil) spec/status fields keep deep-equality checks stable.
	hostedWork.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{}
	hostedWork.Status.Conditions = []metav1.Condition{}
	hostedWork.Status.ResourceStatus = workapiv1.ManifestResourceStatus{}

	return hostedWork
}

func TestHostingHookReconcile(t *testing.T) {
cases := []struct {
name string
Expand Down Expand Up @@ -106,7 +124,7 @@ func TestHostingHookReconcile(t *testing.T) {
addontesting.NewHostingUnstructured("v1", "ConfigMap", "default", "test"),
addontesting.NewHostedHookJob("test", "default"),
}},
existingWork: []runtime.Object{},
existingWork: []runtime.Object{getHostedDeployWork()},
validateWorkActions: func(t *testing.T, actions []clienttesting.Action) {
// hosted sync deploy the hook work in the hosting cluster ns
addontesting.AssertActions(t, actions, "create")
Expand All @@ -129,6 +147,72 @@ func TestHostingHookReconcile(t *testing.T) {
}
},
},
{
name: "deploy hook manifest for a deleting addon with finalizer, not completed, updated deployment",
key: "cluster1/test",
addon: []runtime.Object{
addontesting.SetAddonFinalizers(
addontesting.SetAddonDeletionTimestamp(
addontesting.NewHostedModeAddon("test", "cluster1", "cluster2",
registrationAppliedCondition), time.Now()),
addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer, addonapiv1alpha1.AddonHostingManifestFinalizer),
},
cluster: []runtime.Object{
addontesting.NewManagedCluster("cluster1"),
addontesting.NewManagedCluster("cluster2"),
},
testaddon: &testHostedAgent{name: "test", objects: []runtime.Object{
addontesting.NewHostingUnstructured("v1", "ConfigMap", "default", "test"),
addontesting.NewHostedHookJob("test", "default"),
}},
existingWork: []runtime.Object{
func() *workapiv1.ManifestWork {
work := addontesting.NewManifestWork(
fmt.Sprintf("%s-%d", constants.DeployHostingWorkNamePrefix("cluster1", "test"), 0),
"cluster2",
// Setting the wrong name so that the ManifestWork is patched to account for the deployment
// work change during predelete hook.
addontesting.NewHostingUnstructured("v1", "ConfigMap", "default", "test2"),
)
work.Labels = map[string]string{
addonapiv1alpha1.AddonLabelKey: "test",
addonapiv1alpha1.AddonNamespaceLabelKey: "cluster1",
}
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{}
work.Status.Conditions = []metav1.Condition{}
work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{}
return work
}(),
},
validateWorkActions: func(t *testing.T, actions []clienttesting.Action) {
// hosted sync deploy the hook work in the hosting cluster ns
addontesting.AssertActions(t, actions, "patch", "create")
actual := actions[0].(clienttesting.PatchActionImpl)
expectedDeployWorkName := fmt.Sprintf("%s-%v", constants.DeployHostingWorkNamePrefix("cluster1", "test"), 0)

if actual.Namespace != "cluster2" || actual.Name != expectedDeployWorkName {
t.Errorf("the deployWork %v/%v is not the deploy job.", actual.Namespace, actual.Name)
}

actual2 := actions[1].(clienttesting.CreateActionImpl).Object
hookWork := actual2.(*workapiv1.ManifestWork)
if hookWork.Namespace != "cluster2" || hookWork.Name != constants.PreDeleteHookHostingWorkName("cluster1", "test") {
t.Errorf("the hookWork %v/%v is not the hook job.", hookWork.Namespace, hookWork.Name)
}
},
validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "patch")
patch := actions[0].(clienttesting.PatchActionImpl).Patch
addOn := &addonapiv1alpha1.ManagedClusterAddOn{}
err := json.Unmarshal(patch, addOn)
if err != nil {
t.Fatal(err)
}
if !meta.IsStatusConditionFalse(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted) {
t.Errorf("HookManifestCompleted condition should be false,but got true.")
}
},
},
{
name: "deploy hook manifest for a deleting addon with 2 finalizer, completed",
key: "cluster1/test",
Expand All @@ -148,6 +232,7 @@ func TestHostingHookReconcile(t *testing.T) {
addontesting.NewHostedHookJob("test", "default"),
}},
existingWork: []runtime.Object{
getHostedDeployWork(),
func() *workapiv1.ManifestWork {
work := addontesting.NewManifestWork(
constants.PreDeleteHookHostingWorkName("cluster1", "test"),
Expand Down Expand Up @@ -246,6 +331,7 @@ func TestHostingHookReconcile(t *testing.T) {
addontesting.NewHostedHookJob("test", "default"),
}},
existingWork: []runtime.Object{
getHostedDeployWork(),
func() *workapiv1.ManifestWork {
work := addontesting.NewManifestWork(
constants.PreDeleteHookHostingWorkName("cluster1", "test"),
Expand Down
37 changes: 18 additions & 19 deletions pkg/addonmanager/controllers/agentdeploy/hosted_sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,37 +80,36 @@ func (s *hostedSyncer) sync(ctx context.Context,
Message: fmt.Sprintf("hosting cluster %s is a managed cluster of the hub", hostingClusterName),
})

if !hostingCluster.DeletionTimestamp.IsZero() {
if err = s.cleanupDeployWork(ctx, addon); err != nil {
return addon, err
// Don't skip syncing if the addon is deleting and there is a predelete hook, since the deployment manifests may
// need to be updated during the uninstall.
if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) {
if !hostingCluster.DeletionTimestamp.IsZero() {
if err = s.cleanupDeployWork(ctx, addon); err != nil {
return addon, err
}
addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer)
return addon, nil
}
addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer)
return addon, nil
}

if !addon.DeletionTimestamp.IsZero() {
// clean up the deploy work until the hook work is completed
if addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) {
if !addon.DeletionTimestamp.IsZero() {
if err = s.cleanupDeployWork(ctx, addon); err != nil {
return addon, err
}
addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer)
return addon, nil
}

if err = s.cleanupDeployWork(ctx, addon); err != nil {
return addon, err
// waiting for the addon to be deleted when cluster is deleting.
// TODO: consider to delete addon in this scenario.
if !cluster.DeletionTimestamp.IsZero() {
return addon, nil
}
addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer)
return addon, nil
}

if addonAddFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) {
return addon, nil
}

// waiting for the addon to be deleted when cluster is deleting.
// TODO: consider to delete addon in this scenario.
if !cluster.DeletionTimestamp.IsZero() {
return addon, nil
}

currentWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace)
if err != nil {
return addon, err
Expand Down
Loading