From 29164eebf47cc7412092195f1baae217fdd6cdf6 Mon Sep 17 00:00:00 2001
From: Tibi <110664232+TiberiuGC@users.noreply.github.com>
Date: Wed, 27 Sep 2023 09:57:51 +0300
Subject: [PATCH] Move addons related tasks to actions package (#7077)

* Move addons related tasks to actions package

* improve error message displaying format

Co-authored-by: Amine

* improve error message displaying format (2.0)

Co-authored-by: Amine

---------

Co-authored-by: Amine
---
 pkg/actions/addon/addon.go                  |  19 +++-
 pkg/actions/addon/delete.go                 |  33 ++++++
 pkg/actions/addon/tasks.go                  |  26 +++++
 pkg/actions/cluster/owned.go                |   4 +-
 pkg/actions/cluster/unowned.go              |   3 +-
 pkg/cfn/manager/delete_tasks.go             |  50 +++------
 pkg/cfn/manager/fakes/fake_stack_manager.go | 115 +++-----------------
 pkg/cfn/manager/interface.go                |   3 +-
 8 files changed, 115 insertions(+), 138 deletions(-)

diff --git a/pkg/actions/addon/addon.go b/pkg/actions/addon/addon.go
index 8c305e326a..006fce1a92 100644
--- a/pkg/actions/addon/addon.go
+++ b/pkg/actions/addon/addon.go
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	cfntypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types"
 	"github.com/aws/aws-sdk-go-v2/service/eks"
 	"github.com/hashicorp/go-version"
 	"github.com/kris-nova/logger"
@@ -14,20 +15,34 @@ import (
 
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/awsapi"
+	"github.com/weaveworks/eksctl/pkg/cfn/builder"
 	"github.com/weaveworks/eksctl/pkg/cfn/manager"
 	iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"
 )
 
+// StackManager manages CloudFormation stacks for addons.
+//
+//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
+//counterfeiter:generate -o fakes/fake_stack_manager.go . StackManager
+type StackManager interface {
+	CreateStack(ctx context.Context, name string, stack builder.ResourceSetReader, tags, parameters map[string]string, errs chan error) error
+	DeleteStackBySpec(ctx context.Context, s *cfntypes.Stack) (*cfntypes.Stack, error)
+	DeleteStackBySpecSync(ctx context.Context, s *cfntypes.Stack, errs chan error) error
+	DescribeStack(ctx context.Context, i *cfntypes.Stack) (*cfntypes.Stack, error)
+	GetIAMAddonsStacks(ctx context.Context) ([]*cfntypes.Stack, error)
+	UpdateStack(ctx context.Context, options manager.UpdateStackOptions) error
+}
+
 type Manager struct {
 	clusterConfig *api.ClusterConfig
 	eksAPI        awsapi.EKS
 	withOIDC      bool
 	oidcManager   *iamoidc.OpenIDConnectManager
-	stackManager  manager.StackManager
+	stackManager  StackManager
 	clientSet     kubeclient.Interface
 }
 
-func New(clusterConfig *api.ClusterConfig, eksAPI awsapi.EKS, stackManager manager.StackManager, withOIDC bool, oidcManager *iamoidc.OpenIDConnectManager, clientSet kubeclient.Interface) (*Manager, error) {
+func New(clusterConfig *api.ClusterConfig, eksAPI awsapi.EKS, stackManager StackManager, withOIDC bool, oidcManager *iamoidc.OpenIDConnectManager, clientSet kubeclient.Interface) (*Manager, error) {
 	return &Manager{
 		clusterConfig: clusterConfig,
 		eksAPI:        eksAPI,
diff --git a/pkg/actions/addon/delete.go b/pkg/actions/addon/delete.go
index 245ab3bd02..9ef44acb27 100644
--- a/pkg/actions/addon/delete.go
+++ b/pkg/actions/addon/delete.go
@@ -12,6 +12,7 @@ import (
 
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/cfn/manager"
+	"github.com/weaveworks/eksctl/pkg/utils/tasks"
 )
 
 func (a *Manager) DeleteWithPreserve(ctx context.Context, addon *api.Addon) error {
@@ -69,3 +70,35 @@ func (a *Manager) deleteAddon(ctx context.Context, addon *api.Addon, preserve bo
 	}
 	return true, nil
 }
+
+type Remover struct {
+	stackManager StackManager
+}
+
+func NewRemover(stackManager StackManager) *Remover {
+	return &Remover{
+		stackManager: stackManager,
+	}
+}
+
+func (ar *Remover) DeleteAddonIAMTasks(ctx context.Context, wait bool) (*tasks.TaskTree, error) {
+	stacks, err := ar.stackManager.GetIAMAddonsStacks(ctx)
+	if err != nil {
+		return nil, err
+	}
+	taskTree := &tasks.TaskTree{Parallel: true}
+	for _, s := range stacks {
+		deleteStackTasks := &tasks.TaskTree{
+			Parallel:  false,
+			IsSubTask: true,
+		}
+		deleteStackTasks.Append(&deleteAddonIAMTask{
+			ctx:          ctx,
+			info:         fmt.Sprintf("deleting addon IAM %q", *s.StackName),
+			stack:        s,
+			stackManager: ar.stackManager,
+			wait:         wait,
+		})
+		// Attach each sub-tree to the parent tree so the stacks are actually deleted.
+		taskTree.Append(deleteStackTasks)
+	}
+	return taskTree, nil
+}
diff --git a/pkg/actions/addon/tasks.go b/pkg/actions/addon/tasks.go
index bcf5ff0d4b..07ceacc41e 100644
--- a/pkg/actions/addon/tasks.go
+++ b/pkg/actions/addon/tasks.go
@@ -2,9 +2,11 @@ package addon
 
 import (
 	"context"
+	"fmt"
 	"strings"
 	"time"
 
+	cfntypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types"
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/eks"
 	"github.com/weaveworks/eksctl/pkg/utils/tasks"
@@ -105,3 +107,27 @@ func (t *createAddonTask) Do(errorCh chan error) error {
 	}()
 	return nil
 }
+
+type deleteAddonIAMTask struct {
+	ctx          context.Context
+	info         string
+	stack        *cfntypes.Stack
+	stackManager StackManager
+	wait         bool
+}
+
+func (t *deleteAddonIAMTask) Describe() string { return t.info }
+
+func (t *deleteAddonIAMTask) Do(errorCh chan error) error {
+	errMsg := fmt.Sprintf("deleting addon IAM %q", *t.stack.StackName)
+	if t.wait {
+		if err := t.stackManager.DeleteStackBySpecSync(t.ctx, t.stack, errorCh); err != nil {
+			return fmt.Errorf("%s: %w", errMsg, err)
+		}
+		return nil
+	}
+	// Async path does not report through errorCh; close it so the task runner is not left waiting.
+	defer close(errorCh)
+	if _, err := t.stackManager.DeleteStackBySpec(t.ctx, t.stack); err != nil {
+		return fmt.Errorf("%s: %w", errMsg, err)
+	}
+	return nil
+}
diff --git a/pkg/actions/cluster/owned.go b/pkg/actions/cluster/owned.go
index 205f5896de..6e996c8640 100644
--- a/pkg/actions/cluster/owned.go
+++ b/pkg/actions/cluster/owned.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/kris-nova/logger"
 
+	"github.com/weaveworks/eksctl/pkg/actions/addon"
 	"github.com/weaveworks/eksctl/pkg/actions/nodegroup"
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/cfn/manager"
@@ -119,7 +120,8 @@ func (c *OwnedCluster) Delete(ctx context.Context, _, podEvictionWaitPeriod time
 	newOIDCManager := func() (*iamoidc.OpenIDConnectManager, error) {
 		return c.ctl.NewOpenIDConnectManager(ctx, c.cfg)
 	}
-	tasks, err := c.stackManager.NewTasksToDeleteClusterWithNodeGroups(ctx, c.clusterStack, allStacks, clusterOperable, newOIDCManager, c.ctl.Status.ClusterInfo.Cluster, kubernetes.NewCachedClientSet(clientSet), wait, force, func(errs chan error, _ string) error {
+	newTasksToDeleteAddonIAM := addon.NewRemover(c.stackManager).DeleteAddonIAMTasks
+	tasks, err := c.stackManager.NewTasksToDeleteClusterWithNodeGroups(ctx, c.clusterStack, allStacks, clusterOperable, newOIDCManager, newTasksToDeleteAddonIAM, c.ctl.Status.ClusterInfo.Cluster, kubernetes.NewCachedClientSet(clientSet), wait, force, func(errs chan error, _ string) error {
 		logger.Info("trying to cleanup dangling network interfaces")
 		stack, err := c.stackManager.DescribeClusterStack(ctx)
 		if err != nil {
diff --git a/pkg/actions/cluster/unowned.go b/pkg/actions/cluster/unowned.go
index a231d38a78..a998c4a92e 100644
--- a/pkg/actions/cluster/unowned.go
+++ b/pkg/actions/cluster/unowned.go
@@ -11,6 +11,7 @@ import (
 	"github.com/kris-nova/logger"
 	"github.com/pkg/errors"
 
+	"github.com/weaveworks/eksctl/pkg/actions/addon"
 	"github.com/weaveworks/eksctl/pkg/actions/nodegroup"
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/cfn/manager"
@@ -184,7 +185,7 @@ func (c *UnownedCluster) deleteIAMAndOIDC(ctx context.Context, wait bool, cluste
 		}
 	}
 
-	deleteAddonIAMTasks, err := c.stackManager.NewTaskToDeleteAddonIAM(ctx, wait)
+	deleteAddonIAMTasks, err := addon.NewRemover(c.stackManager).DeleteAddonIAMTasks(ctx, wait)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/cfn/manager/delete_tasks.go b/pkg/cfn/manager/delete_tasks.go
index 3db515b2d2..303e53f86a 100644
--- a/pkg/cfn/manager/delete_tasks.go
+++ b/pkg/cfn/manager/delete_tasks.go
@@ -26,8 +26,21 @@ func deleteAll(_ string) bool { return true }
 
 type NewOIDCManager func() (*iamoidc.OpenIDConnectManager, error)
 
+// NewTasksToDeleteAddonIAM is a temporary type; it will be removed once NewTasksToDeleteClusterWithNodeGroups is moved to the actions package.
+type NewTasksToDeleteAddonIAM func(ctx context.Context, wait bool) (*tasks.TaskTree, error)
+
 // NewTasksToDeleteClusterWithNodeGroups defines tasks required to delete the given cluster along with all of its resources
-func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(ctx context.Context, clusterStack *Stack, nodeGroupStacks []NodeGroupStack, clusterOperable bool, newOIDCManager NewOIDCManager, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, wait, force bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error) {
+func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(
+	ctx context.Context,
+	clusterStack *Stack,
+	nodeGroupStacks []NodeGroupStack,
+	clusterOperable bool,
+	newOIDCManager NewOIDCManager,
+	newTasksToDeleteAddonIAM NewTasksToDeleteAddonIAM,
+	cluster *ekstypes.Cluster,
+	clientSetGetter kubernetes.ClientSetGetter,
+	wait, force bool,
+	cleanup func(chan error, string) error) (*tasks.TaskTree, error) {
 	taskTree := &tasks.TaskTree{Parallel: false}
 
 	nodeGroupTasks, err := c.NewTasksToDeleteNodeGroups(nodeGroupStacks, deleteAll, true, cleanup)
@@ -52,7 +65,7 @@ func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(ctx context.Cont
 		}
 	}
 
-	deleteAddonIAMTasks, err := c.NewTaskToDeleteAddonIAM(ctx, wait)
+	deleteAddonIAMTasks, err := newTasksToDeleteAddonIAM(ctx, wait)
 	if err != nil {
 		return nil, err
 	}
@@ -323,39 +336,6 @@ func stacksToServiceAccountMap(stacks []*types.Stack) map[string]*types.Stack {
 	return stackMap
 }
 
-// NewTaskToDeleteAddonIAM defines tasks required to delete all of the addons
-func (c *StackCollection) NewTaskToDeleteAddonIAM(ctx context.Context, wait bool) (*tasks.TaskTree, error) {
-	stacks, err := c.GetIAMAddonsStacks(ctx)
-	if err != nil {
-		return nil, err
-	}
-	taskTree := &tasks.TaskTree{Parallel: true}
-	for _, s := range stacks {
-		info := fmt.Sprintf("delete addon IAM %q", *s.StackName)
-
-		deleteStackTasks := &tasks.TaskTree{
-			Parallel:  false,
-			IsSubTask: true,
-		}
-		if wait {
-			deleteStackTasks.Append(&taskWithStackSpec{
-				info:  info,
-				stack: s,
-				call:  c.DeleteStackBySpecSync,
-			})
-		} else {
-			deleteStackTasks.Append(&asyncTaskWithStackSpec{
-				info:  info,
-				stack: s,
-				call:  c.DeleteStackBySpec,
-			})
-		}
-		taskTree.Append(deleteStackTasks)
-	}
-	return taskTree, nil
-
-}
-
 func clusterHasOIDCProvider(cluster *ekstypes.Cluster) (hasOIDC bool, found bool) {
 	for k, v := range cluster.Tags {
 		if k == api.ClusterOIDCEnabledTag {
diff --git a/pkg/cfn/manager/fakes/fake_stack_manager.go b/pkg/cfn/manager/fakes/fake_stack_manager.go
index 98204c742c..4c95b38e23 100644
--- a/pkg/cfn/manager/fakes/fake_stack_manager.go
+++ b/pkg/cfn/manager/fakes/fake_stack_manager.go
@@ -606,20 +606,6 @@ type FakeStackManager struct {
 	newManagedNodeGroupTaskReturnsOnCall map[int]struct {
 		result1 *tasks.TaskTree
 	}
-	NewTaskToDeleteAddonIAMStub        func(context.Context, bool) (*tasks.TaskTree, error)
-	newTaskToDeleteAddonIAMMutex       sync.RWMutex
-	newTaskToDeleteAddonIAMArgsForCall []struct {
-		arg1 context.Context
-		arg2 bool
-	}
-	newTaskToDeleteAddonIAMReturns struct {
-		result1 *tasks.TaskTree
-		result2 error
-	}
-	newTaskToDeleteAddonIAMReturnsOnCall map[int]struct {
-		result1 *tasks.TaskTree
-		result2 error
-	}
 	NewTaskToDeleteUnownedNodeGroupStub        func(context.Context, string, string, awsapi.EKS, *manager.DeleteWaitCondition) tasks.Task
 	newTaskToDeleteUnownedNodeGroupMutex       sync.RWMutex
 	newTaskToDeleteUnownedNodeGroupArgsForCall []struct {
@@ -662,7 +648,7 @@ type FakeStackManager struct {
 	newTasksToCreateIAMServiceAccountsReturnsOnCall map[int]struct {
 		result1 *tasks.TaskTree
 	}
-	NewTasksToDeleteClusterWithNodeGroupsStub        func(context.Context, *types.Stack, []manager.NodeGroupStack, bool, manager.NewOIDCManager, *typesc.Cluster, kubernetes.ClientSetGetter, bool, bool, func(chan error, string) error) (*tasks.TaskTree, error)
+	NewTasksToDeleteClusterWithNodeGroupsStub        func(context.Context, *types.Stack, []manager.NodeGroupStack, bool, manager.NewOIDCManager, manager.NewTasksToDeleteAddonIAM, *typesc.Cluster, kubernetes.ClientSetGetter, bool, bool, func(chan error, string) error) (*tasks.TaskTree, error)
 	newTasksToDeleteClusterWithNodeGroupsMutex       sync.RWMutex
 	newTasksToDeleteClusterWithNodeGroupsArgsForCall []struct {
 		arg1 context.Context
@@ -670,11 +656,12 @@ type FakeStackManager struct {
 		arg3  []manager.NodeGroupStack
 		arg4  bool
 		arg5  manager.NewOIDCManager
-		arg6  *typesc.Cluster
-		arg7  kubernetes.ClientSetGetter
-		arg8  bool
+		arg6  manager.NewTasksToDeleteAddonIAM
+		arg7  *typesc.Cluster
+		arg8  kubernetes.ClientSetGetter
 		arg9  bool
-		arg10 func(chan error, string) error
+		arg10 bool
+		arg11 func(chan error, string) error
 	}
 	newTasksToDeleteClusterWithNodeGroupsReturns struct {
 		result1 *tasks.TaskTree
@@ -3634,71 +3621,6 @@ func (fake *FakeStackManager) NewManagedNodeGroupTaskReturnsOnCall(i int, result
 	}{result1}
 }
 
-func (fake *FakeStackManager) NewTaskToDeleteAddonIAM(arg1 context.Context, arg2 bool) (*tasks.TaskTree, error) {
-	fake.newTaskToDeleteAddonIAMMutex.Lock()
-	ret, specificReturn := fake.newTaskToDeleteAddonIAMReturnsOnCall[len(fake.newTaskToDeleteAddonIAMArgsForCall)]
-	fake.newTaskToDeleteAddonIAMArgsForCall = append(fake.newTaskToDeleteAddonIAMArgsForCall, struct {
-		arg1 context.Context
-		arg2 bool
-	}{arg1, arg2})
-	stub := fake.NewTaskToDeleteAddonIAMStub
-	fakeReturns := fake.newTaskToDeleteAddonIAMReturns
-	fake.recordInvocation("NewTaskToDeleteAddonIAM", []interface{}{arg1, arg2})
-	fake.newTaskToDeleteAddonIAMMutex.Unlock()
-	if stub != nil {
-		return stub(arg1, arg2)
-	}
-	if specificReturn {
-		return ret.result1, ret.result2
-	}
-	return fakeReturns.result1, fakeReturns.result2
-}
-
-func (fake *FakeStackManager) NewTaskToDeleteAddonIAMCallCount() int {
-	fake.newTaskToDeleteAddonIAMMutex.RLock()
-	defer fake.newTaskToDeleteAddonIAMMutex.RUnlock()
-	return len(fake.newTaskToDeleteAddonIAMArgsForCall)
-}
-
-func (fake *FakeStackManager) NewTaskToDeleteAddonIAMCalls(stub func(context.Context, bool) (*tasks.TaskTree, error)) {
-	fake.newTaskToDeleteAddonIAMMutex.Lock()
-	defer fake.newTaskToDeleteAddonIAMMutex.Unlock()
-	fake.NewTaskToDeleteAddonIAMStub = stub
-}
-
-func (fake *FakeStackManager) NewTaskToDeleteAddonIAMArgsForCall(i int) (context.Context, bool) {
-	fake.newTaskToDeleteAddonIAMMutex.RLock()
-	defer fake.newTaskToDeleteAddonIAMMutex.RUnlock()
-	argsForCall := fake.newTaskToDeleteAddonIAMArgsForCall[i]
-	return argsForCall.arg1, argsForCall.arg2
-}
-
-func (fake *FakeStackManager) NewTaskToDeleteAddonIAMReturns(result1 *tasks.TaskTree, result2 error) {
-	fake.newTaskToDeleteAddonIAMMutex.Lock()
-	defer fake.newTaskToDeleteAddonIAMMutex.Unlock()
-	fake.NewTaskToDeleteAddonIAMStub = nil
-	fake.newTaskToDeleteAddonIAMReturns = struct {
-		result1 *tasks.TaskTree
-		result2 error
-	}{result1, result2}
-}
-
-func (fake *FakeStackManager) NewTaskToDeleteAddonIAMReturnsOnCall(i int, result1 *tasks.TaskTree, result2 error) {
-	fake.newTaskToDeleteAddonIAMMutex.Lock()
-	defer fake.newTaskToDeleteAddonIAMMutex.Unlock()
-	fake.NewTaskToDeleteAddonIAMStub = nil
-	if fake.newTaskToDeleteAddonIAMReturnsOnCall == nil {
-		fake.newTaskToDeleteAddonIAMReturnsOnCall = make(map[int]struct {
-			result1 *tasks.TaskTree
-			result2 error
-		})
-	}
-	fake.newTaskToDeleteAddonIAMReturnsOnCall[i] = struct {
-		result1 *tasks.TaskTree
-		result2 error
-	}{result1, result2}
-}
-
 func (fake *FakeStackManager) NewTaskToDeleteUnownedNodeGroup(arg1 context.Context, arg2 string, arg3 string, arg4 awsapi.EKS, arg5 *manager.DeleteWaitCondition) tasks.Task {
 	fake.newTaskToDeleteUnownedNodeGroupMutex.Lock()
 	ret, specificReturn := fake.newTaskToDeleteUnownedNodeGroupReturnsOnCall[len(fake.newTaskToDeleteUnownedNodeGroupArgsForCall)]
@@ -3906,7 +3828,7 @@ func (fake *FakeStackManager) NewTasksToCreateIAMServiceAccountsReturnsOnCall(i
 	}{result1}
 }
 
-func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroups(arg1 context.Context, arg2 *types.Stack, arg3 []manager.NodeGroupStack, arg4 bool, arg5 manager.NewOIDCManager, arg6 *typesc.Cluster, arg7 kubernetes.ClientSetGetter, arg8 bool, arg9 bool, arg10 func(chan error, string) error) (*tasks.TaskTree, error) {
+func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroups(arg1 context.Context, arg2 *types.Stack, arg3 []manager.NodeGroupStack, arg4 bool, arg5 manager.NewOIDCManager, arg6 manager.NewTasksToDeleteAddonIAM, arg7 *typesc.Cluster, arg8 kubernetes.ClientSetGetter, arg9 bool, arg10 bool, arg11 func(chan error, string) error) (*tasks.TaskTree, error) {
 	var arg3Copy []manager.NodeGroupStack
 	if arg3 != nil {
 		arg3Copy = make([]manager.NodeGroupStack, len(arg3))
@@ -3920,18 +3842,19 @@ func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroups(arg1 context
 		arg3  []manager.NodeGroupStack
 		arg4  bool
 		arg5  manager.NewOIDCManager
-		arg6  *typesc.Cluster
-		arg7  kubernetes.ClientSetGetter
-		arg8  bool
+		arg6  manager.NewTasksToDeleteAddonIAM
+		arg7  *typesc.Cluster
+		arg8  kubernetes.ClientSetGetter
 		arg9  bool
-		arg10 func(chan error, string) error
-	}{arg1, arg2, arg3Copy, arg4, arg5, arg6, arg7, arg8, arg9, arg10})
+		arg10 bool
+		arg11 func(chan error, string) error
+	}{arg1, arg2, arg3Copy, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11})
 	stub := fake.NewTasksToDeleteClusterWithNodeGroupsStub
 	fakeReturns := fake.newTasksToDeleteClusterWithNodeGroupsReturns
-	fake.recordInvocation("NewTasksToDeleteClusterWithNodeGroups", []interface{}{arg1, arg2, arg3Copy, arg4, arg5, arg6, arg7, arg8, arg9, arg10})
+	fake.recordInvocation("NewTasksToDeleteClusterWithNodeGroups", []interface{}{arg1, arg2, arg3Copy, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11})
 	fake.newTasksToDeleteClusterWithNodeGroupsMutex.Unlock()
 	if stub != nil {
-		return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10)
+		return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11)
 	}
 	if specificReturn {
 		return ret.result1, ret.result2
@@ -3945,17 +3868,17 @@ func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroupsCallCount() i
 	return len(fake.newTasksToDeleteClusterWithNodeGroupsArgsForCall)
 }
 
-func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroupsCalls(stub func(context.Context, *types.Stack, []manager.NodeGroupStack, bool, manager.NewOIDCManager, *typesc.Cluster, kubernetes.ClientSetGetter, bool, bool, func(chan error, string) error) (*tasks.TaskTree, error)) {
+func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroupsCalls(stub func(context.Context, *types.Stack, []manager.NodeGroupStack, bool, manager.NewOIDCManager, manager.NewTasksToDeleteAddonIAM, *typesc.Cluster, kubernetes.ClientSetGetter, bool, bool, func(chan error, string) error) (*tasks.TaskTree, error)) {
 	fake.newTasksToDeleteClusterWithNodeGroupsMutex.Lock()
 	defer fake.newTasksToDeleteClusterWithNodeGroupsMutex.Unlock()
 	fake.NewTasksToDeleteClusterWithNodeGroupsStub = stub
 }
 
-func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroupsArgsForCall(i int) (context.Context, *types.Stack, []manager.NodeGroupStack, bool, manager.NewOIDCManager, *typesc.Cluster, kubernetes.ClientSetGetter, bool, bool, func(chan error, string) error) {
+func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroupsArgsForCall(i int) (context.Context, *types.Stack, []manager.NodeGroupStack, bool, manager.NewOIDCManager, manager.NewTasksToDeleteAddonIAM, *typesc.Cluster, kubernetes.ClientSetGetter, bool, bool, func(chan error, string) error) {
 	fake.newTasksToDeleteClusterWithNodeGroupsMutex.RLock()
 	defer fake.newTasksToDeleteClusterWithNodeGroupsMutex.RUnlock()
 	argsForCall := fake.newTasksToDeleteClusterWithNodeGroupsArgsForCall[i]
-	return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7, argsForCall.arg8, argsForCall.arg9, argsForCall.arg10
+	return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7, argsForCall.arg8, argsForCall.arg9, argsForCall.arg10, argsForCall.arg11
 }
 
 func (fake *FakeStackManager) NewTasksToDeleteClusterWithNodeGroupsReturns(result1 *tasks.TaskTree, result2 error) {
@@ -4674,8 +4597,6 @@ func (fake *FakeStackManager) Invocations() map[string][][]interface{} {
 	defer fake.makeClusterStackNameMutex.RUnlock()
 	fake.newManagedNodeGroupTaskMutex.RLock()
 	defer fake.newManagedNodeGroupTaskMutex.RUnlock()
-	fake.newTaskToDeleteAddonIAMMutex.RLock()
-	defer fake.newTaskToDeleteAddonIAMMutex.RUnlock()
 	fake.newTaskToDeleteUnownedNodeGroupMutex.RLock()
 	defer fake.newTaskToDeleteUnownedNodeGroupMutex.RUnlock()
 	fake.newTasksToCreateClusterWithNodeGroupsMutex.RLock()
diff --git a/pkg/cfn/manager/interface.go b/pkg/cfn/manager/interface.go
index 0056327839..3dc4ca91b5 100644
--- a/pkg/cfn/manager/interface.go
+++ b/pkg/cfn/manager/interface.go
@@ -83,11 +83,10 @@ type StackManager interface {
 	MakeChangeSetName(action string) string
 	MakeClusterStackName() string
 	NewManagedNodeGroupTask(ctx context.Context, nodeGroups []*v1alpha5.ManagedNodeGroup, forceAddCNIPolicy bool, importer vpc.Importer) *tasks.TaskTree
-	NewTaskToDeleteAddonIAM(ctx context.Context, wait bool) (*tasks.TaskTree, error)
 	NewTaskToDeleteUnownedNodeGroup(ctx context.Context, clusterName, nodegroup string, eksAPI awsapi.EKS, waitCondition *DeleteWaitCondition) tasks.Task
 	NewTasksToCreateClusterWithNodeGroups(ctx context.Context, nodeGroups []*v1alpha5.NodeGroup, managedNodeGroups []*v1alpha5.ManagedNodeGroup, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree
 	NewTasksToCreateIAMServiceAccounts(serviceAccounts []*v1alpha5.ClusterIAMServiceAccount, oidc *iamoidc.OpenIDConnectManager, clientSetGetter kubernetes.ClientSetGetter) *tasks.TaskTree
-	NewTasksToDeleteClusterWithNodeGroups(ctx context.Context, clusterStack *Stack, nodeGroupStacks []NodeGroupStack, clusterOperable bool, newOIDCManager NewOIDCManager, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, wait, force bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
+	NewTasksToDeleteClusterWithNodeGroups(ctx context.Context, clusterStack *Stack, nodeGroupStacks []NodeGroupStack, clusterOperable bool, newOIDCManager NewOIDCManager, newTasksToDeleteAddonIAM NewTasksToDeleteAddonIAM, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, wait, force bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
 	NewTasksToDeleteIAMServiceAccounts(ctx context.Context, serviceAccounts []string, clientSetGetter kubernetes.ClientSetGetter, wait bool) (*tasks.TaskTree, error)
 	NewTasksToDeleteNodeGroups(stacks []NodeGroupStack, shouldDelete func(_ string) bool, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
 	NewTasksToDeleteOIDCProviderWithIAMServiceAccounts(ctx context.Context, newOIDCManager NewOIDCManager, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, force bool) (*tasks.TaskTree, error)
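
Usage sketch (illustrative only; addon.NewRemover, DeleteAddonIAMTasks, addon.StackManager and the import paths come from this patch, while the wrapper name deleteAddonIAM and package example are assumed): callers now build the addon IAM deletion task tree through the addon package instead of StackCollection.NewTaskToDeleteAddonIAM, mirroring the updated call sites in owned.go and unowned.go.

package example

import (
	"context"

	"github.com/weaveworks/eksctl/pkg/actions/addon"
	"github.com/weaveworks/eksctl/pkg/utils/tasks"
)

// deleteAddonIAM is a hypothetical wrapper showing the relocated API: it builds
// the parallel task tree that deletes the CloudFormation stacks holding the
// addons' IAM resources; wait controls whether each deletion blocks until done.
func deleteAddonIAM(ctx context.Context, stackManager addon.StackManager, wait bool) (*tasks.TaskTree, error) {
	return addon.NewRemover(stackManager).DeleteAddonIAMTasks(ctx, wait)
}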