From f65c32363989a9b1e18642183e01458038ae1704 Mon Sep 17 00:00:00 2001 From: Ilya Dmitrichenko Date: Thu, 27 Dec 2018 15:44:38 +0000 Subject: [PATCH] More refactoring and cleanup --- pkg/cfn/manager/api.go | 25 +++++++------ pkg/cfn/manager/cluster.go | 2 +- pkg/cfn/manager/deprecated.go | 8 ++-- pkg/cfn/manager/nodegroup.go | 31 ++++++++++------ pkg/cfn/manager/tasks.go | 62 ++++++++++++------------------- pkg/ctl/cmdutils/cmdutils.go | 11 ++++-- pkg/ctl/completion/completion.go | 3 +- pkg/ctl/create/cluster.go | 4 +- pkg/ctl/create/nodegroup.go | 9 ++--- pkg/ctl/delete/cluster.go | 15 ++++---- pkg/ctl/delete/delete.go | 2 +- pkg/ctl/delete/nodegroup.go | 56 +++++++++++++++------------- pkg/ctl/get/cluster.go | 8 ++-- pkg/ctl/get/nodegroup.go | 36 +++++++++--------- pkg/ctl/scale/nodegroup.go | 22 ++++++----- pkg/ctl/utils/describe_stacks.go | 2 +- pkg/ctl/utils/write_kubeconfig.go | 2 +- 17 files changed, 150 insertions(+), 148 deletions(-) diff --git a/pkg/cfn/manager/api.go b/pkg/cfn/manager/api.go index c0c71982c98..616265508b7 100644 --- a/pkg/cfn/manager/api.go +++ b/pkg/cfn/manager/api.go @@ -18,7 +18,8 @@ const ( ClusterNameTag = "eksctl.cluster.k8s.io/v1alpha1/cluster-name" // NodeGroupNameTag defines the tag of the node group name - NodeGroupNameTag = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-name" + NodeGroupNameTag = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-name" + oldNodeGroupIDTag = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-id" ) var ( @@ -95,7 +96,7 @@ func (c *StackCollection) doCreateStackRequest(i *Stack, templateBody []byte, pa // CreateStack with given name, stack builder instance and parameters; // any errors will be written to errs channel, when nil is written, // assume completion, do not expect more then one error value on the -// channel, it's closed immediately after it is written two +// channel, it's closed immediately after it is written to func (c *StackCollection) CreateStack(name string, stack builder.ResourceSet, parameters 
map[string]string, errs chan error) error { i := &Stack{StackName: &name} templateBody, err := stack.RenderJSON() @@ -225,8 +226,11 @@ func (c *StackCollection) DeleteStack(name string) (*Stack, error) { fmt.Sprintf("%s:%s", ClusterNameTag, c.spec.Metadata.Name)) } -// WaitDeleteStack kills a stack by name and waits for DELETED status -func (c *StackCollection) WaitDeleteStack(name string) error { +// WaitDeleteStack kills a stack by name and waits for DELETED status; +// any errors will be written to errs channel, when nil is written, +// assume completion, do not expect more than one error value on the +// channel, it's closed immediately after it is written to +func (c *StackCollection) WaitDeleteStack(name string, errs chan error) error { i, err := c.DeleteStack(name) if err != nil { return err @@ -234,12 +238,13 @@ func (c *StackCollection) WaitDeleteStack(name string) error { logger.Info("waiting for stack %q to get deleted", *i.StackName) - return c.doWaitUntilStackIsDeleted(i) + go c.waitUntilStackIsDeleted(i, errs) + + return nil } -// WaitDeleteStackTask kills a stack by name and waits for DELETED status -// When nil is returned, the `errs` channel must receive an `error` object or `nil`.
-func (c *StackCollection) WaitDeleteStackTask(name string, errs chan error) error { +// BlockingWaitDeleteStack kills a stack by name and waits for DELETED status +func (c *StackCollection) BlockingWaitDeleteStack(name string) error { i, err := c.DeleteStack(name) if err != nil { return err @@ -247,9 +252,7 @@ func (c *StackCollection) WaitDeleteStackTask(name string, errs chan error) erro logger.Info("waiting for stack %q to get deleted", *i.StackName) - go c.waitUntilStackIsDeleted(i, errs) - - return nil + return c.doWaitUntilStackIsDeleted(i) } // DescribeStacks describes the existing stacks diff --git a/pkg/cfn/manager/cluster.go b/pkg/cfn/manager/cluster.go index 1f41d64ee19..d83572d341a 100644 --- a/pkg/cfn/manager/cluster.go +++ b/pkg/cfn/manager/cluster.go @@ -29,5 +29,5 @@ func (c *StackCollection) DeleteCluster() error { // WaitDeleteCluster waits till the cluster is deleted func (c *StackCollection) WaitDeleteCluster() error { - return c.WaitDeleteStack(c.makeClusterStackName()) + return c.BlockingWaitDeleteStack(c.makeClusterStackName()) } diff --git a/pkg/cfn/manager/deprecated.go b/pkg/cfn/manager/deprecated.go index 923f0dd231e..74a00bc8d2e 100644 --- a/pkg/cfn/manager/deprecated.go +++ b/pkg/cfn/manager/deprecated.go @@ -6,7 +6,7 @@ func (c *StackCollection) DeprecatedDeleteStackVPC(wait bool) error { stackName := "EKS-" + c.spec.Metadata.Name + "-VPC" if wait { - err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } @@ -20,7 +20,7 @@ func (c *StackCollection) DeprecatedDeleteStackServiceRole(wait bool) error { stackName := "EKS-" + c.spec.Metadata.Name + "-ServiceRole" if wait { - err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } @@ -34,7 +34,7 @@ func (c *StackCollection) DeprecatedDeleteStackDefaultNodeGroup(wait bool) error stackName := "EKS-" + c.spec.Metadata.Name + "-DefaultNodeGroup" if wait { - 
err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } @@ -48,7 +48,7 @@ func (c *StackCollection) DeprecatedDeleteStackControlPlane(wait bool) error { stackName := "EKS-" + c.spec.Metadata.Name + "-ControlPlane" if wait { - err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } diff --git a/pkg/cfn/manager/nodegroup.go b/pkg/cfn/manager/nodegroup.go index a39ece744de..20228bfc0bd 100644 --- a/pkg/cfn/manager/nodegroup.go +++ b/pkg/cfn/manager/nodegroup.go @@ -76,19 +76,22 @@ func (c *StackCollection) listAllNodeGroups() ([]string, error) { } // DeleteNodeGroup deletes a nodegroup stack -func (c *StackCollection) DeleteNodeGroup(errs chan error, data interface{}) error { - defer close(errs) - name := data.(string) - stack := c.MakeNodeGroupStackName(name) - _, err := c.DeleteStack(stack) - errs <- err - return nil +func (c *StackCollection) DeleteNodeGroup(name string) error { + name = c.MakeNodeGroupStackName(name) + _, err := c.DeleteStack(name) + return err } // WaitDeleteNodeGroup waits until the nodegroup is deleted func (c *StackCollection) WaitDeleteNodeGroup(errs chan error, data interface{}) error { name := c.MakeNodeGroupStackName(data.(string)) - return c.WaitDeleteStackTask(name, errs) + return c.WaitDeleteStack(name, errs) +} + +// BlockingWaitDeleteNodeGroup waits until the nodegroup is deleted +func (c *StackCollection) BlockingWaitDeleteNodeGroup(name string) error { + name = c.MakeNodeGroupStackName(name) + return c.BlockingWaitDeleteStack(name) } // ScaleInitialNodeGroup will scale the first nodegroup (ID: 0) @@ -153,7 +156,7 @@ func (c *StackCollection) ScaleNodeGroup(ng *api.NodeGroup) error { } // GetNodeGroupSummaries returns a list of summaries for the nodegroups of a cluster -func (c *StackCollection) GetNodeGroupSummaries() ([]*NodeGroupSummary, error) { +func (c *StackCollection) 
GetNodeGroupSummaries(name string) ([]*NodeGroupSummary, error) { stacks, err := c.ListStacks(fmt.Sprintf("^(eksctl|EKS)-%s-nodegroup-.+$", c.spec.Metadata.Name)) if err != nil { return nil, errors.Wrap(err, "getting nodegroup stacks") @@ -161,7 +164,6 @@ func (c *StackCollection) GetNodeGroupSummaries() ([]*NodeGroupSummary, error) { summaries := []*NodeGroupSummary{} for _, stack := range stacks { - logger.Info("stack %s\n", *stack.StackName) logger.Debug("stack = %#v", stack) summary, err := c.mapStackToNodeGroupSummary(stack) @@ -169,7 +171,11 @@ func (c *StackCollection) GetNodeGroupSummaries() ([]*NodeGroupSummary, error) { return nil, errors.New("error mapping stack to node gorup summary") } - summaries = append(summaries, summary) + if name == "" { + summaries = append(summaries, summary) + } else if summary.Name == name { + summaries = append(summaries, summary) + } } return summaries, nil @@ -209,6 +215,9 @@ func getNodeGroupName(tags []*cfn.Tag) string { if *tag.Key == NodeGroupNameTag { return *tag.Value } + if *tag.Key == oldNodeGroupIDTag { + return *tag.Value + } } return "" } diff --git a/pkg/cfn/manager/tasks.go b/pkg/cfn/manager/tasks.go index fcacaacad1a..2f16a3e1099 100644 --- a/pkg/cfn/manager/tasks.go +++ b/pkg/cfn/manager/tasks.go @@ -100,61 +100,45 @@ func (s *StackCollection) CreateOneNodeGroup(ng *api.NodeGroup) []error { }) } -// deleteAllNodeGroupsTasks returns a list of tasks for deleting all the -// nodegroup stacks -func (s *StackCollection) deleteAllNodeGroupsTasks(call taskFunc) ([]task, error) { - stacks, err := s.listAllNodeGroups() +// DeleteAllNodeGroups deletes all nodegroups without waiting +func (s *StackCollection) DeleteAllNodeGroups(call taskFunc) []error { + nodeGroupStackNames, err := s.listAllNodeGroups() if err != nil { - return nil, err + return []error{err} } - deleteAllNodeGroups := []task{} - for i := range stacks { - t := task{ - call: call, - data: stacks[i], - } - deleteAllNodeGroups = 
append(deleteAllNodeGroups, t) - } - return deleteAllNodeGroups, nil -} -// DeleteAllNodeGroups runs all tasks required to delete all the nodegroup -// stacks; any errors will be returned as a slice as soon as the group -// of tasks is completed -func (s *StackCollection) DeleteAllNodeGroups() []error { errs := []error{} - appendErr := func(err error) { - errs = append(errs, err) - } - - deleteAllNodeGroups, err := s.deleteAllNodeGroupsTasks(s.DeleteNodeGroup) - if err != nil { - appendErr(err) - return errs - } - - if Run(appendErr, deleteAllNodeGroups...); len(errs) > 0 { - return errs + for _, stackName := range nodeGroupStackNames { + if err := s.DeleteNodeGroup(stackName); err != nil { + errs = append(errs, err) + } } - return nil + return errs } // WaitDeleteAllNodeGroups runs all tasks required to delete all the nodegroup -// stacks, it waits for each nodegroup to get deleted; any errors will be -// returned as a slice as soon as the group of tasks is completed +// stacks and wait for all nodegroups to be deleted; any errors will be returned +// as a slice as soon as the group of tasks is completed func (s *StackCollection) WaitDeleteAllNodeGroups() []error { + nodeGroupStackNames, err := s.listAllNodeGroups() + if err != nil { + return []error{err} + } + errs := []error{} appendErr := func(err error) { errs = append(errs, err) } - deleteAllNodeGroups, err := s.deleteAllNodeGroupsTasks(s.WaitDeleteNodeGroup) - if err != nil { - appendErr(err) - return errs + deleteAllNodeGroups := []task{} + for i := range nodeGroupStackNames { + t := task{ + call: s.WaitDeleteNodeGroup, + data: nodeGroupStackNames[i], + } + deleteAllNodeGroups = append(deleteAllNodeGroups, t) } - if Run(appendErr, deleteAllNodeGroups...); len(errs) > 0 { return errs } diff --git a/pkg/ctl/cmdutils/cmdutils.go b/pkg/ctl/cmdutils/cmdutils.go index 5a5dcd7643d..589fb4c5fab 100644 --- a/pkg/ctl/cmdutils/cmdutils.go +++ b/pkg/ctl/cmdutils/cmdutils.go @@ -27,7 +27,7 @@ func GetNameArg(args 
[]string) string { } // AddCommonFlagsForAWS adds common flags for api.ProviderConfig -func AddCommonFlagsForAWS(group *NamedFlagSetGroup, p *api.ProviderConfig) { +func AddCommonFlagsForAWS(group *NamedFlagSetGroup, p *api.ProviderConfig, cfnRole bool) { group.InFlagSet("AWS client", func(fs *pflag.FlagSet) { fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") @@ -37,6 +37,9 @@ func AddCommonFlagsForAWS(group *NamedFlagSetGroup, p *api.ProviderConfig) { logger.Debug("ignoring error %q", err.Error()) } fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") + if cfnRole { + fs.StringVar(&p.CloudFormationRoleARN, "cfn-role-arn", "", "IAM role used by CloudFormation to call AWS API on your behalf") + } }) } @@ -45,9 +48,9 @@ func AddRegionFlag(fs *pflag.FlagSet, p *api.ProviderConfig) { fs.StringVarP(&p.Region, "region", "r", "", "AWS region") } -// AddCFNRoleARNFlag adds common --cfn-role-arn flag -func AddCFNRoleARNFlag(fs *pflag.FlagSet, p *api.ProviderConfig) { - fs.StringVar(&p.CloudFormationRoleARN, "cfn-role-arn", "", "IAM role used by CloudFormation to call AWS API on your behalf") +// AddWaitFlag adds common --wait flag +func AddWaitFlag(wait *bool, fs *pflag.FlagSet) { + fs.BoolVarP(wait, "wait", "w", false, "Wait for deletion of all resources before exiting") } // AddCommonFlagsForKubeconfig adds common flags for controlling how output kubeconfig is written diff --git a/pkg/ctl/completion/completion.go b/pkg/ctl/completion/completion.go index 37f0c5468dc..6f91851fbe2 100644 --- a/pkg/ctl/completion/completion.go +++ b/pkg/ctl/completion/completion.go @@ -3,8 +3,7 @@ package completion import ( "os" - "github.com/kubicorn/kubicorn/pkg/logger" - + "github.com/kris-nova/logger" "github.com/spf13/cobra" ) diff --git a/pkg/ctl/create/cluster.go b/pkg/ctl/create/cluster.go index d4b510d3b77..abe7e6b49f0 100644 --- 
a/pkg/ctl/create/cluster.go +++ b/pkg/ctl/create/cluster.go @@ -54,7 +54,6 @@ func createClusterCmd(g *cmdutils.Grouping) *cobra.Command { fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", fmt.Sprintf("EKS cluster name (generated if unspecified, e.g. %q)", exampleClusterName)) fs.StringToStringVarP(&cfg.Metadata.Tags, "tags", "", map[string]string{}, `A list of KV pairs used to tag the AWS resources (e.g. "Owner=John Doe,Team=Some Team")`) cmdutils.AddRegionFlag(fs, p) - cmdutils.AddCFNRoleARNFlag(fs, p) fs.StringSliceVar(&availabilityZones, "zones", nil, "(auto-select if unspecified)") fs.StringVar(&cfg.Metadata.Version, "version", api.LatestVersion, fmt.Sprintf("Kubernetes version (valid options: %s)", strings.Join(api.SupportedVersions(), ","))) }) @@ -80,7 +79,7 @@ func createClusterCmd(g *cmdutils.Grouping) *cobra.Command { fs.StringVar(&kopsClusterNameForVPC, "vpc-from-kops-cluster", "", "re-use VPC from a given kops cluster") }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.InFlagSet("Output kubeconfig", func(fs *pflag.FlagSet) { cmdutils.AddCommonFlagsForKubeconfig(fs, &kubeconfigPath, &setContext, &autoKubeconfigPath, exampleClusterName) @@ -110,6 +109,7 @@ func doCreateCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.Node } meta.Name = utils.ClusterName(meta.Name, nameArg) + // Use given name or generate one, no argument mode here ng.Name = utils.NodeGroupName(ng.Name, "") if autoKubeconfigPath { diff --git a/pkg/ctl/create/nodegroup.go b/pkg/ctl/create/nodegroup.go index 3fdea437874..0649357cb99 100644 --- a/pkg/ctl/create/nodegroup.go +++ b/pkg/ctl/create/nodegroup.go @@ -3,13 +3,13 @@ package create import ( "fmt" "os" - "strings" - "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/kris-nova/logger" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" 
"github.com/weaveworks/eksctl/pkg/eks/api" @@ -39,7 +38,6 @@ func createNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "name of the EKS cluster to add the nodegroup to") cmdutils.AddRegionFlag(fs, p) - cmdutils.AddCFNRoleARNFlag(fs, p) fs.StringVar(&cfg.Metadata.Version, "version", api.LatestVersion, fmt.Sprintf("Kubernetes version (valid options: %s)", strings.Join(api.SupportedVersions(), ","))) }) @@ -48,7 +47,7 @@ func createNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { cmdutils.AddCommonCreateNodeGroupFlags(fs, p, cfg, ng) }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.AddTo(cmd) @@ -134,7 +133,7 @@ func doCreateNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.No return err } } - logger.Success("EKS cluster %q in %q region has a new nodegroup with name %d", cfg.Metadata.Name, cfg.Metadata.Region, ng.Name) + logger.Success("created nodegroup %q in cluster %q", ng.Name, cfg.Metadata.Name) return nil diff --git a/pkg/ctl/delete/cluster.go b/pkg/ctl/delete/cluster.go index 8df56de4e44..1ad622a4ea3 100644 --- a/pkg/ctl/delete/cluster.go +++ b/pkg/ctl/delete/cluster.go @@ -34,11 +34,10 @@ func deleteClusterCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name (required)") cmdutils.AddRegionFlag(fs, p) - cmdutils.AddCFNRoleARNFlag(fs, p) - fs.BoolVarP(&waitDelete, "wait", "w", false, "Wait for deletion of all resources before exiting") + cmdutils.AddWaitFlag(&wait, fs) }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.AddTo(cmd) return cmd @@ -94,7 +93,7 @@ func doDeleteCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, nameArg stri } var clusterErr bool - if waitDelete { + if wait { clusterErr =
handleIfError(stackManager.WaitDeleteCluster(), "cluster") } else { clusterErr = handleIfError(stackManager.DeleteCluster(), "cluster") @@ -102,13 +101,13 @@ func doDeleteCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, nameArg stri if clusterErr { if handleIfError(ctl.DeprecatedDeleteControlPlane(cfg.Metadata), "control plane") { - handleIfError(stackManager.DeprecatedDeleteStackControlPlane(waitDelete), "stack control plane (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackControlPlane(wait), "stack control plane (deprecated)") } } - handleIfError(stackManager.DeprecatedDeleteStackServiceRole(waitDelete), "service group (deprecated)") - handleIfError(stackManager.DeprecatedDeleteStackVPC(waitDelete), "stack VPC (deprecated)") - handleIfError(stackManager.DeprecatedDeleteStackDefaultNodeGroup(waitDelete), "default nodegroup (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackServiceRole(wait), "service group (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackVPC(wait), "stack VPC (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackDefaultNodeGroup(wait), "default nodegroup (deprecated)") ctl.MaybeDeletePublicSSHKey(cfg.Metadata.Name) diff --git a/pkg/ctl/delete/delete.go b/pkg/ctl/delete/delete.go index 22fa8f14d3c..be6952b9bf4 100644 --- a/pkg/ctl/delete/delete.go +++ b/pkg/ctl/delete/delete.go @@ -7,7 +7,7 @@ import ( ) var ( - waitDelete bool + wait bool ) // Command will create the `delete` commands diff --git a/pkg/ctl/delete/nodegroup.go b/pkg/ctl/delete/nodegroup.go index 35c893eec74..4eeca96d94a 100644 --- a/pkg/ctl/delete/nodegroup.go +++ b/pkg/ctl/delete/nodegroup.go @@ -4,15 +4,14 @@ import ( "fmt" "os" - "errors" - - "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/kris-nova/logger" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" 
"github.com/weaveworks/eksctl/pkg/eks/api" - "github.com/weaveworks/eksctl/pkg/utils" ) func deleteNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { @@ -31,16 +30,16 @@ func deleteNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { }, } - group := &cmdutils.NamedFlagSetGroup{} + group := g.New(cmd) group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "EKS cluster name (required)") cmdutils.AddRegionFlag(fs, p) - fs.BoolVarP(&waitDelete, "wait", "w", false, "Wait for deletion of all resources before exiting") - fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Generated if unset, e.g. \"ng-a345f4\"") + fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup to delete (required)") + cmdutils.AddWaitFlag(&wait, fs) }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.AddTo(cmd) @@ -58,32 +57,39 @@ func doDeleteNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.No return errors.New("--cluster must be set") } - if utils.NodeGroupName(ng.Name, nameArg) == "" { + if ng.Name != "" && nameArg != "" { return cmdutils.ErrNameFlagAndArg(ng.Name, nameArg) } - ng.Name = utils.NodeGroupName(ng.Name, nameArg) + + if nameArg != "" { + ng.Name = nameArg + } + + if ng.Name == "" { + return fmt.Errorf("--name must be set") + } logger.Info("deleting nodegroup %q in cluster %q", ng.Name, cfg.Metadata.Name) stackManager := ctl.NewStackManager(cfg) - if waitDelete { - err := stackManager.WaitDeleteNodeGroup(nil, ng.Name) - errs := []error{err} - if len(errs) > 0 { - logger.Info("%d error(s) occurred while deleting nodegroup(s)", len(errs)) - for _, err := range errs { - if err != nil { - logger.Critical("%s\n", err.Error()) - } - } - return fmt.Errorf("failed to delete nodegroup %q", ng.Name) + { + var ( + err error + verb string + ) + if wait { + err = stackManager.BlockingWaitDeleteNodeGroup(ng.Name) + verb = "was" + } else { + err = 
stackManager.DeleteNodeGroup(ng.Name) + verb = "will be" } - } else { - + if err != nil { + return errors.Wrapf(err, "failed to delete nodegroup %q", ng.Name) + } + logger.Success("nodegroup %q %s deleted", ng.Name, verb) } - logger.Success("nodegroup %q was deleted", ng.Name) - return nil } diff --git a/pkg/ctl/get/cluster.go b/pkg/ctl/get/cluster.go index 78417e626a3..9893058efcd 100644 --- a/pkg/ctl/get/cluster.go +++ b/pkg/ctl/get/cluster.go @@ -35,14 +35,12 @@ func getClusterCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name") fs.BoolVarP(&listAllRegions, "all-regions", "A", false, "List clusters across all supported regions") - fs.IntVar(&chunkSize, "chunk-size", defaultChunkSize, "Return large lists in chunks rather than all at once. Pass 0 to disable.") - - fs.StringVarP(&p.Region, "region", "r", "", "AWS region") - fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") - + cmdutils.AddRegionFlag(fs, p) fs.StringVarP(&output, "output", "o", "table", "Specifies the output format. Choose from table,json,yaml. 
Defaults to table.") }) + cmdutils.AddCommonFlagsForAWS(group, p, false) + group.AddTo(cmd) return cmd diff --git a/pkg/ctl/get/nodegroup.go b/pkg/ctl/get/nodegroup.go index 95af6d3c01b..59a2aaddf91 100644 --- a/pkg/ctl/get/nodegroup.go +++ b/pkg/ctl/get/nodegroup.go @@ -5,16 +5,16 @@ import ( "strconv" "time" - "github.com/weaveworks/eksctl/pkg/cfn/manager" - - "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/kris-nova/logger" "github.com/pkg/errors" "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/weaveworks/eksctl/pkg/cfn/manager" "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/printers" - "github.com/spf13/pflag" ) func getNodegroupCmd(g *cmdutils.Grouping) *cobra.Command { @@ -27,11 +27,7 @@ func getNodegroupCmd(g *cmdutils.Grouping) *cobra.Command { Short: "Get nodegroups(s)", Aliases: []string{"nodegroups"}, Run: func(_ *cobra.Command, args []string) { - name := cmdutils.GetNameArg(args) - if name != "" { - ng.Name = name - } - if err := doGetNodegroups(p, cfg, ng.Name); err != nil { + if err := doGetNodegroups(p, cfg, ng, cmdutils.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -42,23 +38,19 @@ func getNodegroupCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "EKS cluster name") - - fs.StringVarP(&p.Region, "region", "r", "", "AWS region") - fs.StringVarP(&p.Profile, "profile", "p", "", "AWS creditials profile to use (overrides the AWS_PROFILE environment variable)") - + fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup") + cmdutils.AddRegionFlag(fs, p) fs.StringVarP(&output, "output", "o", "table", "Specifies the output format. Choose from table,json,yaml. 
Defaults to table.") }) - group.InFlagSet("Nodegroup", func(fs *pflag.FlagSet) { - fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Generated if unset, e.g. \"ng-a345f4\"") - }) + cmdutils.AddCommonFlagsForAWS(group, p, false) group.AddTo(cmd) return cmd } -func doGetNodegroups(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { +func doGetNodegroups(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup, nameArg string) error { ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { @@ -69,8 +61,16 @@ func doGetNodegroups(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) return errors.New("--cluster must be set") } + if ng.Name != "" && nameArg != "" { + return cmdutils.ErrNameFlagAndArg(ng.Name, nameArg) + } + + if nameArg != "" { + ng.Name = nameArg + } + manager := ctl.NewStackManager(cfg) - summaries, err := manager.GetNodeGroupSummaries() + summaries, err := manager.GetNodeGroupSummaries(ng.Name) if err != nil { return errors.Wrap(err, "getting nodegroup stack summaries") } diff --git a/pkg/ctl/scale/nodegroup.go b/pkg/ctl/scale/nodegroup.go index 2c9ae564597..5a74866f1ba 100644 --- a/pkg/ctl/scale/nodegroup.go +++ b/pkg/ctl/scale/nodegroup.go @@ -11,7 +11,6 @@ import ( "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/eks/api" - "github.com/weaveworks/eksctl/pkg/utils" ) func scaleNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { @@ -34,18 +33,14 @@ func scaleNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "EKS cluster name") + fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup to scale") fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", -1, "total number of nodes (scale to this number)") - fs.StringVarP(&p.Region, "region", "r", "", "AWS region") - fs.StringVarP(&p.Profile, "profile", "p", "", "AWS creditials 
profile to use (overrides the AWS_PROFILE environment variable)") - - fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") + cmdutils.AddRegionFlag(fs, p) }) - group.InFlagSet("Nodegroup", func(fs *pflag.FlagSet) { - fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Generated if unset, e.g. \"ng-a345f4\"") - }) + cmdutils.AddCommonFlagsForAWS(group, p, false) group.AddTo(cmd) @@ -63,10 +58,17 @@ func doScaleNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.Nod return errors.New("--cluster must be set") } - if utils.NodeGroupName(ng.Name, nameArg) == "" { + if ng.Name != "" && nameArg != "" { return cmdutils.ErrNameFlagAndArg(ng.Name, nameArg) } - ng.Name = utils.NodeGroupName(ng.Name, nameArg) + + if nameArg != "" { + ng.Name = nameArg + } + + if ng.Name == "" { + return fmt.Errorf("--name must be set") + } if ng.DesiredCapacity < 0 { return fmt.Errorf("number of nodes must be 0 or greater. Use the --nodes/-N flag") diff --git a/pkg/ctl/utils/describe_stacks.go b/pkg/ctl/utils/describe_stacks.go index 786e2242672..17da55144a0 100644 --- a/pkg/ctl/utils/describe_stacks.go +++ b/pkg/ctl/utils/describe_stacks.go @@ -43,7 +43,7 @@ func describeStacksCmd(g *cmdutils.Grouping) *cobra.Command { fs.BoolVar(&describeStacksEvents, "events", false, "include stack events") }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, false) group.AddTo(cmd) return cmd diff --git a/pkg/ctl/utils/write_kubeconfig.go b/pkg/ctl/utils/write_kubeconfig.go index 73edc23cf68..2b9092fd509 100644 --- a/pkg/ctl/utils/write_kubeconfig.go +++ b/pkg/ctl/utils/write_kubeconfig.go @@ -47,7 +47,7 @@ func writeKubeconfigCmd(g *cmdutils.Grouping) *cobra.Command { cmdutils.AddCommonFlagsForKubeconfig(fs, &writeKubeconfigOutputPath, &writeKubeconfigSetContext, &writeKubeconfigAutoPath, "") }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, false) 
group.AddTo(cmd) return cmd