diff --git a/cli/cmd/cluster-apply.go b/cli/cmd/cluster-apply.go
index 384c0edf2..3c493f396 100644
--- a/cli/cmd/cluster-apply.go
+++ b/cli/cmd/cluster-apply.go
@@ -64,12 +64,12 @@ func runClusterApply(cmd *cobra.Command, args []string) {
 
 //nolint:funlen
 func clusterApply(contextLogger *log.Entry) error {
-	ex, p, lokoConfig, assetDir := initialize(contextLogger)
+	c := initialize(contextLogger)
 
-	exists := clusterExists(contextLogger, ex)
+	exists := clusterExists(contextLogger, &c.terraformExecutor)
 	if exists && !confirm {
 		// TODO: We could plan to a file and use it when installing.
-		if err := ex.Plan(); err != nil {
+		if err := c.terraformExecutor.Plan(); err != nil {
 			return fmt.Errorf("reconciling cluster state: %v", err)
 		}
 
@@ -80,18 +80,18 @@ func clusterApply(contextLogger *log.Entry) error {
 		}
 	}
 
-	if err := p.Apply(ex); err != nil {
+	if err := c.platform.Apply(&c.terraformExecutor); err != nil {
 		return fmt.Errorf("applying platform: %v", err)
 	}
 
-	fmt.Printf("\nYour configurations are stored in %s\n", assetDir)
+	fmt.Printf("\nYour configurations are stored in %s\n", c.assetDir)
 
-	kubeconfig, err := getKubeconfig(contextLogger, lokoConfig, true)
+	kubeconfig, err := getKubeconfig(contextLogger, c.lokomotiveConfig, true)
 	if err != nil {
 		return fmt.Errorf("getting kubeconfig: %v", err)
 	}
 
-	if err := verifyCluster(kubeconfig, p.Meta().ExpectedNodes); err != nil {
+	if err := verifyCluster(kubeconfig, c.platform.Meta().ExpectedNodes); err != nil {
 		return fmt.Errorf("verifying cluster: %v", err)
 	}
 
@@ -102,14 +102,14 @@ func clusterApply(contextLogger *log.Entry) error {
 	}
 
 	// Do controlplane upgrades only if cluster already exists and it is not a managed platform.
-	if exists && !p.Meta().Managed {
+	if exists && !c.platform.Meta().Managed {
 		fmt.Printf("\nEnsuring that cluster controlplane is up to date.\n")
 
 		cu := controlplaneUpdater{
 			kubeconfig:    kubeconfig,
-			assetDir:      assetDir,
+			assetDir:      c.assetDir,
 			contextLogger: *contextLogger,
-			ex:            *ex,
+			ex:            c.terraformExecutor,
 		}
 
 		charts := platform.CommonControlPlaneCharts()
@@ -126,7 +126,7 @@ func clusterApply(contextLogger *log.Entry) error {
 		}
 	}
 
-	if ph, ok := p.(platform.PlatformWithPostApplyHook); ok {
+	if ph, ok := c.platform.(platform.PlatformWithPostApplyHook); ok {
 		if err := ph.PostApplyHook(kubeconfig); err != nil {
 			return fmt.Errorf("running platform post install hook: %v", err)
 		}
@@ -137,14 +137,14 @@ func clusterApply(contextLogger *log.Entry) error {
 	}
 
 	componentsToApply := []string{}
-	for _, component := range lokoConfig.RootConfig.Components {
+	for _, component := range c.lokomotiveConfig.RootConfig.Components {
 		componentsToApply = append(componentsToApply, component.Name)
 	}
 
 	contextLogger.Println("Applying component configuration")
 
 	if len(componentsToApply) > 0 {
-		if err := applyComponents(lokoConfig, kubeconfig, componentsToApply...); err != nil {
+		if err := applyComponents(c.lokomotiveConfig, kubeconfig, componentsToApply...); err != nil {
 			return fmt.Errorf("applying component configuration: %v", err)
 		}
 	}
diff --git a/cli/cmd/cluster-destroy.go b/cli/cmd/cluster-destroy.go
index b8f635a43..75295e710 100644
--- a/cli/cmd/cluster-destroy.go
+++ b/cli/cmd/cluster-destroy.go
@@ -48,9 +48,9 @@ func runClusterDestroy(cmd *cobra.Command, args []string) {
 }
 
 func clusterDestroy(contextLogger *log.Entry) error {
-	ex, p, _, _ := initialize(contextLogger)
+	c := initialize(contextLogger)
 
-	if !clusterExists(contextLogger, ex) {
+	if !clusterExists(contextLogger, &c.terraformExecutor) {
 		contextLogger.Println("Cluster already destroyed, nothing to do")
 
 		return nil
@@ -65,7 +65,7 @@ func clusterDestroy(contextLogger *log.Entry) error {
 		}
 	}
 
-	if err := p.Destroy(ex); err != nil {
+	if err := c.platform.Destroy(&c.terraformExecutor); err != nil {
 		return fmt.Errorf("destroying cluster: %v", err)
 	}
 
diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go
index f82fead16..5e70253ba 100644
--- a/cli/cmd/cluster.go
+++ b/cli/cmd/cluster.go
@@ -41,9 +41,18 @@ func init() {
 	RootCmd.AddCommand(clusterCmd)
 }
 
+// cluster is a temporary helper struct to aggregate objects which are used
+// for managing the cluster and components.
+type cluster struct {
+	terraformExecutor terraform.Executor
+	platform          platform.Platform
+	lokomotiveConfig  *config.Config
+	assetDir          string
+}
+
 // initialize does common initialization actions between cluster operations
 // and returns created objects to the caller for further use.
-func initialize(contextLogger *log.Entry) (*terraform.Executor, platform.Platform, *config.Config, string) {
+func initialize(contextLogger *log.Entry) *cluster {
 	lokoConfig, diags := getLokoConfig()
 	if diags.HasErrors() {
 		contextLogger.Fatal(diags)
@@ -85,7 +94,12 @@ func initialize(contextLogger *log.Entry) (*terraform.Executor, platform.Platfor
 
 	ex := initializeTerraform(contextLogger, p, b)
 
-	return ex, p, lokoConfig, assetDir
+	return &cluster{
+		terraformExecutor: *ex,
+		platform:          p,
+		lokomotiveConfig:  lokoConfig,
+		assetDir:          assetDir,
+	}
 }
 
 // initializeTerraform initialized Terraform directory using given backend and platform
diff --git a/cli/cmd/utils.go b/cli/cmd/utils.go
index 964cd9391..27c20253f 100644
--- a/cli/cmd/utils.go
+++ b/cli/cmd/utils.go
@@ -187,11 +187,11 @@ func readKubeconfigFromTerraformState(contextLogger *log.Entry) ([]byte, error)
 	contextLogger.Warn("Kubeconfig file not found in assets directory, pulling kubeconfig from " +
 		"Terraform state, this might be slow. Run 'lokoctl cluster apply' to fix it.")
 
-	ex, _, _, _ := initialize(contextLogger) //nolint:dogsled
+	c := initialize(contextLogger)
 
 	kubeconfig := ""
 
-	if err := ex.Output(kubeconfigTerraformOutputKey, &kubeconfig); err != nil {
+	if err := c.terraformExecutor.Output(kubeconfigTerraformOutputKey, &kubeconfig); err != nil {
 		return nil, fmt.Errorf("reading kubeconfig file content from Terraform state: %w", err)
 	}