From d444d6cdcc39da0d308339f985f5de5486b25c16 Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Mon, 19 Oct 2020 13:14:10 +0200 Subject: [PATCH] cli/cmd: move all CLI independent code into cluster subpackage This draws clear distinction between the CLI code sitting in cli/cmd package right now and Lokomotive "core" logic. cli/cmd/cluster package still contains some UI parts, like user input confirmation or output printed directly to standard output, which should also be addressed in the future. Refs #603 Signed-off-by: Mateusz Gozdek --- cli/cmd/cluster-apply.go | 180 +------------- cli/cmd/cluster-destroy.go | 65 +---- cli/cmd/cluster.go | 227 ----------------- cli/cmd/cluster/apply.go | 183 ++++++++++++++ cli/cmd/cluster/cluster.go | 245 +++++++++++++++++++ cli/cmd/cluster/component-apply.go | 87 +++++++ cli/cmd/cluster/component-delete.go | 127 ++++++++++ cli/cmd/cluster/component-render-manifest.go | 81 ++++++ cli/cmd/cluster/destroy.go | 70 ++++++ cli/cmd/cluster/health.go | 108 ++++++++ cli/cmd/{ => cluster}/utils.go | 2 +- cli/cmd/{ => cluster}/utils_internal_test.go | 2 +- cli/cmd/component-apply.go | 77 +----- cli/cmd/component-delete.go | 121 +-------- cli/cmd/component-render-manifest.go | 70 +----- cli/cmd/health.go | 97 +------- 16 files changed, 944 insertions(+), 798 deletions(-) create mode 100644 cli/cmd/cluster/apply.go create mode 100644 cli/cmd/cluster/cluster.go create mode 100644 cli/cmd/cluster/component-apply.go create mode 100644 cli/cmd/cluster/component-delete.go create mode 100644 cli/cmd/cluster/component-render-manifest.go create mode 100644 cli/cmd/cluster/destroy.go create mode 100644 cli/cmd/cluster/health.go rename cli/cmd/{ => cluster}/utils.go (99%) rename cli/cmd/{ => cluster}/utils_internal_test.go (99%) diff --git a/cli/cmd/cluster-apply.go b/cli/cmd/cluster-apply.go index 8a8046e26..26239420d 100644 --- a/cli/cmd/cluster-apply.go +++ b/cli/cmd/cluster-apply.go @@ -15,17 +15,11 @@ package cmd import ( - "fmt" - log 
"github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" - "github.com/kinvolk/lokomotive/internal" - "github.com/kinvolk/lokomotive/pkg/helm" - "github.com/kinvolk/lokomotive/pkg/k8sutil" - "github.com/kinvolk/lokomotive/pkg/lokomotive" - "github.com/kinvolk/lokomotive/pkg/platform" + "github.com/kinvolk/lokomotive/cli/cmd/cluster" ) var ( @@ -58,172 +52,16 @@ func runClusterApply(cmd *cobra.Command, args []string) { "args": args, }) - options := clusterApplyOptions{ - confirm: confirm, - upgradeKubelets: upgradeKubelets, - skipComponents: skipComponents, - verbose: verbose, - configPath: viper.GetString("lokocfg"), - valuesPath: viper.GetString("lokocfg-vars"), + options := cluster.ApplyOptions{ + Confirm: confirm, + UpgradeKubelets: upgradeKubelets, + SkipComponents: skipComponents, + Verbose: verbose, + ConfigPath: viper.GetString("lokocfg"), + ValuesPath: viper.GetString("lokocfg-vars"), } - if err := clusterApply(contextLogger, options); err != nil { + if err := cluster.Apply(contextLogger, options); err != nil { contextLogger.Fatalf("Applying cluster failed: %v", err) } } - -type clusterApplyOptions struct { - confirm bool - upgradeKubelets bool - skipComponents bool - verbose bool - configPath string - valuesPath string -} - -//nolint:funlen -func clusterApply(contextLogger *log.Entry, options clusterApplyOptions) error { - cc := clusterConfig{ - verbose: options.verbose, - configPath: options.configPath, - valuesPath: options.valuesPath, - } - - c, err := cc.initialize(contextLogger) - if err != nil { - return fmt.Errorf("initializing: %w", err) - } - - exists, err := clusterExists(c.terraformExecutor) - if err != nil { - return fmt.Errorf("checking if cluster exists: %w", err) - } - - if exists && !options.confirm { - // TODO: We could plan to a file and use it when installing. 
- if err := c.terraformExecutor.Plan(); err != nil { - return fmt.Errorf("reconciling cluster state: %v", err) - } - - if !askForConfirmation("Do you want to proceed with cluster apply?") { - contextLogger.Println("Cluster apply cancelled") - - return nil - } - } - - if err := c.platform.Apply(&c.terraformExecutor); err != nil { - return fmt.Errorf("applying platform: %v", err) - } - - fmt.Printf("\nYour configurations are stored in %s\n", c.assetDir) - - kg := kubeconfigGetter{ - platformRequired: true, - } - - kubeconfig, err := kg.getKubeconfig(contextLogger, c.lokomotiveConfig) - if err != nil { - return fmt.Errorf("getting kubeconfig: %v", err) - } - - if err := verifyCluster(kubeconfig, c.platform.Meta().ExpectedNodes); err != nil { - return fmt.Errorf("verifying cluster: %v", err) - } - - // Update all the pre installed namespaces with lokomotive specific label. - // `lokomotive.kinvolk.io/name: `. - if err := updateInstalledNamespaces(kubeconfig); err != nil { - return fmt.Errorf("updating installed namespace: %v", err) - } - - // Do controlplane upgrades only if cluster already exists and it is not a managed platform. 
- if exists && !c.platform.Meta().Managed { - fmt.Printf("\nEnsuring that cluster controlplane is up to date.\n") - - cu := controlplaneUpdater{ - kubeconfig: kubeconfig, - assetDir: c.assetDir, - contextLogger: *contextLogger, - ex: c.terraformExecutor, - } - - charts := platform.CommonControlPlaneCharts() - - if options.upgradeKubelets { - charts = append(charts, helm.LokomotiveChart{ - Name: "kubelet", - Namespace: "kube-system", - }) - } - - for _, c := range charts { - if err := cu.upgradeComponent(c.Name, c.Namespace); err != nil { - return fmt.Errorf("upgrading controlplane component %q: %w", c.Name, err) - } - } - } - - if ph, ok := c.platform.(platform.PlatformWithPostApplyHook); ok { - if err := ph.PostApplyHook(kubeconfig); err != nil { - return fmt.Errorf("running platform post install hook: %v", err) - } - } - - if options.skipComponents { - return nil - } - - componentObjects, err := componentNamesToObjects(selectComponentNames(nil, *c.lokomotiveConfig.RootConfig)) - if err != nil { - return fmt.Errorf("getting component objects: %w", err) - } - - contextLogger.Println("Applying component configuration") - - if err := applyComponents(c.lokomotiveConfig, kubeconfig, componentObjects); err != nil { - return fmt.Errorf("applying component configuration: %v", err) - } - - return nil -} - -func verifyCluster(kubeconfig []byte, expectedNodes int) error { - cs, err := k8sutil.NewClientset(kubeconfig) - if err != nil { - return fmt.Errorf("creating Kubernetes clientset: %w", err) - } - - cluster := lokomotive.NewCluster(cs, expectedNodes) - - return cluster.Verify() -} - -func updateInstalledNamespaces(kubeconfig []byte) error { - cs, err := k8sutil.NewClientset(kubeconfig) - if err != nil { - return fmt.Errorf("create clientset: %v", err) - } - - nsclient := cs.CoreV1().Namespaces() - - namespaces, err := k8sutil.ListNamespaces(nsclient) - if err != nil { - return fmt.Errorf("getting list of namespaces: %v", err) - } - - for _, ns := range namespaces.Items { 
- ns := k8sutil.Namespace{ - Name: ns.ObjectMeta.Name, - Labels: map[string]string{ - internal.NamespaceLabelKey: ns.ObjectMeta.Name, - }, - } - - if err := k8sutil.CreateOrUpdateNamespace(ns, nsclient); err != nil { - return fmt.Errorf("namespace %q with labels: %v", ns, err) - } - } - - return nil -} diff --git a/cli/cmd/cluster-destroy.go b/cli/cmd/cluster-destroy.go index 524405ad9..87117cc44 100644 --- a/cli/cmd/cluster-destroy.go +++ b/cli/cmd/cluster-destroy.go @@ -15,11 +15,11 @@ package cmd import ( - "fmt" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" + + "github.com/kinvolk/lokomotive/cli/cmd/cluster" ) var confirm bool @@ -43,63 +43,14 @@ func runClusterDestroy(cmd *cobra.Command, args []string) { "args": args, }) - options := clusterDestroyOptions{ - confirm: confirm, - verbose: verbose, - configPath: viper.GetString("lokocfg"), - valuesPath: viper.GetString("lokocfg-vars"), + options := cluster.DestroyOptions{ + Confirm: confirm, + Verbose: verbose, + ConfigPath: viper.GetString("lokocfg"), + ValuesPath: viper.GetString("lokocfg-vars"), } - if err := clusterDestroy(contextLogger, options); err != nil { + if err := cluster.Destroy(contextLogger, options); err != nil { contextLogger.Fatalf("Destroying cluster: %v", err) } } - -type clusterDestroyOptions struct { - confirm bool - verbose bool - configPath string - valuesPath string -} - -func clusterDestroy(contextLogger *log.Entry, options clusterDestroyOptions) error { - cc := clusterConfig{ - verbose: options.verbose, - configPath: options.configPath, - valuesPath: options.valuesPath, - } - - c, err := cc.initialize(contextLogger) - if err != nil { - return fmt.Errorf("initializing: %w", err) - } - - exists, err := clusterExists(c.terraformExecutor) - if err != nil { - return fmt.Errorf("checking if cluster exists: %w", err) - } - - if !exists { - contextLogger.Println("Cluster already destroyed, nothing to do") - - return nil - } - - if !options.confirm { - 
confirmation := askForConfirmation("WARNING: This action cannot be undone. Do you really want to destroy the cluster?") - if !confirmation { - contextLogger.Println("Cluster destroy canceled") - - return nil - } - } - - if err := c.platform.Destroy(&c.terraformExecutor); err != nil { - return fmt.Errorf("destroying cluster: %v", err) - } - - contextLogger.Println("Cluster destroyed successfully") - contextLogger.Println("You can safely remove the assets directory now") - - return nil -} diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index 66ec1ec5b..305fd70f0 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -15,21 +15,7 @@ package cmd import ( - "fmt" - - "github.com/mitchellh/go-homedir" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - "sigs.k8s.io/yaml" - - "github.com/kinvolk/lokomotive/pkg/backend" - "github.com/kinvolk/lokomotive/pkg/backend/local" - "github.com/kinvolk/lokomotive/pkg/components/util" - "github.com/kinvolk/lokomotive/pkg/config" - "github.com/kinvolk/lokomotive/pkg/platform" - "github.com/kinvolk/lokomotive/pkg/terraform" ) var clusterCmd = &cobra.Command{ @@ -40,216 +26,3 @@ var clusterCmd = &cobra.Command{ func init() { RootCmd.AddCommand(clusterCmd) } - -// cluster is a temporary helper struct to aggregate objects which are used -// for managing the cluster and components. -type cluster struct { - terraformExecutor terraform.Executor - platform platform.Platform - lokomotiveConfig *config.Config - assetDir string -} - -type clusterConfig struct { - verbose bool - configPath string - valuesPath string -} - -// initialize does common initialization actions between cluster operations -// and returns created objects to the caller for further use. 
-func (cc clusterConfig) initialize(contextLogger *log.Entry) (*cluster, error) { - lokoConfig, diags := config.LoadConfig(cc.configPath, cc.valuesPath) - if diags.HasErrors() { - return nil, diags - } - - p, diags := getConfiguredPlatform(lokoConfig, true) - if diags.HasErrors() { - for _, diagnostic := range diags { - contextLogger.Error(diagnostic.Error()) - } - - return nil, fmt.Errorf("loading platform configuration") - } - - // Get the configured backend for the cluster. Backend types currently supported: local, s3. - b, diags := getConfiguredBackend(lokoConfig) - if diags.HasErrors() { - for _, diagnostic := range diags { - contextLogger.Error(diagnostic.Error()) - } - - return nil, fmt.Errorf("loading backend configuration") - } - - // Use a local backend if no backend is configured. - if b == nil { - b = local.NewLocalBackend() - } - - assetDir, err := homedir.Expand(p.Meta().AssetDir) - if err != nil { - return nil, fmt.Errorf("expanding path %q: %v", p.Meta().AssetDir, err) - } - - // Validate backend configuration. - if err = b.Validate(); err != nil { - return nil, fmt.Errorf("validating backend configuration: %v", err) - } - - ex, err := cc.initializeTerraform(p, b) - if err != nil { - return nil, fmt.Errorf("initializing Terraform: %w", err) - } - - return &cluster{ - terraformExecutor: *ex, - platform: p, - lokomotiveConfig: lokoConfig, - assetDir: assetDir, - }, nil -} - -// initializeTerraform initialized Terraform directory using given backend and platform -// and returns configured executor. -func (cc clusterConfig) initializeTerraform(p platform.Platform, b backend.Backend) (*terraform.Executor, error) { - assetDir, err := homedir.Expand(p.Meta().AssetDir) - if err != nil { - return nil, fmt.Errorf("expanding path %q: %w", p.Meta().AssetDir, err) - } - - // Render backend configuration. 
- renderedBackend, err := b.Render() - if err != nil { - return nil, fmt.Errorf("rendering backend configuration: %w", err) - } - - // Configure Terraform directory, module and backend. - if err := terraform.Configure(assetDir, renderedBackend); err != nil { - return nil, fmt.Errorf("configuring Terraform: %w", err) - } - - conf := terraform.Config{ - WorkingDir: terraform.GetTerraformRootDir(assetDir), - Verbose: cc.verbose, - } - - ex, err := terraform.NewExecutor(conf) - if err != nil { - return nil, fmt.Errorf("creating Terraform executor: %w", err) - } - - if err := p.Initialize(ex); err != nil { - return nil, fmt.Errorf("initializing Platform: %w", err) - } - - if err := ex.Init(); err != nil { - return nil, fmt.Errorf("running 'terraform init': %w", err) - } - - return ex, nil -} - -// clusterExists determines if cluster has already been created by getting all -// outputs from the Terraform. If there is any output defined, it means 'terraform apply' -// run at least once. -func clusterExists(ex terraform.Executor) (bool, error) { - o := map[string]interface{}{} - - if err := ex.Output("", &o); err != nil { - return false, fmt.Errorf("getting Terraform output: %w", err) - } - - return len(o) != 0, nil -} - -type controlplaneUpdater struct { - kubeconfig []byte - assetDir string - contextLogger log.Entry - ex terraform.Executor -} - -func (c controlplaneUpdater) getControlplaneChart(name string) (*chart.Chart, error) { - chart, err := platform.ControlPlaneChart(name) - if err != nil { - return nil, fmt.Errorf("loading chart from assets failed: %w", err) - } - - if err := chart.Validate(); err != nil { - return nil, fmt.Errorf("chart is invalid: %w", err) - } - - return chart, nil -} - -func (c controlplaneUpdater) getControlplaneValues(name string) (map[string]interface{}, error) { - valuesRaw := "" - if err := c.ex.Output(fmt.Sprintf("%s_values", name), &valuesRaw); err != nil { - return nil, fmt.Errorf("failed to get controlplane component values.yaml from 
Terraform: %w", err) - } - - values := map[string]interface{}{} - if err := yaml.Unmarshal([]byte(valuesRaw), &values); err != nil { - return nil, fmt.Errorf("failed to parse values.yaml for controlplane component: %w", err) - } - - return values, nil -} - -func (c controlplaneUpdater) upgradeComponent(component, namespace string) error { - actionConfig, err := util.HelmActionConfig(namespace, c.kubeconfig) - if err != nil { - return fmt.Errorf("initializing Helm action: %w", err) - } - - helmChart, err := c.getControlplaneChart(component) - if err != nil { - return fmt.Errorf("loading chart from assets: %w", err) - } - - values, err := c.getControlplaneValues(component) - if err != nil { - return fmt.Errorf("getting chart values from Terraform: %w", err) - } - - exists, err := util.ReleaseExists(*actionConfig, component) - if err != nil { - return fmt.Errorf("checking if controlplane component is installed: %w", err) - } - - if !exists { - fmt.Printf("Controlplane component '%s' is missing, reinstalling...", component) - - install := action.NewInstall(actionConfig) - install.ReleaseName = component - install.Namespace = namespace - install.Atomic = true - install.CreateNamespace = true - - if _, err := install.Run(helmChart, values); err != nil { - fmt.Println("Failed!") - - return fmt.Errorf("installing controlplane component: %w", err) - } - - fmt.Println("Done.") - } - - update := action.NewUpgrade(actionConfig) - - update.Atomic = true - - fmt.Printf("Ensuring controlplane component '%s' is up to date... 
", component) - - if _, err := update.Run(component, helmChart, values); err != nil { - fmt.Println("Failed!") - - return fmt.Errorf("updating controlplane component: %w", err) - } - - fmt.Println("Done.") - - return nil -} diff --git a/cli/cmd/cluster/apply.go b/cli/cmd/cluster/apply.go new file mode 100644 index 000000000..abe02d803 --- /dev/null +++ b/cli/cmd/cluster/apply.go @@ -0,0 +1,183 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cluster + +import ( + "fmt" + + log "github.com/sirupsen/logrus" + + "github.com/kinvolk/lokomotive/internal" + "github.com/kinvolk/lokomotive/pkg/helm" + "github.com/kinvolk/lokomotive/pkg/k8sutil" + "github.com/kinvolk/lokomotive/pkg/lokomotive" + "github.com/kinvolk/lokomotive/pkg/platform" +) + +type ApplyOptions struct { + Confirm bool + UpgradeKubelets bool + SkipComponents bool + Verbose bool + ConfigPath string + ValuesPath string +} + +//nolint:funlen +func Apply(contextLogger *log.Entry, options ApplyOptions) error { + cc := clusterConfig{ + verbose: options.Verbose, + configPath: options.ConfigPath, + valuesPath: options.ValuesPath, + } + + c, err := cc.initialize(contextLogger) + if err != nil { + return fmt.Errorf("initializing: %w", err) + } + + exists, err := clusterExists(c.terraformExecutor) + if err != nil { + return fmt.Errorf("checking if cluster exists: %w", err) + } + + if exists && !options.Confirm { + // TODO: We could plan to a file and use it when installing. + if err := c.terraformExecutor.Plan(); err != nil { + return fmt.Errorf("reconciling cluster state: %v", err) + } + + if !askForConfirmation("Do you want to proceed with cluster apply?") { + contextLogger.Println("Cluster apply cancelled") + + return nil + } + } + + if err := c.platform.Apply(&c.terraformExecutor); err != nil { + return fmt.Errorf("applying platform: %v", err) + } + + fmt.Printf("\nYour configurations are stored in %s\n", c.assetDir) + + kg := kubeconfigGetter{ + platformRequired: true, + } + + kubeconfig, err := kg.getKubeconfig(contextLogger, c.lokomotiveConfig) + if err != nil { + return fmt.Errorf("getting kubeconfig: %v", err) + } + + if err := verifyCluster(kubeconfig, c.platform.Meta().ExpectedNodes); err != nil { + return fmt.Errorf("verifying cluster: %v", err) + } + + // Update all the pre installed namespaces with lokomotive specific label. + // `lokomotive.kinvolk.io/name: `. 
+ if err := updateInstalledNamespaces(kubeconfig); err != nil { + return fmt.Errorf("updating installed namespace: %v", err) + } + + // Do controlplane upgrades only if cluster already exists and it is not a managed platform. + if exists && !c.platform.Meta().Managed { + fmt.Printf("\nEnsuring that cluster controlplane is up to date.\n") + + cu := controlplaneUpdater{ + kubeconfig: kubeconfig, + assetDir: c.assetDir, + contextLogger: *contextLogger, + ex: c.terraformExecutor, + } + + charts := platform.CommonControlPlaneCharts() + + if options.UpgradeKubelets { + charts = append(charts, helm.LokomotiveChart{ + Name: "kubelet", + Namespace: "kube-system", + }) + } + + for _, c := range charts { + if err := cu.upgradeComponent(c.Name, c.Namespace); err != nil { + return fmt.Errorf("upgrading controlplane component %q: %w", c.Name, err) + } + } + } + + if ph, ok := c.platform.(platform.PlatformWithPostApplyHook); ok { + if err := ph.PostApplyHook(kubeconfig); err != nil { + return fmt.Errorf("running platform post install hook: %v", err) + } + } + + if options.SkipComponents { + return nil + } + + componentObjects, err := componentNamesToObjects(selectComponentNames(nil, *c.lokomotiveConfig.RootConfig)) + if err != nil { + return fmt.Errorf("getting component objects: %w", err) + } + + contextLogger.Println("Applying component configuration") + + if err := applyComponents(c.lokomotiveConfig, kubeconfig, componentObjects); err != nil { + return fmt.Errorf("applying component configuration: %v", err) + } + + return nil +} + +func verifyCluster(kubeconfig []byte, expectedNodes int) error { + cs, err := k8sutil.NewClientset(kubeconfig) + if err != nil { + return fmt.Errorf("creating Kubernetes clientset: %w", err) + } + + cluster := lokomotive.NewCluster(cs, expectedNodes) + + return cluster.Verify() +} + +func updateInstalledNamespaces(kubeconfig []byte) error { + cs, err := k8sutil.NewClientset(kubeconfig) + if err != nil { + return fmt.Errorf("create clientset: %v", 
err) + } + + nsclient := cs.CoreV1().Namespaces() + + namespaces, err := k8sutil.ListNamespaces(nsclient) + if err != nil { + return fmt.Errorf("getting list of namespaces: %v", err) + } + + for _, ns := range namespaces.Items { + ns := k8sutil.Namespace{ + Name: ns.ObjectMeta.Name, + Labels: map[string]string{ + internal.NamespaceLabelKey: ns.ObjectMeta.Name, + }, + } + + if err := k8sutil.CreateOrUpdateNamespace(ns, nsclient); err != nil { + return fmt.Errorf("namespace %q with labels: %v", ns, err) + } + } + + return nil +} diff --git a/cli/cmd/cluster/cluster.go b/cli/cmd/cluster/cluster.go new file mode 100644 index 000000000..709474636 --- /dev/null +++ b/cli/cmd/cluster/cluster.go @@ -0,0 +1,245 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cluster + +import ( + "fmt" + + "github.com/mitchellh/go-homedir" + log "github.com/sirupsen/logrus" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart" + "sigs.k8s.io/yaml" + + "github.com/kinvolk/lokomotive/pkg/backend" + "github.com/kinvolk/lokomotive/pkg/backend/local" + "github.com/kinvolk/lokomotive/pkg/components/util" + "github.com/kinvolk/lokomotive/pkg/config" + "github.com/kinvolk/lokomotive/pkg/platform" + "github.com/kinvolk/lokomotive/pkg/terraform" +) + +// cluster is a temporary helper struct to aggregate objects which are used +// for managing the cluster and components. 
+type cluster struct { + terraformExecutor terraform.Executor + platform platform.Platform + lokomotiveConfig *config.Config + assetDir string +} + +type clusterConfig struct { + verbose bool + configPath string + valuesPath string +} + +// initialize does common initialization actions between cluster operations +// and returns created objects to the caller for further use. +func (cc clusterConfig) initialize(contextLogger *log.Entry) (*cluster, error) { + lokoConfig, diags := config.LoadConfig(cc.configPath, cc.valuesPath) + if diags.HasErrors() { + return nil, diags + } + + p, diags := getConfiguredPlatform(lokoConfig, true) + if diags.HasErrors() { + for _, diagnostic := range diags { + contextLogger.Error(diagnostic.Error()) + } + + return nil, fmt.Errorf("loading platform configuration") + } + + // Get the configured backend for the cluster. Backend types currently supported: local, s3. + b, diags := getConfiguredBackend(lokoConfig) + if diags.HasErrors() { + for _, diagnostic := range diags { + contextLogger.Error(diagnostic.Error()) + } + + return nil, fmt.Errorf("loading backend configuration") + } + + // Use a local backend if no backend is configured. + if b == nil { + b = local.NewLocalBackend() + } + + assetDir, err := homedir.Expand(p.Meta().AssetDir) + if err != nil { + return nil, fmt.Errorf("expanding path %q: %v", p.Meta().AssetDir, err) + } + + // Validate backend configuration. + if err = b.Validate(); err != nil { + return nil, fmt.Errorf("validating backend configuration: %v", err) + } + + ex, err := cc.initializeTerraform(p, b) + if err != nil { + return nil, fmt.Errorf("initializing Terraform: %w", err) + } + + return &cluster{ + terraformExecutor: *ex, + platform: p, + lokomotiveConfig: lokoConfig, + assetDir: assetDir, + }, nil +} + +// initializeTerraform initialized Terraform directory using given backend and platform +// and returns configured executor. 
+func (cc clusterConfig) initializeTerraform(p platform.Platform, b backend.Backend) (*terraform.Executor, error) { + assetDir, err := homedir.Expand(p.Meta().AssetDir) + if err != nil { + return nil, fmt.Errorf("expanding path %q: %w", p.Meta().AssetDir, err) + } + + // Render backend configuration. + renderedBackend, err := b.Render() + if err != nil { + return nil, fmt.Errorf("rendering backend configuration: %w", err) + } + + // Configure Terraform directory, module and backend. + if err := terraform.Configure(assetDir, renderedBackend); err != nil { + return nil, fmt.Errorf("configuring Terraform: %w", err) + } + + conf := terraform.Config{ + WorkingDir: terraform.GetTerraformRootDir(assetDir), + Verbose: cc.verbose, + } + + ex, err := terraform.NewExecutor(conf) + if err != nil { + return nil, fmt.Errorf("creating Terraform executor: %w", err) + } + + if err := p.Initialize(ex); err != nil { + return nil, fmt.Errorf("initializing Platform: %w", err) + } + + if err := ex.Init(); err != nil { + return nil, fmt.Errorf("running 'terraform init': %w", err) + } + + return ex, nil +} + +// clusterExists determines if cluster has already been created by getting all +// outputs from the Terraform. If there is any output defined, it means 'terraform apply' +// run at least once. 
+func clusterExists(ex terraform.Executor) (bool, error) { + o := map[string]interface{}{} + + if err := ex.Output("", &o); err != nil { + return false, fmt.Errorf("getting Terraform output: %w", err) + } + + return len(o) != 0, nil +} + +type controlplaneUpdater struct { + kubeconfig []byte + assetDir string + contextLogger log.Entry + ex terraform.Executor +} + +func (c controlplaneUpdater) getControlplaneChart(name string) (*chart.Chart, error) { + chart, err := platform.ControlPlaneChart(name) + if err != nil { + return nil, fmt.Errorf("loading chart from assets failed: %w", err) + } + + if err := chart.Validate(); err != nil { + return nil, fmt.Errorf("chart is invalid: %w", err) + } + + return chart, nil +} + +func (c controlplaneUpdater) getControlplaneValues(name string) (map[string]interface{}, error) { + valuesRaw := "" + if err := c.ex.Output(fmt.Sprintf("%s_values", name), &valuesRaw); err != nil { + return nil, fmt.Errorf("failed to get controlplane component values.yaml from Terraform: %w", err) + } + + values := map[string]interface{}{} + if err := yaml.Unmarshal([]byte(valuesRaw), &values); err != nil { + return nil, fmt.Errorf("failed to parse values.yaml for controlplane component: %w", err) + } + + return values, nil +} + +func (c controlplaneUpdater) upgradeComponent(component, namespace string) error { + actionConfig, err := util.HelmActionConfig(namespace, c.kubeconfig) + if err != nil { + return fmt.Errorf("initializing Helm action: %w", err) + } + + helmChart, err := c.getControlplaneChart(component) + if err != nil { + return fmt.Errorf("loading chart from assets: %w", err) + } + + values, err := c.getControlplaneValues(component) + if err != nil { + return fmt.Errorf("getting chart values from Terraform: %w", err) + } + + exists, err := util.ReleaseExists(*actionConfig, component) + if err != nil { + return fmt.Errorf("checking if controlplane component is installed: %w", err) + } + + if !exists { + fmt.Printf("Controlplane component '%s' 
is missing, reinstalling...", component) + + install := action.NewInstall(actionConfig) + install.ReleaseName = component + install.Namespace = namespace + install.Atomic = true + install.CreateNamespace = true + + if _, err := install.Run(helmChart, values); err != nil { + fmt.Println("Failed!") + + return fmt.Errorf("installing controlplane component: %w", err) + } + + fmt.Println("Done.") + } + + update := action.NewUpgrade(actionConfig) + + update.Atomic = true + + fmt.Printf("Ensuring controlplane component '%s' is up to date... ", component) + + if _, err := update.Run(component, helmChart, values); err != nil { + fmt.Println("Failed!") + + return fmt.Errorf("updating controlplane component: %w", err) + } + + fmt.Println("Done.") + + return nil +} diff --git a/cli/cmd/cluster/component-apply.go b/cli/cmd/cluster/component-apply.go new file mode 100644 index 000000000..83b8fc319 --- /dev/null +++ b/cli/cmd/cluster/component-apply.go @@ -0,0 +1,87 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cluster + +import ( + "fmt" + + log "github.com/sirupsen/logrus" + + "github.com/kinvolk/lokomotive/pkg/components" + "github.com/kinvolk/lokomotive/pkg/components/util" + "github.com/kinvolk/lokomotive/pkg/config" +) + +type ComponentApplyOptions struct { + KubeconfigPath string + ConfigPath string + ValuesPath string +} + +// ComponentApply implements 'lokoctl component apply' separated from CLI +// dependencies. +func ComponentApply(contextLogger *log.Entry, componentsList []string, options ComponentApplyOptions) error { + lokoConfig, diags := config.LoadConfig(options.ConfigPath, options.ValuesPath) + if diags.HasErrors() { + return diags + } + + componentObjects, err := componentNamesToObjects(selectComponentNames(componentsList, *lokoConfig.RootConfig)) + if err != nil { + return fmt.Errorf("getting component objects: %w", err) + } + + kg := kubeconfigGetter{ + platformRequired: false, + path: options.KubeconfigPath, + } + + kubeconfig, err := kg.getKubeconfig(contextLogger, lokoConfig) + if err != nil { + contextLogger.Debugf("Error in finding kubeconfig file: %s", err) + + return fmt.Errorf("suitable kubeconfig file not found. Did you run 'lokoctl cluster apply' ?") + } + + if err := applyComponents(lokoConfig, kubeconfig, componentObjects); err != nil { + return fmt.Errorf("applying components: %w", err) + } + + return nil +} + +// applyComponents reads the configuration of given components and applies them to the cluster pointer +// by given kubeconfig file content. 
+func applyComponents(lokoConfig *config.Config, kubeconfig []byte, componentObjects []components.Component) error { + for _, component := range componentObjects { + componentName := component.Metadata().Name + fmt.Printf("Applying component '%s'...\n", componentName) + + componentConfigBody := lokoConfig.LoadComponentConfigBody(componentName) + + if diags := component.LoadConfig(componentConfigBody, lokoConfig.EvalContext); diags.HasErrors() { + fmt.Printf("%v\n", diags) + return diags + } + + if err := util.InstallComponent(component, kubeconfig); err != nil { + return fmt.Errorf("installing component %q: %w", componentName, err) + } + + fmt.Printf("Successfully applied component '%s' configuration!\n", componentName) + } + + return nil +} diff --git a/cli/cmd/cluster/component-delete.go b/cli/cmd/cluster/component-delete.go new file mode 100644 index 000000000..3f1ad702f --- /dev/null +++ b/cli/cmd/cluster/component-delete.go @@ -0,0 +1,127 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cluster
+
+import (
+ "fmt"
+ "strings"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/kinvolk/lokomotive/pkg/components"
+ "github.com/kinvolk/lokomotive/pkg/components/util"
+ "github.com/kinvolk/lokomotive/pkg/config"
+)
+
+type ComponentDeleteOptions struct {
+ Confirm bool
+ DeleteNamespace bool
+ KubeconfigPath string
+ ConfigPath string
+ ValuesPath string
+}
+
+// ComponentDelete implements 'lokoctl component delete' separated from CLI
+// dependencies.
+func ComponentDelete(contextLogger *log.Entry, componentsList []string, options ComponentDeleteOptions) error {
+ lokoConfig, diags := config.LoadConfig(options.ConfigPath, options.ValuesPath)
+ if diags.HasErrors() {
+ return diags
+ }
+
+ componentsToDelete := selectComponentNames(componentsList, *lokoConfig.RootConfig)
+
+ componentObjects, err := componentNamesToObjects(componentsToDelete)
+ if err != nil {
+ return fmt.Errorf("getting component objects: %v", err)
+ }
+
+ confirmationMessage := fmt.Sprintf(
+ "The following components will be deleted:\n\t%s\n\nAre you sure you want to proceed?",
+ strings.Join(componentsToDelete, "\n\t"),
+ )
+
+ if !options.Confirm && !askForConfirmation(confirmationMessage) {
+ contextLogger.Info("Components deletion cancelled.")
+
+ return nil
+ }
+
+ kg := kubeconfigGetter{
+ platformRequired: false,
+ path: options.KubeconfigPath,
+ }
+
+ kubeconfig, err := kg.getKubeconfig(contextLogger, lokoConfig)
+ if err != nil {
+ contextLogger.Debugf("Error in finding kubeconfig file: %s", err)
+
+ return fmt.Errorf("suitable kubeconfig file not found. Did you run 'lokoctl cluster apply' ?")
+ }
+
+ if err := deleteComponents(kubeconfig, componentObjects, options.DeleteNamespace); err != nil {
+ return fmt.Errorf("deleting components: %w", err)
+ }
+
+ return nil
+}
+
+// selectComponentNames returns list of components to operate on. If explicit list is empty,
+// it returns components defined in the configuration.
+func selectComponentNames(list []string, lokomotiveConfig config.RootConfig) []string { + if len(list) != 0 { + return list + } + + for _, component := range lokomotiveConfig.Components { + list = append(list, component.Name) + } + + return list +} + +// componentNamesToObjects converts list of component names to list of component objects. +// If some component does not exist, error is returned. +func componentNamesToObjects(componentNames []string) ([]components.Component, error) { + c := []components.Component{} + + for _, componentName := range componentNames { + component, err := components.Get(componentName) + if err != nil { + return nil, fmt.Errorf("getting component %q: %w", componentName, err) + } + + c = append(c, component) + } + + return c, nil +} + +func deleteComponents(kubeconfig []byte, componentObjects []components.Component, deleteNamespace bool) error { + for _, compObj := range componentObjects { + fmt.Printf("Deleting component '%s'...\n", compObj.Metadata().Name) + + if err := util.UninstallComponent(compObj, kubeconfig, deleteNamespace); err != nil { + return fmt.Errorf("uninstalling component %q: %w", compObj.Metadata().Name, err) + } + + fmt.Printf("Successfully deleted component %q!\n", compObj.Metadata().Name) + } + + // Add a line to distinguish between info logs and errors, if any. + fmt.Println() + + return nil +} diff --git a/cli/cmd/cluster/component-render-manifest.go b/cli/cmd/cluster/component-render-manifest.go new file mode 100644 index 000000000..9e2ca6d4e --- /dev/null +++ b/cli/cmd/cluster/component-render-manifest.go @@ -0,0 +1,81 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cluster + +import ( + "fmt" + + log "github.com/sirupsen/logrus" + + "github.com/kinvolk/lokomotive/pkg/components" + "github.com/kinvolk/lokomotive/pkg/config" +) + +type ComponentRenderManifestOptions struct { + ConfigPath string + ValuesPath string +} + +func ComponentRenderManifest(contextLogger *log.Entry, componentsList []string, options ComponentRenderManifestOptions) error { + lokoConfig, diags := config.LoadConfig(options.ConfigPath, options.ValuesPath) + if diags.HasErrors() { + for _, diagnostic := range diags { + contextLogger.Error(diagnostic.Error()) + } + + return diags + } + + componentsToRender := selectComponentNames(componentsList, *lokoConfig.RootConfig) + + if err := renderComponentManifests(lokoConfig, componentsToRender); err != nil { + return fmt.Errorf("rendering component manifests: %w", err) + } + + return nil +} + +func renderComponentManifests(lokoConfig *config.Config, componentNames []string) error { + for _, componentName := range componentNames { + contextLogger := log.WithFields(log.Fields{ + "component": componentName, + }) + + component, err := components.Get(componentName) + if err != nil { + return fmt.Errorf("getting component %q: %w", componentName, err) + } + + componentConfigBody := lokoConfig.LoadComponentConfigBody(componentName) + + if diags := component.LoadConfig(componentConfigBody, lokoConfig.EvalContext); diags.HasErrors() { + for _, diagnostic := range diags { + contextLogger.Error(diagnostic.Error()) + } + return diags + } + + manifests, err := component.RenderManifests() + if err != 
nil { + return fmt.Errorf("rendering manifest of component %q: %w", componentName, err) + } + + fmt.Printf("# manifests for component %s\n", componentName) + for filename, manifest := range manifests { + fmt.Printf("\n---\n# %s\n%s", filename, manifest) + } + } + return nil +} diff --git a/cli/cmd/cluster/destroy.go b/cli/cmd/cluster/destroy.go new file mode 100644 index 000000000..57b8bbcd7 --- /dev/null +++ b/cli/cmd/cluster/destroy.go @@ -0,0 +1,70 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cluster + +import ( + "fmt" + + log "github.com/sirupsen/logrus" +) + +type DestroyOptions struct { + Confirm bool + Verbose bool + ConfigPath string + ValuesPath string +} + +func Destroy(contextLogger *log.Entry, options DestroyOptions) error { + cc := clusterConfig{ + verbose: options.Verbose, + configPath: options.ConfigPath, + valuesPath: options.ValuesPath, + } + + c, err := cc.initialize(contextLogger) + if err != nil { + return fmt.Errorf("initializing: %w", err) + } + + exists, err := clusterExists(c.terraformExecutor) + if err != nil { + return fmt.Errorf("checking if cluster exists: %w", err) + } + + if !exists { + contextLogger.Println("Cluster already destroyed, nothing to do") + + return nil + } + + if !options.Confirm { + confirmation := askForConfirmation("WARNING: This action cannot be undone. 
Do you really want to destroy the cluster?") + if !confirmation { + contextLogger.Println("Cluster destroy canceled") + + return nil + } + } + + if err := c.platform.Destroy(&c.terraformExecutor); err != nil { + return fmt.Errorf("destroying cluster: %v", err) + } + + contextLogger.Println("Cluster destroyed successfully") + contextLogger.Println("You can safely remove the assets directory now") + + return nil +} diff --git a/cli/cmd/cluster/health.go b/cli/cmd/cluster/health.go new file mode 100644 index 000000000..b6a46648e --- /dev/null +++ b/cli/cmd/cluster/health.go @@ -0,0 +1,108 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cluster
+
+import (
+ "fmt"
+ "os"
+ "text/tabwriter"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/kinvolk/lokomotive/pkg/config"
+ "github.com/kinvolk/lokomotive/pkg/k8sutil"
+ "github.com/kinvolk/lokomotive/pkg/lokomotive"
+)
+
+type HealthOptions struct {
+ ConfigPath string
+ ValuesPath string
+}
+
+//nolint:funlen
+func Health(contextLogger *log.Entry, options HealthOptions) error {
+ lokoConfig, diags := config.LoadConfig(options.ConfigPath, options.ValuesPath)
+ if diags.HasErrors() {
+ for _, diagnostic := range diags {
+ contextLogger.Error(diagnostic.Error())
+ }
+
+ return diags
+ }
+
+ kg := kubeconfigGetter{
+ platformRequired: true,
+ }
+
+ kubeconfig, err := kg.getKubeconfig(contextLogger, lokoConfig)
+ if err != nil {
+ contextLogger.Debugf("Error in finding kubeconfig file: %s", err)
+
+ return fmt.Errorf("suitable kubeconfig file not found. Did you run 'lokoctl cluster apply' ?")
+ }
+
+ cs, err := k8sutil.NewClientset(kubeconfig)
+ if err != nil {
+ return fmt.Errorf("creating Kubernetes client: %w", err)
+ }
+
+ // We can skip error checking here, as getKubeconfig() already checks it.
+ p, _ := getConfiguredPlatform(lokoConfig, true)
+
+ cluster := lokomotive.NewCluster(cs, p.Meta().ExpectedNodes)
+
+ ns, err := cluster.GetNodeStatus()
+ if err != nil {
+ return fmt.Errorf("getting node status: %w", err)
+ }
+
+ ns.PrettyPrint()
+
+ if !ns.Ready() {
+ return fmt.Errorf("cluster is not completely ready")
+ }
+
+ components, err := cluster.Health()
+ if err != nil {
+ return fmt.Errorf("getting Lokomotive cluster health: %w", err)
+ }
+
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)
+
+ // Print the header.
+ fmt.Fprintln(w, "Name\tStatus\tMessage\tError\t")
+
+ // An empty line between header and the body.
+ fmt.Fprintln(w, "\t\t\t\t")
+
+ for _, component := range components {
+
+ // The client-go library defines only one `ComponentConditionType` at the moment,
+ // which is `ComponentHealthy`.
However, iterating over the list keeps this from + // breaking in case client-go adds another `ComponentConditionType`. + for _, condition := range component.Conditions { + line := fmt.Sprintf( + "%s\t%s\t%s\t%s\t", + component.Name, condition.Status, condition.Message, condition.Error, + ) + + fmt.Fprintln(w, line) + } + + w.Flush() + } + + return nil +} diff --git a/cli/cmd/utils.go b/cli/cmd/cluster/utils.go similarity index 99% rename from cli/cmd/utils.go rename to cli/cmd/cluster/utils.go index 2a3d83912..f17181fc8 100644 --- a/cli/cmd/utils.go +++ b/cli/cmd/cluster/utils.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package cmd +package cluster import ( "fmt" diff --git a/cli/cmd/utils_internal_test.go b/cli/cmd/cluster/utils_internal_test.go similarity index 99% rename from cli/cmd/utils_internal_test.go rename to cli/cmd/cluster/utils_internal_test.go index 694e52959..4b31203fd 100644 --- a/cli/cmd/utils_internal_test.go +++ b/cli/cmd/cluster/utils_internal_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package cmd +package cluster import ( "fmt" diff --git a/cli/cmd/component-apply.go b/cli/cmd/component-apply.go index 1336c384d..fb3260be2 100644 --- a/cli/cmd/component-apply.go +++ b/cli/cmd/component-apply.go @@ -15,15 +15,12 @@ package cmd import ( - "fmt" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/kinvolk/lokomotive/cli/cmd/cluster" "github.com/kinvolk/lokomotive/pkg/components" - "github.com/kinvolk/lokomotive/pkg/components/util" - "github.com/kinvolk/lokomotive/pkg/config" ) var componentApplyCmd = &cobra.Command{ @@ -62,75 +59,13 @@ func runApply(cmd *cobra.Command, args []string) { log.SetLevel(log.DebugLevel) } - options := componentApplyOptions{ - kubeconfigPath: kubeconfigFlag, - configPath: viper.GetString("lokocfg"), - valuesPath: viper.GetString("lokocfg-vars"), + options := cluster.ComponentApplyOptions{ + KubeconfigPath: kubeconfigFlag, + ConfigPath: viper.GetString("lokocfg"), + ValuesPath: viper.GetString("lokocfg-vars"), } - if err := componentApply(contextLogger, args, options); err != nil { + if err := cluster.ComponentApply(contextLogger, args, options); err != nil { contextLogger.Fatalf("Applying components failed: %v", err) } } - -type componentApplyOptions struct { - kubeconfigPath string - configPath string - valuesPath string -} - -// componentApply implements 'lokoctl component apply' separated from CLI -// dependencies. 
-func componentApply(contextLogger *log.Entry, componentsList []string, options componentApplyOptions) error { - lokoConfig, diags := config.LoadConfig(options.configPath, options.valuesPath) - if diags.HasErrors() { - return diags - } - - componentObjects, err := componentNamesToObjects(selectComponentNames(componentsList, *lokoConfig.RootConfig)) - if err != nil { - return fmt.Errorf("getting component objects: %w", err) - } - - kg := kubeconfigGetter{ - platformRequired: false, - path: options.kubeconfigPath, - } - - kubeconfig, err := kg.getKubeconfig(contextLogger, lokoConfig) - if err != nil { - contextLogger.Debugf("Error in finding kubeconfig file: %s", err) - - return fmt.Errorf("suitable kubeconfig file not found. Did you run 'lokoctl cluster apply' ?") - } - - if err := applyComponents(lokoConfig, kubeconfig, componentObjects); err != nil { - return fmt.Errorf("applying components: %w", err) - } - - return nil -} - -// applyComponents reads the configuration of given components and applies them to the cluster pointer -// by given kubeconfig file content. 
-func applyComponents(lokoConfig *config.Config, kubeconfig []byte, componentObjects []components.Component) error { - for _, component := range componentObjects { - componentName := component.Metadata().Name - fmt.Printf("Applying component '%s'...\n", componentName) - - componentConfigBody := lokoConfig.LoadComponentConfigBody(componentName) - - if diags := component.LoadConfig(componentConfigBody, lokoConfig.EvalContext); diags.HasErrors() { - fmt.Printf("%v\n", diags) - return diags - } - - if err := util.InstallComponent(component, kubeconfig); err != nil { - return fmt.Errorf("installing component %q: %w", componentName, err) - } - - fmt.Printf("Successfully applied component '%s' configuration!\n", componentName) - } - - return nil -} diff --git a/cli/cmd/component-delete.go b/cli/cmd/component-delete.go index d3135eee6..7d5c256ea 100644 --- a/cli/cmd/component-delete.go +++ b/cli/cmd/component-delete.go @@ -15,16 +15,12 @@ package cmd import ( - "fmt" - "strings" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/kinvolk/lokomotive/cli/cmd/cluster" "github.com/kinvolk/lokomotive/pkg/components" - "github.com/kinvolk/lokomotive/pkg/components/util" - "github.com/kinvolk/lokomotive/pkg/config" ) var componentDeleteCmd = &cobra.Command{ @@ -64,116 +60,15 @@ func runDelete(cmd *cobra.Command, args []string) { log.SetLevel(log.DebugLevel) } - options := componentDeleteOptions{ - confirm: confirm, - deleteNamespace: deleteNamespace, - kubeconfigPath: kubeconfigFlag, - configPath: viper.GetString("lokocfg"), - valuesPath: viper.GetString("lokocfg-vars"), + options := cluster.ComponentDeleteOptions{ + Confirm: confirm, + DeleteNamespace: deleteNamespace, + KubeconfigPath: kubeconfigFlag, + ConfigPath: viper.GetString("lokocfg"), + ValuesPath: viper.GetString("lokocfg-vars"), } - if err := componentDelete(contextLogger, args, options); err != nil { + if err := cluster.ComponentDelete(contextLogger, args, options); err 
!= nil { contextLogger.Fatalf("Deleting components failed: %v", err) } } - -type componentDeleteOptions struct { - confirm bool - deleteNamespace bool - kubeconfigPath string - configPath string - valuesPath string -} - -// componentDelete implements 'lokoctl component delete' separated from CLI -// dependencies. -func componentDelete(contextLogger *log.Entry, componentsList []string, options componentDeleteOptions) error { - lokoConfig, diags := config.LoadConfig(options.configPath, options.valuesPath) - if diags.HasErrors() { - return diags - } - - componentsToDelete := selectComponentNames(componentsList, *lokoConfig.RootConfig) - - componentObjects, err := componentNamesToObjects(componentsToDelete) - if err != nil { - return fmt.Errorf("getting component objects: %v", err) - } - - confirmationMessage := fmt.Sprintf( - "The following components will be deleted:\n\t%s\n\nAre you sure you want to proceed?", - strings.Join(componentsToDelete, "\n\t"), - ) - - if !options.confirm && !askForConfirmation(confirmationMessage) { - contextLogger.Info("Components deletion cancelled.") - - return nil - } - - kg := kubeconfigGetter{ - platformRequired: false, - path: options.kubeconfigPath, - } - - kubeconfig, err := kg.getKubeconfig(contextLogger, lokoConfig) - if err != nil { - contextLogger.Debugf("Error in finding kubeconfig file: %s", err) - - return fmt.Errorf("suitable kubeconfig file not found. Did you run 'lokoctl cluster apply' ?") - } - - if err := deleteComponents(kubeconfig, componentObjects, options.deleteNamespace); err != nil { - return fmt.Errorf("deleting components: %w", err) - } - - return nil -} - -// selectComponentNames returns list of components to operate on. If explicit list is empty, -// it returns components defined in the configuration. 
-func selectComponentNames(list []string, lokomotiveConfig config.RootConfig) []string { - if len(list) != 0 { - return list - } - - for _, component := range lokomotiveConfig.Components { - list = append(list, component.Name) - } - - return list -} - -// componentNamesToObjects converts list of component names to list of component objects. -// If some component does not exist, error is returned. -func componentNamesToObjects(componentNames []string) ([]components.Component, error) { - c := []components.Component{} - - for _, componentName := range componentNames { - component, err := components.Get(componentName) - if err != nil { - return nil, fmt.Errorf("getting component %q: %w", componentName, err) - } - - c = append(c, component) - } - - return c, nil -} - -func deleteComponents(kubeconfig []byte, componentObjects []components.Component, deleteNamespace bool) error { - for _, compObj := range componentObjects { - fmt.Printf("Deleting component '%s'...\n", compObj.Metadata().Name) - - if err := util.UninstallComponent(compObj, kubeconfig, deleteNamespace); err != nil { - return fmt.Errorf("uninstalling component %q: %w", compObj.Metadata().Name, err) - } - - fmt.Printf("Successfully deleted component %q!\n", compObj.Metadata().Name) - } - - // Add a line to distinguish between info logs and errors, if any. 
- fmt.Println() - - return nil -} diff --git a/cli/cmd/component-render-manifest.go b/cli/cmd/component-render-manifest.go index 321b9d18d..cb5d2581f 100644 --- a/cli/cmd/component-render-manifest.go +++ b/cli/cmd/component-render-manifest.go @@ -15,14 +15,11 @@ package cmd import ( - "fmt" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" - "github.com/kinvolk/lokomotive/pkg/components" - "github.com/kinvolk/lokomotive/pkg/config" + "github.com/kinvolk/lokomotive/cli/cmd/cluster" ) var componentRenderCmd = &cobra.Command{ @@ -41,69 +38,12 @@ func runComponentRender(cmd *cobra.Command, args []string) { "args": args, }) - options := componentRenderManifestOptions{ - configPath: viper.GetString("lokocfg"), - valuesPath: viper.GetString("lokocfg-vars"), + options := cluster.ComponentRenderManifestOptions{ + ConfigPath: viper.GetString("lokocfg"), + ValuesPath: viper.GetString("lokocfg-vars"), } - if err := componentRenderManifest(contextLogger, args, options); err != nil { + if err := cluster.ComponentRenderManifest(contextLogger, args, options); err != nil { contextLogger.Fatalf("Rendering component manifests failed: %v", err) } } - -type componentRenderManifestOptions struct { - configPath string - valuesPath string -} - -func componentRenderManifest(contextLogger *log.Entry, componentsList []string, options componentRenderManifestOptions) error { - lokoConfig, diags := config.LoadConfig(options.configPath, options.valuesPath) - if diags.HasErrors() { - for _, diagnostic := range diags { - contextLogger.Error(diagnostic.Error()) - } - - return diags - } - - componentsToRender := selectComponentNames(componentsList, *lokoConfig.RootConfig) - - if err := renderComponentManifests(lokoConfig, componentsToRender); err != nil { - return fmt.Errorf("rendering component manifests: %w", err) - } - - return nil -} - -func renderComponentManifests(lokoConfig *config.Config, componentNames []string) error { - for _, componentName := range 
componentNames { - contextLogger := log.WithFields(log.Fields{ - "component": componentName, - }) - - component, err := components.Get(componentName) - if err != nil { - return fmt.Errorf("getting component %q: %w", componentName, err) - } - - componentConfigBody := lokoConfig.LoadComponentConfigBody(componentName) - - if diags := component.LoadConfig(componentConfigBody, lokoConfig.EvalContext); diags.HasErrors() { - for _, diagnostic := range diags { - contextLogger.Error(diagnostic.Error()) - } - return diags - } - - manifests, err := component.RenderManifests() - if err != nil { - return fmt.Errorf("rendering manifest of component %q: %w", componentName, err) - } - - fmt.Printf("# manifests for component %s\n", componentName) - for filename, manifest := range manifests { - fmt.Printf("\n---\n# %s\n%s", filename, manifest) - } - } - return nil -} diff --git a/cli/cmd/health.go b/cli/cmd/health.go index 9d0e53586..3b159c87a 100644 --- a/cli/cmd/health.go +++ b/cli/cmd/health.go @@ -15,17 +15,11 @@ package cmd import ( - "fmt" - "os" - "text/tabwriter" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" - "github.com/kinvolk/lokomotive/pkg/config" - "github.com/kinvolk/lokomotive/pkg/k8sutil" - "github.com/kinvolk/lokomotive/pkg/lokomotive" + "github.com/kinvolk/lokomotive/cli/cmd/cluster" ) var healthCmd = &cobra.Command{ @@ -51,93 +45,12 @@ func runHealth(cmd *cobra.Command, args []string) { log.SetLevel(log.DebugLevel) } - options := healthOptions{ - configPath: viper.GetString("lokocfg"), - valuesPath: viper.GetString("lokocfg-vars"), + options := cluster.HealthOptions{ + ConfigPath: viper.GetString("lokocfg"), + ValuesPath: viper.GetString("lokocfg-vars"), } - if err := health(contextLogger, options); err != nil { + if err := cluster.Health(contextLogger, options); err != nil { contextLogger.Fatalf("Checking cluster health failed: %v", err) } } - -type healthOptions struct { - configPath string - valuesPath string -} - 
-//nolint:funlen -func health(contextLogger *log.Entry, options healthOptions) error { - lokoConfig, diags := config.LoadConfig(options.configPath, options.valuesPath) - if diags.HasErrors() { - for _, diagnostic := range diags { - contextLogger.Error(diagnostic.Error()) - } - - return diags - } - - kg := kubeconfigGetter{ - platformRequired: true, - } - - kubeconfig, err := kg.getKubeconfig(contextLogger, lokoConfig) - if err != nil { - contextLogger.Debugf("Error in finding kubeconfig file: %s", err) - - return fmt.Errorf("suitable kubeconfig file not found. Did you run 'lokoctl cluster apply' ?") - } - - cs, err := k8sutil.NewClientset(kubeconfig) - if err != nil { - return fmt.Errorf("creating Kubernetes client: %w", err) - } - - // We can skip error checking here, as getKubeconfig() already checks it. - p, _ := getConfiguredPlatform(lokoConfig, true) - - cluster := lokomotive.NewCluster(cs, p.Meta().ExpectedNodes) - - ns, err := cluster.GetNodeStatus() - if err != nil { - return fmt.Errorf("getting node status: %w", err) - } - - ns.PrettyPrint() - - if !ns.Ready() { - return fmt.Errorf("cluster is not completely ready") - } - - components, err := cluster.Health() - if err != nil { - return fmt.Errorf("getting Lokomotive cluster health: %w", err) - } - - w := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0) - - // Print the header. - fmt.Fprintln(w, "Name\tStatus\tMessage\tError\t") - - // An empty line between header and the body. - fmt.Fprintln(w, "\t\t\t\t") - - for _, component := range components { - - // The client-go library defines only one `ComponenetConditionType` at the moment, - // which is `ComponentHealthy`. However, iterating over the list keeps this from - // breaking in case client-go adds another `ComponentConditionType`. 
- for _, condition := range component.Conditions { - line := fmt.Sprintf( - "%s\t%s\t%s\t%s\t", - component.Name, condition.Status, condition.Message, condition.Error, - ) - - fmt.Fprintln(w, line) - } - - w.Flush() - } - - return nil -}