From bdedd8fd23f3641710df9d78b35a4be2cb71f367 Mon Sep 17 00:00:00 2001
From: kakaZhou719 <83740799+kakaZhou719@users.noreply.github.com>
Date: Mon, 13 Jun 2022 22:07:53 +0800
Subject: [PATCH] bugfix: add auth flag for sealer scale (#1489)

---
 apply/apply.go                   |   4 -
 apply/processor/gen.go           |   4 +-
 apply/run.go                     |  30 +++++---
 apply/scale.go                   | 128 ++++++++++++++++++++++---
 cmd/sealer/cmd/delete.go         |   1 +
 cmd/sealer/cmd/join.go           |   9 +++
 cmd/sealer/cmd/upgrade.go        |   2 +-
 pkg/clusterfile/util.go          |  36 ++++++++-
 pkg/infra/aliyun/ali_provider.go |  27 ++++++-
 utils/yaml/yaml.go               |  28 -------
 10 files changed, 180 insertions(+), 89 deletions(-)

diff --git a/apply/apply.go b/apply/apply.go
index 89d9a4ad015..7ea16168bb4 100644
--- a/apply/apply.go
+++ b/apply/apply.go
@@ -86,10 +86,6 @@ func NewApplierFromFile(path string) (applydriver.Interface, error) {
     }, nil
 }
 
-func NewApplier(cluster *v2.Cluster) (applydriver.Interface, error) {
-    return NewDefaultApplier(cluster)
-}
-
 func NewDefaultApplier(cluster *v2.Cluster) (applydriver.Interface, error) {
     if cluster.Name == "" {
         return nil, fmt.Errorf("cluster name cannot be empty")
diff --git a/apply/processor/gen.go b/apply/processor/gen.go
index 610b72d3be2..3cf0bd04d11 100644
--- a/apply/processor/gen.go
+++ b/apply/processor/gen.go
@@ -20,7 +20,7 @@ import (
     "fmt"
     "strconv"
 
-    "github.com/sealerio/sealer/utils/yaml"
+    "github.com/sealerio/sealer/pkg/clusterfile"
 
     "github.com/sealerio/sealer/utils/net"
 
@@ -74,7 +74,7 @@ func NewGenerateProcessor() (Processor, error) {
 
 func (g *GenerateProcessor) init(cluster *v2.Cluster) error {
     fileName := fmt.Sprintf("%s/.sealer/%s/Clusterfile", common.GetHomeDir(), cluster.Name)
-    if err := yaml.MarshalToFile(fileName, cluster); err != nil {
+    if err := clusterfile.SaveToDisk(cluster, fileName); err != nil {
         return err
     }
     return nil
diff --git a/apply/run.go b/apply/run.go
index 5f6e536bf21..455ea20671c 100644
--- a/apply/run.go
+++ b/apply/run.go
@@ -49,29 +49,37 @@ func (c *ClusterArgs) SetClusterArgs() error {
     c.cluster.Kind = common.Cluster
     c.cluster.Name = c.runArgs.ClusterName
     c.cluster.Spec.Image = c.imageName
-    c.cluster.Spec.SSH.User = c.runArgs.User
-    c.cluster.Spec.SSH.Pk = c.runArgs.Pk
-    c.cluster.Spec.SSH.PkPasswd = c.runArgs.PkPassword
-    c.cluster.Spec.SSH.Port = strconv.Itoa(int(c.runArgs.Port))
     c.cluster.Spec.Env = append(c.cluster.Spec.Env, c.runArgs.CustomEnv...)
     c.cluster.Spec.CMDArgs = append(c.cluster.Spec.CMDArgs, c.runArgs.CMDArgs...)
-    if c.runArgs.Password != "" {
-        c.cluster.Spec.SSH.Passwd = c.runArgs.Password
-    }
+
     err := PreProcessIPList(c.runArgs)
     if err != nil {
         return err
     }
+
     if net.IsIPList(c.runArgs.Masters) && (net.IsIPList(c.runArgs.Nodes) || c.runArgs.Nodes == "") {
+        // add common ssh config.
+        c.cluster.Spec.SSH = v1.SSH{
+            User:     c.runArgs.User,
+            Passwd:   c.runArgs.Password,
+            Pk:       c.runArgs.Pk,
+            PkPasswd: c.runArgs.PkPassword,
+            Port:     strconv.Itoa(int(c.runArgs.Port)),
+        }
+
         masters := strings.Split(c.runArgs.Masters, ",")
         nodes := strings.Split(c.runArgs.Nodes, ",")
         c.hosts = []v2.Host{}
+
         c.setHostWithIpsPort(masters, common.MASTER)
-        if len(nodes) != 0 {
+        // If s does not contain sep and sep is not empty, Split returns a
+        // slice of length 1 whose only element is s.
+        if len(nodes) > 1 {
             c.setHostWithIpsPort(nodes, common.NODE)
         }
         c.cluster.Spec.Hosts = c.hosts
     } else {
+        // if the user executes sealer run without a password and infra info, choose the local host IP as the master0 IP.
         ip, err := net.GetLocalDefaultIP()
         if err != nil {
             return err
@@ -92,14 +100,14 @@ func (c *ClusterArgs) setHostWithIpsPort(ips []string, role string) {
     for i := range ips {
         ip, port := net.GetHostIPAndPortOrDefault(ips[i], strconv.Itoa(int(c.runArgs.Port)))
         if _, ok := hostMap[port]; !ok {
-            hostMap[port] = &v2.Host{IPS: []string{ip}, Roles: []string{role}, SSH: v1.SSH{Port: port}}
+            hostMap[port] = &v2.Host{IPS: []string{ip}, Roles: []string{role}}
             continue
         }
         hostMap[port].IPS = append(hostMap[port].IPS, ip)
     }
     _, master0Port := net.GetHostIPAndPortOrDefault(ips[0], strconv.Itoa(int(c.runArgs.Port)))
     for port, host := range hostMap {
-        host.IPS = removeIPListDuplicatesAndEmpty(host.IPS)
+        host.IPS = removeDuplicate(host.IPS)
         if port == master0Port && role == common.MASTER {
             c.hosts = append([]v2.Host{*host}, c.hosts...)
             continue
@@ -117,5 +125,5 @@ func NewApplierFromArgs(imageName string, runArgs *Args) (applydriver.Interface,
     if err := c.SetClusterArgs(); err != nil {
         return nil, err
     }
-    return NewApplier(c.cluster)
+    return NewDefaultApplier(c.cluster)
 }
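The run.go change above makes the cluster-level Spec.SSH the single place where sealer run records credentials; hosts now carry only IPs and roles, and per-host SSH is reserved for the overrides added by the scale path below. A minimal sketch of the object SetClusterArgs now produces, assuming only the sealer API types already referenced in the hunk (image name, password and IPs are placeholder values):

package main

import (
    "fmt"

    "github.com/sealerio/sealer/common"
    v1 "github.com/sealerio/sealer/types/api/v1"
    v2 "github.com/sealerio/sealer/types/api/v2"
)

func main() {
    var cluster v2.Cluster
    cluster.Kind = common.Cluster
    cluster.Name = "my-cluster"
    cluster.Spec.Image = "kubernetes:v1.22.15" // placeholder image name
    // common ssh config, shared by every host that does not override it.
    cluster.Spec.SSH = v1.SSH{
        User:   "root",
        Passwd: "placeholder-password",
        Port:   "22",
    }
    // hosts no longer carry their own SSH block at run time.
    cluster.Spec.Hosts = []v2.Host{
        {IPS: []string{"192.168.0.2"}, Roles: []string{common.MASTER}},
        {IPS: []string{"192.168.0.3"}, Roles: []string{common.NODE}},
    }
    fmt.Printf("%s: %d host group(s)\n", cluster.Name, len(cluster.Spec.Hosts))
}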
diff --git a/apply/scale.go b/apply/scale.go
index f2dbb2963ee..dd97d6514aa 100644
--- a/apply/scale.go
+++ b/apply/scale.go
@@ -16,8 +16,13 @@ package apply
 
 import (
     "fmt"
+    "strconv"
     "strings"
 
+    "github.com/sealerio/sealer/utils/hash"
+
+    v1 "github.com/sealerio/sealer/types/api/v1"
+
     "github.com/sealerio/sealer/utils/yaml"
 
     "github.com/sealerio/sealer/utils/net"
@@ -49,68 +54,109 @@ func NewScaleApplierFromArgs(clusterfile string, scaleArgs *Args, flag string) (
         return nil, err
     }
 
-    /* if err := utils.MarshalYamlToFile(clusterfile, cluster); err != nil {
-        return nil, err
-    }*/
-    applier, err := NewApplier(cluster)
+    applier, err := NewDefaultApplier(cluster)
     if err != nil {
         return nil, err
     }
     return applier, nil
 }
 
-func Join(cluster *v2.Cluster, scalingArgs *Args) error {
-    /* switch cluster.Spec.Provider {
-    case common.BAREMETAL:
-        return joinBaremetalNodes(cluster, scalingArgs)
-    case common.AliCloud:
-        return joinInfraNodes(cluster, scalingArgs)
-    case common.CONTAINER:
-        return joinInfraNodes(cluster, scalingArgs)
-    default:
-        return fmt.Errorf(" clusterfile provider type is not found !")
-    }*/
-    return joinBaremetalNodes(cluster, scalingArgs)
+func Join(cluster *v2.Cluster, scaleArgs *Args) error {
+    return joinBaremetalNodes(cluster, scaleArgs)
 }
 
 func joinBaremetalNodes(cluster *v2.Cluster, scaleArgs *Args) error {
-    if err := PreProcessIPList(scaleArgs); err != nil {
+    var err error
+    // merge custom env into the existing cluster.
+    cluster.Spec.Env = append(cluster.Spec.Env, scaleArgs.CustomEnv...)
+
+    if err = PreProcessIPList(scaleArgs); err != nil {
         return err
     }
+
     if (!net.IsIPList(scaleArgs.Nodes) && scaleArgs.Nodes != "") || (!net.IsIPList(scaleArgs.Masters) && scaleArgs.Masters != "") {
         return fmt.Errorf(" Parameter error: The current mode should submit iplist!")
     }
 
-    if scaleArgs.Masters != "" && net.IsIPList(scaleArgs.Masters) {
-        for i := 0; i < len(cluster.Spec.Hosts); i++ {
-            role := cluster.Spec.Hosts[i].Roles
-            if !strUtils.NotIn(common.MASTER, role) {
-                cluster.Spec.Hosts[i].IPS = removeIPListDuplicatesAndEmpty(append(cluster.Spec.Hosts[i].IPS, strings.Split(scaleArgs.Masters, ",")...))
-                break
-            }
-            if i == len(cluster.Spec.Hosts)-1 {
-                return fmt.Errorf("not found `master` role from file")
-            }
+    // if scaleArgs' ssh auth credential differs from the local cluster's, add it to each joined host;
+    // otherwise the joined hosts use the local cluster's ssh auth credential.
+    var changedSSH *v1.SSH
+
+    passwd := cluster.Spec.SSH.Passwd
+    if cluster.Spec.SSH.Encrypted {
+        passwd, err = hash.AesDecrypt([]byte(cluster.Spec.SSH.Passwd))
+        if err != nil {
+            return err
         }
     }
-    //add join node
-    if scaleArgs.Nodes != "" && net.IsIPList(scaleArgs.Nodes) {
-        for i := 0; i < len(cluster.Spec.Hosts); i++ {
-            role := cluster.Spec.Hosts[i].Roles
-            if !strUtils.NotIn(common.NODE, role) {
-                cluster.Spec.Hosts[i].IPS = removeIPListDuplicatesAndEmpty(append(cluster.Spec.Hosts[i].IPS, strings.Split(scaleArgs.Nodes, ",")...))
-                break
+
+    if scaleArgs.Password != "" && scaleArgs.Password != passwd {
+        // encrypt the password here to avoid a merge failure later.
+        passwd, err = hash.AesEncrypt([]byte(scaleArgs.Password))
+        if err != nil {
+            return err
+        }
+        changedSSH = &v1.SSH{
+            Encrypted: true,
+            User:      scaleArgs.User,
+            Passwd:    passwd,
+            Pk:        scaleArgs.Pk,
+            PkPasswd:  scaleArgs.PkPassword,
+            Port:      strconv.Itoa(int(scaleArgs.Port)),
+        }
+    }
+
+    // add joined masters.
+    if scaleArgs.Masters != "" {
+        masterIPs := cluster.GetMasterIPList()
+        addedMasterIP := removeDuplicate(strings.Split(scaleArgs.Masters, ","))
+
+        for _, ip := range addedMasterIP {
+            // if the ip is already taken by a master, return a duplicated-ip error.
+            if !strUtils.NotIn(ip, masterIPs) {
+                return fmt.Errorf("failed to scale master for duplicated ip: %s", ip)
             }
-            if i == len(cluster.Spec.Hosts)-1 {
-                hosts := v2.Host{IPS: removeIPListDuplicatesAndEmpty(strings.Split(scaleArgs.Nodes, ",")), Roles: []string{common.NODE}}
-                cluster.Spec.Hosts = append(cluster.Spec.Hosts, hosts)
+        }
+
+        host := v2.Host{
+            IPS:   addedMasterIP,
+            Roles: []string{common.MASTER},
+        }
+
+        if changedSSH != nil {
+            host.SSH = *changedSSH
+        }
+
+        cluster.Spec.Hosts = append(cluster.Spec.Hosts, host)
+    }
+
+    // add joined nodes.
+    if scaleArgs.Nodes != "" {
+        nodeIPs := cluster.GetNodeIPList()
+        addedNodeIP := removeDuplicate(strings.Split(scaleArgs.Nodes, ","))
+
+        for _, ip := range addedNodeIP {
+            // if the ip is already taken by a node, return a duplicated-ip error.
+            if !strUtils.NotIn(ip, nodeIPs) {
+                return fmt.Errorf("failed to scale node for duplicated ip: %s", ip)
             }
         }
+
+        host := v2.Host{
+            IPS:   addedNodeIP,
+            Roles: []string{common.NODE},
+        }
+
+        if changedSSH != nil {
+            host.SSH = *changedSSH
+        }
+
+        cluster.Spec.Hosts = append(cluster.Spec.Hosts, host)
     }
     return nil
 }
 
-func removeIPListDuplicatesAndEmpty(ipList []string) []string {
+func removeDuplicate(ipList []string) []string {
     return strUtils.RemoveDuplicate(strUtils.NewComparator(ipList, []string{""}).GetSrcSubtraction())
 }
 
@@ -119,16 +165,22 @@ func Delete(cluster *v2.Cluster, scaleArgs *Args) error {
 }
 
 func deleteBaremetalNodes(cluster *v2.Cluster, scaleArgs *Args) error {
+    // add custom env params for the delete option here so that the user's clean scripts can be executed via env.
+    cluster.Spec.Env = append(cluster.Spec.Env, scaleArgs.CustomEnv...)
+
     if err := PreProcessIPList(scaleArgs); err != nil {
         return err
     }
+
     if (!net.IsIPList(scaleArgs.Nodes) && scaleArgs.Nodes != "") || (!net.IsIPList(scaleArgs.Masters) && scaleArgs.Masters != "") {
         return fmt.Errorf(" Parameter error: The current mode should submit iplist!")
     }
+
     //master0 machine cannot be deleted
     if !strUtils.NotIn(cluster.GetMaster0IP(), strings.Split(scaleArgs.Masters, ",")) {
         return fmt.Errorf("master0 machine cannot be deleted")
     }
+
     if scaleArgs.Masters != "" && net.IsIPList(scaleArgs.Masters) {
         for i := range cluster.Spec.Hosts {
             if !strUtils.NotIn(common.MASTER, cluster.Spec.Hosts[i].Roles) {
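The heart of the scale change above is deciding whether the joined hosts need their own SSH block. Distilled into a standalone helper for illustration — the helper name overrideSSH is hypothetical, and the sketch assumes the hash.AesEncrypt/AesDecrypt signatures used in the hunk and that it lives in the apply package next to Args:

package apply

import (
    "strconv"

    v1 "github.com/sealerio/sealer/types/api/v1"
    v2 "github.com/sealerio/sealer/types/api/v2"
    "github.com/sealerio/sealer/utils/hash"
)

// overrideSSH returns a per-host SSH block when the credential passed on the
// command line differs from the cluster-level one, and nil when the joined
// hosts should simply inherit cluster.Spec.SSH.
func overrideSSH(cluster *v2.Cluster, scaleArgs *Args) (*v1.SSH, error) {
    passwd := cluster.Spec.SSH.Passwd
    if cluster.Spec.SSH.Encrypted {
        // the on-disk Clusterfile stores the password encrypted; decrypt before comparing.
        decrypted, err := hash.AesDecrypt([]byte(cluster.Spec.SSH.Passwd))
        if err != nil {
            return nil, err
        }
        passwd = decrypted
    }
    if scaleArgs.Password == "" || scaleArgs.Password == passwd {
        return nil, nil
    }
    encrypted, err := hash.AesEncrypt([]byte(scaleArgs.Password))
    if err != nil {
        return nil, err
    }
    return &v1.SSH{
        Encrypted: true,
        User:      scaleArgs.User,
        Passwd:    encrypted,
        Pk:        scaleArgs.Pk,
        PkPasswd:  scaleArgs.PkPassword,
        Port:      strconv.Itoa(int(scaleArgs.Port)),
    }, nil
}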
diff --git a/cmd/sealer/cmd/delete.go b/cmd/sealer/cmd/delete.go
index 2ef313b470a..178dd3497b1 100644
--- a/cmd/sealer/cmd/delete.go
+++ b/cmd/sealer/cmd/delete.go
@@ -96,6 +96,7 @@ func init() {
     deleteCmd.Flags().StringVarP(&deleteArgs.Nodes, "nodes", "n", "", "reduce Count or IPList to nodes")
     deleteCmd.Flags().StringVarP(&deleteClusterFile, "Clusterfile", "f", "", "delete a kubernetes cluster with Clusterfile Annotations")
     deleteCmd.Flags().StringVarP(&deleteClusterName, "cluster", "c", "", "delete a kubernetes cluster with cluster name")
+    deleteCmd.Flags().StringSliceVarP(&deleteArgs.CustomEnv, "env", "e", []string{}, "set custom environment variables")
     deleteCmd.Flags().BoolVar(&runtime.ForceDelete, "force", false, "We also can input an --force flag to delete cluster by force")
     deleteCmd.Flags().BoolP("all", "a", false, "this flags is for delete nodes, if this is true, empty all node ip")
 }
diff --git a/cmd/sealer/cmd/join.go b/cmd/sealer/cmd/join.go
index f0f79213177..7e0ea9e0c94 100644
--- a/cmd/sealer/cmd/join.go
+++ b/cmd/sealer/cmd/join.go
@@ -15,6 +15,7 @@ package cmd
 
 import (
+    "github.com/sealerio/sealer/pkg/cert"
     "github.com/spf13/cobra"
 
     "github.com/sealerio/sealer/apply"
@@ -56,6 +57,14 @@ join default cluster:
 func init() {
     joinArgs = &apply.Args{}
     rootCmd.AddCommand(joinCmd)
+
+    joinCmd.Flags().StringVarP(&joinArgs.User, "user", "u", "root", "set baremetal server username")
+    joinCmd.Flags().StringVarP(&joinArgs.Password, "passwd", "p", "", "set cloud provider or baremetal server password")
+    joinCmd.Flags().Uint16Var(&joinArgs.Port, "port", 22, "set the sshd service port number for the server (default port: 22)")
+    joinCmd.Flags().StringVar(&joinArgs.Pk, "pk", cert.GetUserHomeDir()+"/.ssh/id_rsa", "set baremetal server private key")
+    joinCmd.Flags().StringVar(&joinArgs.PkPassword, "pk-passwd", "", "set baremetal server private key password")
+    joinCmd.Flags().StringSliceVarP(&joinArgs.CustomEnv, "env", "e", []string{}, "set custom environment variables")
+
     joinCmd.Flags().StringVarP(&joinArgs.Masters, "masters", "m", "", "set Count or IPList to masters")
     joinCmd.Flags().StringVarP(&joinArgs.Nodes, "nodes", "n", "", "set Count or IPList to nodes")
     joinCmd.Flags().StringVarP(&clusterName, "cluster-name", "c", "", "specify the name of cluster")
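With the flags above in place, sealer join accepts the same auth options as sealer run. A scale-out that supplies a different credential presumably looks like `sealer join --masters 192.168.0.5 --user root --passwd OtherPasswd --port 22 -e Key=Value -c my-cluster` (all values are placeholders); when the password differs from the one stored in the Clusterfile, it is encrypted and attached to the joined hosts as the per-host SSH block shown in apply/scale.go above.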
diff --git a/cmd/sealer/cmd/upgrade.go b/cmd/sealer/cmd/upgrade.go
index bc3102d4a30..3c1c3bbe179 100644
--- a/cmd/sealer/cmd/upgrade.go
+++ b/cmd/sealer/cmd/upgrade.go
@@ -53,7 +53,7 @@ var upgradeCmd = &cobra.Command{
         if err != nil {
             return err
         }
-        applier, err := apply.NewApplier(desiredCluster)
+        applier, err := apply.NewDefaultApplier(desiredCluster)
         if err != nil {
             return err
         }
diff --git a/pkg/clusterfile/util.go b/pkg/clusterfile/util.go
index 9efa4be78bc..f8065071c0b 100644
--- a/pkg/clusterfile/util.go
+++ b/pkg/clusterfile/util.go
@@ -21,12 +21,13 @@ import (
     "path/filepath"
     "strings"
 
+    "github.com/sealerio/sealer/utils/hash"
+    yamlUtils "github.com/sealerio/sealer/utils/yaml"
     "github.com/sealerio/sealer/common"
     "github.com/sealerio/sealer/pkg/cert"
 
     v2 "github.com/sealerio/sealer/types/api/v2"
-    k8sRuntime "k8s.io/apimachinery/pkg/runtime"
 )
 
 var ErrClusterNotExist = fmt.Errorf("no cluster exist")
@@ -72,13 +73,42 @@ func GetDefaultCluster() (cluster *v2.Cluster, err error) {
     return GetClusterFromFile(fmt.Sprintf("%s/.sealer/%s/Clusterfile", userHome, name))
 }
 
-func SaveToDisk(cluster k8sRuntime.Object, clusterName string) error {
+// SaveToDisk saves the cluster object to disk with its ssh credentials encrypted.
+func SaveToDisk(cluster *v2.Cluster, clusterName string) error {
     fileName := common.GetClusterWorkClusterfile(clusterName)
     err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
     if err != nil {
         return fmt.Errorf("mkdir failed %s %v", fileName, err)
     }
-    cluster = cluster.DeepCopyObject()
+
+    // if the user runs the cluster image without a password, skip encryption.
+    if !cluster.Spec.SSH.Encrypted && cluster.Spec.SSH.Passwd != "" {
+        passwd, err := hash.AesEncrypt([]byte(cluster.Spec.SSH.Passwd))
+        if err != nil {
+            return err
+        }
+        cluster.Spec.SSH.Passwd = passwd
+        cluster.Spec.SSH.Encrypted = true
+    }
+
+    var hosts []v2.Host
+    for _, host := range cluster.Spec.Hosts {
+        if len(host.IPS) == 0 {
+            continue
+        }
+        if !host.SSH.Encrypted && host.SSH.Passwd != "" {
+            passwd, err := hash.AesEncrypt([]byte(host.SSH.Passwd))
+            if err != nil {
+                return err
+            }
+            host.SSH.Passwd = passwd
+            host.SSH.Encrypted = true
+        }
+        hosts = append(hosts, host)
+    }
+
+    cluster.Spec.Hosts = hosts
+
     err = yamlUtils.MarshalToFile(fileName, cluster)
     if err != nil {
         return fmt.Errorf("marshal cluster file failed %v", err)
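SaveToDisk above only encrypts when Encrypted is still false, so writing the same cluster twice does not double-encrypt, and readers are expected to check the flag before using the password. A usage sketch under that assumption (error handling reduced to panic for brevity):

package main

import (
    "fmt"

    "github.com/sealerio/sealer/pkg/clusterfile"
    "github.com/sealerio/sealer/utils/hash"
)

func main() {
    cluster, err := clusterfile.GetDefaultCluster()
    if err != nil {
        panic(err)
    }

    // persists the Clusterfile with ssh passwords encrypted and Encrypted set to true.
    if err := clusterfile.SaveToDisk(cluster, cluster.Name); err != nil {
        panic(err)
    }

    // consumers decrypt on demand; the flag tells them whether they need to.
    passwd := cluster.Spec.SSH.Passwd
    if cluster.Spec.SSH.Encrypted {
        if passwd, err = hash.AesDecrypt([]byte(cluster.Spec.SSH.Passwd)); err != nil {
            panic(err)
        }
    }
    fmt.Println("password length:", len(passwd))
}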
"github.com/sealerio/sealer/utils/hash" + yamlUtils "github.com/sealerio/sealer/utils/yaml" "github.com/sealerio/sealer/common" "github.com/sealerio/sealer/pkg/cert" v2 "github.com/sealerio/sealer/types/api/v2" - k8sRuntime "k8s.io/apimachinery/pkg/runtime" ) var ErrClusterNotExist = fmt.Errorf("no cluster exist") @@ -72,13 +73,42 @@ func GetDefaultCluster() (cluster *v2.Cluster, err error) { return GetClusterFromFile(fmt.Sprintf("%s/.sealer/%s/Clusterfile", userHome, name)) } -func SaveToDisk(cluster k8sRuntime.Object, clusterName string) error { +// SaveToDisk save cluster obj to disk file with encrypted ssh credential +func SaveToDisk(cluster *v2.Cluster, clusterName string) error { fileName := common.GetClusterWorkClusterfile(clusterName) err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm) if err != nil { return fmt.Errorf("mkdir failed %s %v", fileName, err) } - cluster = cluster.DeepCopyObject() + + // if user run cluster image without password,skip to encrypt. + if !cluster.Spec.SSH.Encrypted && cluster.Spec.SSH.Passwd != "" { + passwd, err := hash.AesEncrypt([]byte(cluster.Spec.SSH.Passwd)) + if err != nil { + return err + } + cluster.Spec.SSH.Passwd = passwd + cluster.Spec.SSH.Encrypted = true + } + + var hosts []v2.Host + for _, host := range cluster.Spec.Hosts { + if len(host.IPS) == 0 { + continue + } + if !host.SSH.Encrypted && host.SSH.Passwd != "" { + passwd, err := hash.AesEncrypt([]byte(host.SSH.Passwd)) + if err != nil { + return err + } + host.SSH.Passwd = passwd + host.SSH.Encrypted = true + } + hosts = append(hosts, host) + } + + cluster.Spec.Hosts = hosts + err = yamlUtils.MarshalToFile(fileName, cluster) if err != nil { return fmt.Errorf("marshal cluster file failed %v", err) diff --git a/pkg/infra/aliyun/ali_provider.go b/pkg/infra/aliyun/ali_provider.go index 3932afbd244..74859fb90ff 100644 --- a/pkg/infra/aliyun/ali_provider.go +++ b/pkg/infra/aliyun/ali_provider.go @@ -15,14 +15,20 @@ package aliyun import ( + "fmt" + "os" + "path/filepath" "strings" "time" + "github.com/sealerio/sealer/common" + "github.com/sealerio/sealer/utils/hash" + "github.com/sealerio/sealer/utils/yaml" + "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs" "github.com/aliyun/alibaba-cloud-sdk-go/services/vpc" "github.com/sirupsen/logrus" - "github.com/sealerio/sealer/pkg/clusterfile" v1 "github.com/sealerio/sealer/types/api/v1" ) @@ -112,11 +118,28 @@ func (a *AliProvider) ReconcileResource(resourceKey string, action Alifunc) erro return err } logrus.Infof("create resource success %s: %s", resourceKey, a.Cluster.Annotations[resourceKey]) - return clusterfile.SaveToDisk(a.Cluster, a.Cluster.Name) + return a.SaveToDisk() } return nil } +func (a *AliProvider) SaveToDisk() error { + fileName := common.GetClusterWorkClusterfile(a.Cluster.Name) + err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm) + if err != nil { + return fmt.Errorf("mkdir failed %s %v", fileName, err) + } + + passwd, err := hash.AesEncrypt([]byte(a.Cluster.Spec.SSH.Passwd)) + if err != nil { + return err + } + a.Cluster.Spec.SSH.Passwd = passwd + a.Cluster.Spec.SSH.Encrypted = true + + return yaml.MarshalToFile(fileName, a.Cluster) +} + func (a *AliProvider) DeleteResource(resourceKey string, action Alifunc) { if a.Cluster.Annotations[resourceKey] != "" { err := action() diff --git a/utils/yaml/yaml.go b/utils/yaml/yaml.go index e9e937f3acb..eb256979589 100644 --- a/utils/yaml/yaml.go +++ b/utils/yaml/yaml.go @@ -24,12 +24,7 @@ import ( osi "github.com/sealerio/sealer/utils/os" - 
"github.com/sealerio/sealer/utils/hash" - "sigs.k8s.io/yaml" - - v1 "github.com/sealerio/sealer/types/api/v1" - v2 "github.com/sealerio/sealer/types/api/v2" ) func UnmarshalFile(file string, obj interface{}) error { @@ -46,29 +41,6 @@ func UnmarshalFile(file string, obj interface{}) error { } func MarshalToFile(file string, obj interface{}) error { - switch cluster := obj.(type) { - case *v1.Cluster: - if cluster.Spec.SSH.Encrypted { - break - } - passwd, err := hash.AesEncrypt([]byte(cluster.Spec.SSH.Passwd)) - if err != nil { - return err - } - cluster.Spec.SSH.Passwd = passwd - cluster.Spec.SSH.Encrypted = true - case *v2.Cluster: - if cluster.Spec.SSH.Encrypted { - break - } - passwd, err := hash.AesEncrypt([]byte(cluster.Spec.SSH.Passwd)) - if err != nil { - return err - } - cluster.Spec.SSH.Passwd = passwd - cluster.Spec.SSH.Encrypted = true - default: - } data, err := yaml.Marshal(obj) if err != nil { return err