diff --git a/config/yurtctl-servant/Dockerfile b/config/yurtctl-servant/Dockerfile index fd1f3963cfa..1154c8492cd 100644 --- a/config/yurtctl-servant/Dockerfile +++ b/config/yurtctl-servant/Dockerfile @@ -1,4 +1 @@ -FROM alpine:3.8 - -RUN mkdir -p /var/lib/openyurt -ADD setup_edgenode /var/lib/openyurt +FROM alpine:3.8 \ No newline at end of file diff --git a/config/yurtctl-servant/setup_edgenode b/config/yurtctl-servant/setup_edgenode deleted file mode 100755 index f108e677392..00000000000 --- a/config/yurtctl-servant/setup_edgenode +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/env bash -# This script can not be executed directly, it is baked in the -# openyurt/yurtctl-servant, before exeuction, context value (i.e., __variable__) -# need to be replaced based on the environment variables set in the pod, -# and will be executed as a subprogram of the nsenter command. - -set -o errexit -set -o pipefail - -KUBELET_SVC=${KUBELET_SVC:-/etc/systemd/system/kubelet.service.d/10-kubeadm.conf} -OPENYURT_DIR=${OPENYURT_DIR:-/var/lib/openyurt} -STATIC_POD_PATH=${STATIC_POD_PATH:-/etc/kubernetes/manifests} -ACTION=$1 - -# PROVIDER can be nounset -set -o nounset - -declare -r YURTHUB_TEMPLATE=' -apiVersion: v1 -kind: Pod -metadata: - labels: - k8s-app: yurt-hub - name: yurt-hub - namespace: kube-system -spec: - volumes: - - name: hub-dir - hostPath: - path: /var/lib/yurthub - type: DirectoryOrCreate - - name: kubernetes - hostPath: - path: /etc/kubernetes - type: Directory - containers: - - name: yurt-hub - image: __yurthub_image__ - imagePullPolicy: IfNotPresent - volumeMounts: - - name: hub-dir - mountPath: /var/lib/yurthub - - name: kubernetes - mountPath: /etc/kubernetes - command: - - yurthub - - --v=2 - - --server-addr=__kubernetes_service_addr__ - - --node-name=$(NODE_NAME) - - --join-token=__join_token__ - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /v1/healthz - port: 10261 - initialDelaySeconds: 300 - periodSeconds: 5 - failureThreshold: 3 - resources: - requests: - cpu: 150m - memory: 150Mi - limits: - memory: 300Mi - securityContext: - capabilities: - add: ["NET_ADMIN", "NET_RAW"] - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - hostNetwork: true - priorityClassName: system-node-critical - priority: 2000001000 -' - -# log outputs the log message with date and program prefix -log() { - echo "$(date +"%m/%d/%Y-%T-%Z") [YURT_SERVANT] [LOG] $@" -} - -# error outputs the error message with data program prefix -error() { - echo "$(date +"%m/%d/%Y-%T-%Z") [YURT_SERVANT] [ERROR] $@" -} - -check_addr() -{ - echo $1|grep -E '^(http(s)?:\/\/)?[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+(:[0-9]{1,5})?$' > /dev/null; - if [ $? -ne 0 ] - then - log "apiserver addr $1 is error." - exit 1 - fi - - log "apiserver addr $1 is ok." - return 0 -} - -# preset creates the /var/lib/kubelet/pki/kubelet-client-current.pem if -# it does not exist -preset() { - # if $KUBELET_CLI_PEM doesn't exist, create one based on 'client-certificate-data' - # and 'client-key-data' of $KUBELET_CONF - if [ ! -f $KUBELET_SVC ]; then - log "$KUBELET_SVC don't exist." - exit 1 - fi - if [ ! -d $STATIC_POD_PATH ]; then - log "$STATIC_POD_PATH don't exist." - exit 1 - fi -} - -# setup_yurthub sets up the yurthub pod and wait for the its status to be Running -setup_yurthub() { - # 1. 
put yurt-hub yaml into /etc/kubernetes/manifests - log "setting up yurthub on nodes" - KUBELET_CONF=`cat $KUBELET_SVC | grep -Eo '\-\-kubeconfig=.*kubelet.conf' | awk -F '=' '{print $2}'` - apiserver_addr=`cat $KUBELET_CONF | grep "server:" | awk '{print $2}'` - check_addr $apiserver_addr - log "setting up yurthub apiserver addr ${apiserver_addr}." - yurthub_yaml=$(echo "$YURTHUB_TEMPLATE" | - sed "s|__kubernetes_service_addr__|${apiserver_addr}|") - - echo "$yurthub_yaml" > ${STATIC_POD_PATH}/yurt-hub.yaml - log "create the ${STATIC_POD_PATH}/yurt-hub.yaml" - # 2. wait yurthub pod to be ready - local retry=5 - while [ $retry -ge 0 ] - do - sleep 10 - # NOTE: context variables need to be replaced before exeuction - set +e - local hub_healthz - hub_healthz=$(netstat -nlutp | grep 10261 | wc -l) - set -e - - if [ "$hub_healthz" == "1" ]; then - log "yurt-hub-$NODE_NAME healthz is OK" - return - else - retry=$((retry-1)) - if [ $retry -ge 0 ]; then - log "yurt-hub-$NODE_NAME is not ready, will retry $retry times" - else - error "yurt-hub-$NODE_NAME failed, after retry 5 times" - exit 1 - fi - continue - fi - done -} - -# reset_kubelet changes the configuration of the kubelet service and restart it -reset_kubelet() { - # 1. create a working dir to store revised kubelet.conf - mkdir -p $OPENYURT_DIR - cp $KUBELET_CONF $OPENYURT_DIR/ - # 2. revise the copy of the kubelet.conf - cat << EOF > $OPENYURT_DIR/kubelet.conf -apiVersion: v1 -clusters: -- cluster: - server: http://127.0.0.1:10261 - name: default-cluster -contexts: -- context: - cluster: default-cluster - namespace: default - user: default-auth - name: default-context -current-context: default-context -kind: Config -preferences: {} -EOF - log "revised kubeconfig $OPENYURT_DIR/kubelet.conf is generated" - # 3. revise the kubelet.service drop-in - # 3.1 make a backup for the origin kubelet.service - cp $KUBELET_SVC ${KUBELET_SVC}.bk - # 3.2 revise the drop-in, point it to the $OPENYURT_DIR/kubelet.conf - sed -i "s/--bootstrap.*bootstrap-kubelet.conf//g; - s|--kubeconfig=.*kubelet.conf|--kubeconfig=$OPENYURT_DIR\/kubelet.conf|g" $KUBELET_SVC - log "kubelet.service drop-in file is revised" - # 4. reset the kubelete.service - systemctl daemon-reload - systemctl restart kubelet.service - log "kubelet has been restarted" -} - -# remove_yurthub deletes the yurt-hub pod -remove_yurthub() { - # remove the yurt-hub.yaml to delete the yurt-hub - [ -f $STATIC_POD_PATH/yurt-hub.yaml ] && - rm $STATIC_POD_PATH/yurt-hub.yaml - log "yurt-hub has been removed" -} - -# revert_kubelet resets the kubelet service and makes it connect to the -# apiserver directly -revert_kubelet() { - # 1. remove openyurt's kubelet.conf if exist - [ -f $OPENYURT_DIR/kubelet.conf ] && rm $OPENYURT_DIR/kubelet.conf - if [ -f ${KUBELET_SVC}.bk ]; then - # if found, use the backup file - log "found backup file ${KUBELET_SVC}.bk, will use it to revert the node" - mv ${KUBELET_SVC}.bk $KUBELET_SVC - else - # if the backup file doesn't not exist, revise the kubelet.service drop-in - log "didn't find the ${KUBELET_SVC}.bk, will revise the $KUBELET_SVC directly" - exit 1 - fi - # 2. 
reset the kubelete.service - systemctl daemon-reload - systemctl restart kubelet.service - log "kubelet has been reset back to default" -} - -case $ACTION in - convert) - preset - setup_yurthub - reset_kubelet - ;; - revert) - revert_kubelet - remove_yurthub - ;; - *) - error "unknown action $ACTION" - exit 1 - ;; -esac - - - -log "done" diff --git a/hack/lib/release-images.sh b/hack/lib/release-images.sh index e3b16f6510d..dcdbded2f87 100755 --- a/hack/lib/release-images.sh +++ b/hack/lib/release-images.sh @@ -133,8 +133,6 @@ function build_yurtctl_servant_image() { esac cat << EOF > $docker_file_path FROM $base_image -RUN mkdir -p /var/lib/openyurt -ADD setup_edgenode /var/lib/openyurt EOF ln $servant_script_path $docker_build_path/setup_edgenode docker build --no-cache -t $yurtctl_servant_image -f $docker_file_path $docker_build_path diff --git a/pkg/yurtctl/cmd/convert/convert.go b/pkg/yurtctl/cmd/convert/convert.go index 0720dcd75f0..2c45fd5c273 100644 --- a/pkg/yurtctl/cmd/convert/convert.go +++ b/pkg/yurtctl/cmd/convert/convert.go @@ -22,12 +22,14 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" bootstrapapi "k8s.io/cluster-bootstrap/token/api" "k8s.io/klog" clusterinfophase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" "github.com/alibaba/openyurt/pkg/projectinfo" "github.com/alibaba/openyurt/pkg/yurtctl/constants" @@ -89,6 +91,8 @@ func NewConvertCmd() *cobra.Command { }, } + cmd.AddCommand(NewConvertEdgeNodeCmd()) + cmd.Flags().StringP("cloud-nodes", "c", "", "The list of cloud nodes.(e.g. -c cloudnode1,cloudnode2)") cmd.Flags().StringP("provider", "p", "minikube", @@ -227,11 +231,23 @@ func (co *ConvertOptions) RunConvert() (err error) { } klog.V(4).Info("the server version is valid") - // 2. label nodes as cloud node or edge node + // 1.1. check the state of worker nodes nodeLst, err := co.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return } + for _, node := range nodeLst.Items { + if !strutil.IsInStringLst(co.CloudNodes, node.GetName()) { + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + if condition == nil || condition.Status != v1.ConditionTrue { + klog.Errorf("Cannot do the convert, the status of worker node: %s is not 'Ready'.", node.Name) + return + } + } + } + klog.V(4).Info("the status of worker nodes are satisfied") + + // 2. label nodes as cloud node or edge node var edgeNodeNames []string for _, node := range nodeLst.Items { if strutil.IsInStringLst(co.CloudNodes, node.GetName()) { @@ -243,14 +259,7 @@ func (co *ConvertOptions) RunConvert() (err error) { } continue } - // label node as edge node - klog.Infof("mark %s as the edge-node", node.GetName()) edgeNodeNames = append(edgeNodeNames, node.GetName()) - _, err = kubeutil.LabelNode(co.clientSet, - &node, projectinfo.GetEdgeWorkerLabelKey(), "true") - if err != nil { - return - } } // 3. 
deploy yurt controller manager @@ -331,7 +340,7 @@ func (co *ConvertOptions) RunConvert() (err error) { "joinToken": joinToken, "pod_manifest_path": co.PodMainfestPath, "kubeadm_conf_path": co.KubeadmConfPath, - }, edgeNodeNames); err != nil { + }, edgeNodeNames, true); err != nil { klog.Errorf("fail to run ServantJobs: %s", err) return } diff --git a/pkg/yurtctl/cmd/convert/edgenode.go b/pkg/yurtctl/cmd/convert/edgenode.go new file mode 100644 index 00000000000..602f084b4d2 --- /dev/null +++ b/pkg/yurtctl/cmd/convert/edgenode.go @@ -0,0 +1,408 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package convert + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" + + "github.com/alibaba/openyurt/pkg/projectinfo" + enutil "github.com/alibaba/openyurt/pkg/yurtctl/util/edgenode" + kubeutil "github.com/alibaba/openyurt/pkg/yurtctl/util/kubernetes" + strutil "github.com/alibaba/openyurt/pkg/yurtctl/util/strings" + "github.com/alibaba/openyurt/pkg/yurthub/healthchecker" +) + +const ( + kubeletConfigRegularExpression = "\\-\\-kubeconfig=.*kubelet.conf" + apiserverAddrRegularExpression = "server: (http(s)?:\\/\\/)?[\\w][\\w]{0,62}(\\.[\\w][-\\w]{0,62})+(:[\\d]{1,5})?" + hubHealthzCheckFrequency = 10 * time.Second + failedRetry = 5 + filemode = 0666 + dirmode = 0755 +) + +// ConvertEdgeNodeOptions has the information required by sub command convert edgenode +type ConvertEdgeNodeOptions struct { + clientSet *kubernetes.Clientset + EdgeNodes []string + YurthubImage string + YurctlServantImage string + PodMainfestPath string + JoinToken string + KubeadmConfPath string + openyurtDir string +} + +// NewConvertEdgeNodeOptions creates a new ConvertEdgeNodeOptions +func NewConvertEdgeNodeOptions() *ConvertEdgeNodeOptions { + return &ConvertEdgeNodeOptions{} +} + +// NewConvertEdgeNodeCmd generates a new sub command convert edgenode +func NewConvertEdgeNodeCmd() *cobra.Command { + c := NewConvertEdgeNodeOptions() + cmd := &cobra.Command{ + Use: "edgenode", + Short: "Converts the kubernetes node to a yurt node", + Run: func(cmd *cobra.Command, _ []string) { + if err := c.Complete(cmd.Flags()); err != nil { + klog.Fatalf("fail to complete the convert edgenode option: %s", err) + } + if err := c.RunConvertEdgeNode(); err != nil { + klog.Fatalf("fail to covert the kubernetes node to a yurt node: %s", err) + } + }, + } + + cmd.Flags().StringP("edge-nodes", "e", "", + "The list of edge nodes wanted to be convert.(e.g. 
-e edgenode1,edgenode2)") + cmd.Flags().String("yurthub-image", "openyurt/yurthub:latest", + "The yurthub image.") + cmd.Flags().String("yurtctl-servant-image", "openyurt/yurtctl-servant:latest", + "The yurtctl-servant image.") + cmd.Flags().String("pod-manifest-path", "", + "Path to the directory on edge node containing static pod files.") + cmd.Flags().String("kubeadm-conf-path", "", + "The path to kubelet service conf that is used by kubelet component to join the cluster on the edge node.") + cmd.Flags().String("join-token", "", "The token used by yurthub for joining the cluster.") + + return cmd +} + +// Complete completes all the required options +func (c *ConvertEdgeNodeOptions) Complete(flags *pflag.FlagSet) error { + enStr, err := flags.GetString("edge-nodes") + if err != nil { + return err + } + if enStr != "" { + c.EdgeNodes = strings.Split(enStr, ",") + } + + yurthubImage, err := flags.GetString("yurthub-image") + if err != nil { + return err + } + c.YurthubImage = yurthubImage + + ycsi, err := flags.GetString("yurtctl-servant-image") + if err != nil { + return err + } + c.YurctlServantImage = ycsi + + podMainfestPath, err := flags.GetString("pod-manifest-path") + if err != nil { + return err + } + if podMainfestPath == "" { + podMainfestPath = os.Getenv("STATIC_POD_PATH") + } + if podMainfestPath == "" { + podMainfestPath = enutil.StaticPodPath + } + c.PodMainfestPath = podMainfestPath + + kubeadmConfPath, err := flags.GetString("kubeadm-conf-path") + if err != nil { + return err + } + if kubeadmConfPath == "" { + kubeadmConfPath = os.Getenv("KUBELET_SVC") + } + if kubeadmConfPath == "" { + kubeadmConfPath = enutil.KubeletSvcPath + } + c.KubeadmConfPath = kubeadmConfPath + + joinToken, err := flags.GetString("join-token") + if err != nil { + return err + } + c.JoinToken = joinToken + + c.clientSet, err = enutil.GenClientSet(flags) + if err != nil { + return err + } + + openyurtDir := os.Getenv("OPENYURT_DIR") + if openyurtDir == "" { + openyurtDir = enutil.OpenyurtDir + } + c.openyurtDir = openyurtDir + + return nil +} + +// RunConvertEdgeNode converts a standard Kubernetes node to a Yurt node +func (c *ConvertEdgeNodeOptions) RunConvertEdgeNode() (err error) { + // 1. check the server version + if err = kubeutil.ValidateServerVersion(c.clientSet); err != nil { + return + } + klog.V(4).Info("the server version is valid") + + nodeName, err := enutil.GetNodeName() + if err != nil { + return err + } + if len(c.EdgeNodes) > 1 || len(c.EdgeNodes) == 1 && c.EdgeNodes[0] != nodeName { + // 2 remote edgenode convert + nodeLst, err := c.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + if err != nil { + return err + } + + // 2.1. check the EdgeNodes and its label + var edgeNodeNames []string + for _, node := range nodeLst.Items { + _, ok := node.Labels[projectinfo.GetEdgeWorkerLabelKey()] + if !ok { + edgeNodeNames = append(edgeNodeNames, node.GetName()) + } + } + for _, edgeNode := range c.EdgeNodes { + if !strutil.IsInStringLst(edgeNodeNames, edgeNode) { + klog.Errorf("Cannot do the convert, the worker node: %s is not a Kubernetes node.", edgeNode) + return err + } + } + + // 2.2. check the state of EdgeNodes + for _, node := range nodeLst.Items { + if strutil.IsInStringLst(c.EdgeNodes, node.GetName()) { + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + if condition == nil || condition.Status != v1.ConditionTrue { + klog.Errorf("Cannot do the convert, the status of worker node: %s is not 'Ready'.", node.Name) + return err + } + } + } + + // 2.3. 
deploy yurt-hub and reset the kubelet service + joinToken, err := kubeutil.GetOrCreateJoinTokenString(c.clientSet) + if err != nil { + return err + } + if err = kubeutil.RunServantJobs(c.clientSet, map[string]string{ + "action": "convert", + "yurtctl_servant_image": c.YurctlServantImage, + "yurthub_image": c.YurthubImage, + "joinToken": joinToken, + "pod_manifest_path": c.PodMainfestPath, + "kubeadm_conf_path": c.KubeadmConfPath, + }, c.EdgeNodes, true); err != nil { + klog.Errorf("fail to run ServantJobs: %s", err) + return err + } + } else { + // 3. local edgenode convert + node, err := c.clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return err + } + + // 3.1. check the state of EdgeNodes + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + if condition == nil || condition.Status != v1.ConditionTrue { + klog.Errorf("Cannot do the convert, the status of worker node: %s is not 'Ready'.", node.Name) + return err + } + + // 3.2. check the label of EdgeNodes + _, ok := node.Labels[projectinfo.GetEdgeWorkerLabelKey()] + if ok { + klog.Errorf("Cannot do the convert, the worker node: %s is not a Kubernetes node.", node.Name) + return err + } + + // 3.3. label node as edge node + klog.Infof("mark %s as the edge-node", nodeName) + _, err = kubeutil.LabelNode(c.clientSet, node, projectinfo.GetEdgeWorkerLabelKey(), "true") + if err != nil { + return err + } + + // 3. deploy yurt-hub and reset the kubelet service + if c.JoinToken == "" { + c.JoinToken, err = kubeutil.GetOrCreateJoinTokenString(c.clientSet) + if err != nil { + return err + } + } + err = c.SetupYurthub() + if err != nil { + return fmt.Errorf("fail to set up the yurthub pod: %v", err) + } + err = c.ResetKubelet() + if err != nil { + return fmt.Errorf("fail to reset the kubelet service: %v", err) + } + } + return +} + +// SetupYurthub sets up the yurthub pod and wait for the its status to be Running +func (c *ConvertEdgeNodeOptions) SetupYurthub() error { + // 1. put yurt-hub yaml into /etc/kubernetes/manifests + klog.Infof("setting up yurthub on node") + + // 1-1. get apiserver address + kubeletConfPath, err := enutil.GetSingleContentFromFile(c.KubeadmConfPath, kubeletConfigRegularExpression) + if err != nil { + return err + } + kubeletConfPath = strings.Split(kubeletConfPath, "=")[1] + apiserverAddr, err := enutil.GetSingleContentFromFile(kubeletConfPath, apiserverAddrRegularExpression) + if err != nil { + return err + } + apiserverAddr = strings.Split(apiserverAddr, " ")[1] + + // 1-2. replace variables in yaml file + klog.Infof("setting up yurthub apiserver addr") + yurthubTemplate := enutil.ReplaceRegularExpression(enutil.YurthubTemplate, + map[string]string{ + "__kubernetes_service_addr__": apiserverAddr, + "__yurthub_image__": c.YurthubImage, + "__join_token__": c.JoinToken, + }) + + // 1-3. create yurthub.yaml + _, err = enutil.DirExists(c.PodMainfestPath) + if err != nil { + return err + } + err = ioutil.WriteFile(c.getYurthubYaml(), []byte(yurthubTemplate), filemode) + if err != nil { + return err + } + klog.Infof("create the %s/yurt-hub.yaml", c.PodMainfestPath) + + // 2. wait yurthub pod to be ready + err = hubHealthcheck() + return err +} + +// ResetKubelet changes the configuration of the kubelet service and restart it +func (c *ConvertEdgeNodeOptions) ResetKubelet() error { + // 1. 
create a working dir to store revised kubelet.conf + err := os.MkdirAll(c.openyurtDir, dirmode) + if err != nil { + return err + } + fullpath := c.getYurthubKubeletConf() + err = ioutil.WriteFile(fullpath, []byte(enutil.OpenyurtKubeletConf), filemode) + if err != nil { + return err + } + klog.Infof("revised kubeconfig %s is generated", fullpath) + + // 2. revise the kubelet.service drop-in + // 2.1 make a backup for the origin kubelet.service + bkfile := c.getKubeletSvcBackup() + err = enutil.CopyFile(c.KubeadmConfPath, bkfile) + + // 2.2 revise the drop-in, point it to the $OPENYURT_DIR/kubelet.conf + contentbyte, err := ioutil.ReadFile(c.KubeadmConfPath) + if err != nil { + return err + } + kubeConfigSetup := fmt.Sprintf("--kubeconfig=%s/kubelet.conf", c.openyurtDir) + content := enutil.ReplaceRegularExpression(string(contentbyte), map[string]string{ + "--bootstrap.*bootstrap-kubelet.conf": "", + "--kubeconfig=.*kubelet.conf": kubeConfigSetup, + }) + err = ioutil.WriteFile(c.KubeadmConfPath, []byte(content), filemode) + if err != nil { + return err + } + klog.Info("kubelet.service drop-in file is revised") + + // 3. reset the kubelet.service + klog.Info(enutil.DaemonReload) + cmd := exec.Command("bash", "-c", enutil.DaemonReload) + if err := enutil.Exec(cmd); err != nil { + return err + } + klog.Info(enutil.RestartKubeletSvc) + cmd = exec.Command("bash", "-c", enutil.RestartKubeletSvc) + if err := enutil.Exec(cmd); err != nil { + return err + } + klog.Infof("kubelet has been restarted") + return nil +} + +func (c *ConvertEdgeNodeOptions) getYurthubYaml() string { + return filepath.Join(c.PodMainfestPath, enutil.YurthubYamlName) +} + +func (c *ConvertEdgeNodeOptions) getYurthubKubeletConf() string { + return filepath.Join(c.openyurtDir, enutil.KubeletConfName) +} + +func (c *ConvertEdgeNodeOptions) getKubeletSvcBackup() string { + return fmt.Sprintf(enutil.KubeletSvcBackup, c.KubeadmConfPath) +} + +// hubHealthcheck will check the status of yurthub pod +func hubHealthcheck() error { + serverHealthzUrl, err := url.Parse(fmt.Sprintf("http://%s", enutil.ServerHealthzServer)) + if err != nil { + return err + } + serverHealthzUrl.Path = enutil.ServerHealthzUrlPath + + intervalTicker := time.NewTicker(hubHealthzCheckFrequency) + defer intervalTicker.Stop() + retry := failedRetry + for { + select { + case <-intervalTicker.C: + _, err := healthchecker.PingClusterHealthz(http.DefaultClient, serverHealthzUrl.String()) + retry-- + if err != nil { + if retry > 0 { + klog.Infof("yurt-hub is not ready, ping cluster healthz with result: %v, will retry %d times", err, retry) + } else { + return fmt.Errorf("yurt-hub failed after retry 5 times, ping cluster healthz with result: %v", err) + } + } else { + klog.Infof("yurt-hub healthz is OK") + return nil + } + } + } +} diff --git a/pkg/yurtctl/cmd/revert/edgenode.go b/pkg/yurtctl/cmd/revert/edgenode.go new file mode 100644 index 00000000000..5f82fcbe202 --- /dev/null +++ b/pkg/yurtctl/cmd/revert/edgenode.go @@ -0,0 +1,296 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revert + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" + + "github.com/alibaba/openyurt/pkg/projectinfo" + "github.com/alibaba/openyurt/pkg/yurtctl/constants" + enutil "github.com/alibaba/openyurt/pkg/yurtctl/util/edgenode" + kubeutil "github.com/alibaba/openyurt/pkg/yurtctl/util/kubernetes" + strutil "github.com/alibaba/openyurt/pkg/yurtctl/util/strings" +) + +// RevertEdgeNodeOptions has the information required by sub command revert edgenode +type RevertEdgeNodeOptions struct { + clientSet *kubernetes.Clientset + EdgeNodes []string + YurtctlServantImage string + PodMainfestPath string + KubeadmConfPath string + openyurtDir string +} + +// NewRevertEdgeNodeOptions creates a new RevertEdgeNodeOptions +func NewRevertEdgeNodeOptions() *RevertEdgeNodeOptions { + return &RevertEdgeNodeOptions{} +} + +// NewRevertEdgeNodeCmd generates a new sub command revert edgenode +func NewRevertEdgeNodeCmd() *cobra.Command { + r := NewRevertEdgeNodeOptions() + cmd := &cobra.Command{ + Use: "edgenode", + Short: "reverts the yurt node to a kubernetes node", + Run: func(cmd *cobra.Command, _ []string) { + if err := r.Complete(cmd.Flags()); err != nil { + klog.Fatalf("fail to complete the revert edgenode option: %s", err) + } + if err := r.RunRevertEdgeNode(); err != nil { + klog.Fatalf("fail to revert the yurt node to a kubernetes node: %s", err) + } + }, + } + + cmd.Flags().StringP("edge-nodes", "e", "", + "The list of edge nodes wanted to be revert.(e.g. 
-e edgenode1,edgenode2)") + cmd.Flags().String("yurtctl-servant-image", "openyurt/yurtctl-servant:latest", + "The yurtctl-servant image.") + cmd.Flags().String("pod-manifest-path", "", + "Path to the directory on edge node containing static pod files.") + cmd.Flags().String("kubeadm-conf-path", "", + "The path to kubelet service conf that is used by kubelet component to join the cluster on the edge node.") + + return cmd +} + +// Complete completes all the required options +func (r *RevertEdgeNodeOptions) Complete(flags *pflag.FlagSet) (err error) { + enStr, err := flags.GetString("edge-nodes") + if err != nil { + return err + } + if enStr != "" { + r.EdgeNodes = strings.Split(enStr, ",") + } + + ycsi, err := flags.GetString("yurtctl-servant-image") + if err != nil { + return err + } + r.YurtctlServantImage = ycsi + + podMainfestPath, err := flags.GetString("pod-manifest-path") + if err != nil { + return err + } + if podMainfestPath == "" { + podMainfestPath = os.Getenv("STATIC_POD_PATH") + } + if podMainfestPath == "" { + podMainfestPath = enutil.StaticPodPath + } + r.PodMainfestPath = podMainfestPath + + kubeadmConfPath, err := flags.GetString("kubeadm-conf-path") + if err != nil { + return err + } + if kubeadmConfPath == "" { + kubeadmConfPath = os.Getenv("KUBELET_SVC") + } + if kubeadmConfPath == "" { + kubeadmConfPath = enutil.KubeletSvcPath + } + r.KubeadmConfPath = kubeadmConfPath + + r.clientSet, err = enutil.GenClientSet(flags) + if err != nil { + return err + } + + openyurtDir := os.Getenv("OPENYURT_DIR") + if openyurtDir == "" { + openyurtDir = enutil.OpenyurtDir + } + r.openyurtDir = openyurtDir + + return +} + +// RunRevertEdgeNode reverts the target Yurt node back to a standard Kubernetes node +func (r *RevertEdgeNodeOptions) RunRevertEdgeNode() (err error) { + // 1. check the server version + if err = kubeutil.ValidateServerVersion(r.clientSet); err != nil { + return + } + klog.V(4).Info("the server version is valid") + + nodeName, err := enutil.GetNodeName() + if err != nil { + return err + } + if len(r.EdgeNodes) > 1 || len(r.EdgeNodes) == 1 && r.EdgeNodes[0] != nodeName { + // 2. remote edgenode revert + nodeLst, err := r.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + if err != nil { + return err + } + + // 2.1. check the EdgeNodes and its label + var edgeNodeNames []string + for _, node := range nodeLst.Items { + isEdgeNode, ok := node.Labels[projectinfo.GetEdgeWorkerLabelKey()] + if ok && isEdgeNode == "true" { + edgeNodeNames = append(edgeNodeNames, node.GetName()) + } + } + for _, edgeNode := range r.EdgeNodes { + if !strutil.IsInStringLst(edgeNodeNames, edgeNode) { + klog.Errorf("Cannot do the revert, the worker node: %s is not a Yurt edge node.", edgeNode) + return err + } + } + + // 2.2. check the state of EdgeNodes + for _, node := range nodeLst.Items { + if strutil.IsInStringLst(r.EdgeNodes, node.GetName()) { + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + if condition == nil || condition.Status != v1.ConditionTrue { + klog.Errorf("Cannot do the revert, the status of worker node: %s is not 'Ready'.", node.Name) + return err + } + } + } + + // 2.3. 
remove yurt-hub and revert kubelet service + if err = kubeutil.RunServantJobs(r.clientSet, + map[string]string{ + "action": "revert", + "yurtctl_servant_image": r.YurtctlServantImage, + "pod_manifest_path": r.PodMainfestPath, + "kubeadm_conf_path": r.KubeadmConfPath, + }, + r.EdgeNodes, false); err != nil { + klog.Errorf("fail to revert edge node: %s", err) + return err + } + } else { + // 3. local edgenode revert + node, err := r.clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return err + } + + // 3.1. check the state of EdgeNodes + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + if condition == nil || condition.Status != v1.ConditionTrue { + klog.Errorf("Cannot do the revert, the status of worker node: %s is not 'Ready'.", node.Name) + return err + } + + // 3.2. check the label of EdgeNodes and remove label + isEdgeNode, ok := node.Labels[projectinfo.GetEdgeWorkerLabelKey()] + if ok && isEdgeNode == "true" { + _, foundAutonomy := node.Annotations[constants.AnnotationAutonomy] + if foundAutonomy { + delete(node.Annotations, constants.AnnotationAutonomy) + } + + delete(node.Labels, projectinfo.GetEdgeWorkerLabelKey()) + if _, err = r.clientSet.CoreV1().Nodes().Update(node); err != nil { + return err + } + } else { + klog.Errorf("Cannot do the revert, the worker node: %s is not a Yurt edge node.", node.Name) + return err + } + klog.Info("label alibabacloud.com/is-edge-worker is removed") + + // 3.3. remove yurt-hub and revert kubelet service + if err := r.RevertKubelet(); err != nil { + return fmt.Errorf("fail to revert kubelet: %v", err) + } + if err := r.RemoveYurthub(); err != nil { + return err + } + } + + return +} + +// RevertKubelet resets the kubelet service +func (r *RevertEdgeNodeOptions) RevertKubelet() error { + // 1. remove openyurt's kubelet.conf if exist + yurtKubeletConf := r.getYurthubKubeletConf() + if err := os.Remove(yurtKubeletConf); err != nil { + return err + } + kubeletSvcBk := r.getKubeletSvcBackup() + _, err := enutil.FileExists(kubeletSvcBk) + if err != nil { + klog.Errorf("fail to get file %s, will revise the %s directly", kubeletSvcBk, r.KubeadmConfPath) + return err + } + klog.Infof("found backup file %s, will use it to revert the node", kubeletSvcBk) + err = os.Rename(kubeletSvcBk, r.KubeadmConfPath) + if err != nil { + return err + } + + // 2. reset the kubelet.service + klog.Info(enutil.DaemonReload) + cmd := exec.Command("bash", "-c", enutil.DaemonReload) + if err := enutil.Exec(cmd); err != nil { + return err + } + + klog.Info(enutil.RestartKubeletSvc) + cmd = exec.Command("bash", "-c", enutil.RestartKubeletSvc) + if err := enutil.Exec(cmd); err != nil { + return err + } + klog.Infof("kubelet has been reset back to default") + return nil +} + +// RemoveYurthub deletes the yurt-hub pod +func (r *RevertEdgeNodeOptions) RemoveYurthub() error { + // 1. 
remove the yurt-hub.yaml to delete the yurt-hub + yurthubYaml := r.getYurthubYaml() + err := os.Remove(yurthubYaml) + if err != nil { + return err + } + klog.Infof("yurt-hub has been removed") + return nil +} + +func (r *RevertEdgeNodeOptions) getYurthubKubeletConf() string { + return filepath.Join(r.openyurtDir, enutil.KubeletConfName) +} + +func (r *RevertEdgeNodeOptions) getKubeletSvcBackup() string { + return fmt.Sprintf(enutil.KubeletSvcBackup, r.KubeadmConfPath) +} + +func (r *RevertEdgeNodeOptions) getYurthubYaml() string { + return filepath.Join(r.PodMainfestPath, enutil.YurthubYamlName) +} diff --git a/pkg/yurtctl/cmd/revert/revert.go b/pkg/yurtctl/cmd/revert/revert.go index a57279f8d64..cf31b5cae13 100644 --- a/pkg/yurtctl/cmd/revert/revert.go +++ b/pkg/yurtctl/cmd/revert/revert.go @@ -35,10 +35,11 @@ import ( nodeutil "k8s.io/kubernetes/pkg/controller/util/node" ) -// ConvertOptions has the information required by the revert operation +// RevertOptions has the information required by the revert operation type RevertOptions struct { clientSet *kubernetes.Clientset YurtctlServantImage string + PodMainfestPath string KubeadmConfPath string } @@ -63,9 +64,14 @@ func NewRevertCmd() *cobra.Command { }, } + cmd.AddCommand(NewRevertEdgeNodeCmd()) + cmd.Flags().String("yurtctl-servant-image", "openyurt/yurtctl-servant:latest", "The yurtctl-servant image.") + cmd.Flags().String("pod-manifest-path", + "/etc/kubernetes/manifests", + "Path to the directory on edge node containing static pod files.") cmd.Flags().String("kubeadm-conf-path", "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf", "The path to kubelet service conf that is used by kubelet component to join the cluster on the edge node.") @@ -81,6 +87,12 @@ func (ro *RevertOptions) Complete(flags *pflag.FlagSet) error { } ro.YurtctlServantImage = ycsi + pmp, err := flags.GetString("pod-manifest-path") + if err != nil { + return err + } + ro.PodMainfestPath = pmp + kcp, err := flags.GetString("kubeadm-conf-path") if err != nil { return err @@ -137,14 +149,9 @@ func (ro *RevertOptions) RunRevert() (err error) { if ok && isEdgeNode == "true" { // cache edge nodes, we need to run servant job on each edge node later edgeNodeNames = append(edgeNodeNames, node.GetName()) - // remove the autonomy annotation, if found - _, foundAutonomy := node.Annotations[constants.AnnotationAutonomy] - if foundAutonomy { - delete(node.Annotations, constants.AnnotationAutonomy) - } } - if ok { - // remove the label for both the cloud node and the edge node + if ok && isEdgeNode == "false" { + // remove the label for both the cloud node delete(node.Labels, projectinfo.GetEdgeWorkerLabelKey()) if _, err = ro.clientSet.CoreV1().Nodes().Update(&node); err != nil { return @@ -234,9 +241,10 @@ func (ro *RevertOptions) RunRevert() (err error) { map[string]string{ "action": "revert", "yurtctl_servant_image": ro.YurtctlServantImage, + "pod_manifest_path": ro.PodMainfestPath, "kubeadm_conf_path": ro.KubeadmConfPath, }, - edgeNodeNames); err != nil { + edgeNodeNames, false); err != nil { klog.Errorf("fail to revert edge node: %s", err) return } diff --git a/pkg/yurtctl/constants/constants.go b/pkg/yurtctl/constants/constants.go index 83879937a55..850fdc65a09 100644 --- a/pkg/yurtctl/constants/constants.go +++ b/pkg/yurtctl/constants/constants.go @@ -159,8 +159,8 @@ spec: command: - yurt-controller-manager ` - // ServantJobTemplate defines the servant job in yaml format - ServantJobTemplate = ` + // ConvertServantJobTemplate defines the yurtctl convert servant job 
in yaml format + ConvertServantJobTemplate = ` apiVersion: batch/v1 kind: Job metadata: @@ -186,7 +186,52 @@ spec: - /bin/sh - -c args: - - "sed -i 's|__yurthub_image__|{{.yurthub_image}}|g;s|__join_token__|{{.joinToken}}|g' /var/lib/openyurt/setup_edgenode && cp /var/lib/openyurt/setup_edgenode /tmp && nsenter -t 1 -m -u -n -i /var/tmp/setup_edgenode {{.action}} " + - "nsenter -t 1 -m -u -n -i -- /bin/yurtctl convert edgenode --yurthub-image {{.yurthub_image}} --join-token {{.joinToken}}" + securityContext: + privileged: true + volumeMounts: + - mountPath: /tmp + name: host-var-tmp + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: STATIC_POD_PATH + value: {{.pod_manifest_path}} + {{if .kubeadm_conf_path }} + - name: KUBELET_SVC + value: {{.kubeadm_conf_path}} + {{end}} +` + // RevertServantJobTemplate defines the yurtctl revert servant job in yaml format + RevertServantJobTemplate = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: {{.jobName}} + namespace: kube-system +spec: + template: + spec: + hostPID: true + hostNetwork: true + restartPolicy: OnFailure + nodeName: {{.nodeName}} + volumes: + - name: host-var-tmp + hostPath: + path: /var/tmp + type: Directory + containers: + - name: yurtctl-servant + image: {{.yurtctl_servant_image}} + imagePullPolicy: Always + command: + - /bin/sh + - -c + args: + - "nsenter -t 1 -m -u -n -i -- /bin/yurtctl revert edgenode" securityContext: privileged: true volumeMounts: diff --git a/pkg/yurtctl/util/edgenode/common.go b/pkg/yurtctl/util/edgenode/common.go new file mode 100644 index 00000000000..7c02896df7a --- /dev/null +++ b/pkg/yurtctl/util/edgenode/common.go @@ -0,0 +1,112 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package edgenode + +const ( + KubeletSvcPath = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + OpenyurtDir = "/var/lib/openyurt" + StaticPodPath = "/etc/kubernetes/manifests" + KubeCondfigPath = "/etc/kubernetes/kubelet.conf" + YurthubYamlName = "yurt-hub.yaml" + KubeletConfName = "kubelet.conf" + KubeletSvcBackup = "%s.bk" + + Hostname = "/etc/hostname" + KubeletHostname = "--hostname-override=[^\"\\s]*" + KubeletEnvironmentFile = "EnvironmentFile=.*" + + DaemonReload = "systemctl daemon-reload" + RestartKubeletSvc = "systemctl restart kubelet" + + ServerHealthzServer = "127.0.0.1:10261" + ServerHealthzUrlPath = "/v1/healthz" + OpenyurtKubeletConf = ` +apiVersion: v1 +clusters: +- cluster: + server: http://127.0.0.1:10261 + name: default-cluster +contexts: +- context: + cluster: default-cluster + namespace: default + user: default-auth + name: default-context +current-context: default-context +kind: Config +preferences: {} +` + YurthubTemplate = ` +apiVersion: v1 +kind: Pod +metadata: + labels: + k8s-app: yurt-hub + name: yurt-hub + namespace: kube-system +spec: + volumes: + - name: hub-dir + hostPath: + path: /var/lib/yurthub + type: DirectoryOrCreate + - name: kubernetes + hostPath: + path: /etc/kubernetes + type: Directory + containers: + - name: yurt-hub + image: __yurthub_image__ + imagePullPolicy: IfNotPresent + volumeMounts: + - name: hub-dir + mountPath: /var/lib/yurthub + - name: kubernetes + mountPath: /etc/kubernetes + command: + - yurthub + - --v=2 + - --server-addr=__kubernetes_service_addr__ + - --node-name=$(NODE_NAME) + - --join-token=__join_token__ + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /v1/healthz + port: 10261 + initialDelaySeconds: 300 + periodSeconds: 5 + failureThreshold: 3 + resources: + requests: + cpu: 150m + memory: 150Mi + limits: + memory: 300Mi + securityContext: + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + hostNetwork: true + priorityClassName: system-node-critical + priority: 2000001000 +` +) diff --git a/pkg/yurtctl/util/edgenode/util.go b/pkg/yurtctl/util/edgenode/util.go new file mode 100644 index 00000000000..cc864b2097b --- /dev/null +++ b/pkg/yurtctl/util/edgenode/util.go @@ -0,0 +1,197 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package edgenode + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/spf13/pflag" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" +) + +// FileExists determines whether the file exists +func FileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsExist(err) { + return true, err + } else if err != nil { + return false, err + } + return true, nil +} + +// GetContentFormFile returns all strings that match the regular expression regularExpression +func GetContentFormFile(filename string, regularExpression string) ([]string, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + ct := string(content) + reg := regexp.MustCompile(regularExpression) + res := reg.FindAllString(ct, -1) + return res, nil +} + +// GetSingleContentFromFile determines whether there is a unique string that matches the +// regular expression regularExpression and returns it +func GetSingleContentFromFile(filename string, regularExpression string) (string, error) { + contents, err := GetContentFormFile(filename, regularExpression) + if err != nil { + return "", fmt.Errorf("failed to read file %s, %v", filename, err) + } + if contents == nil { + return "", fmt.Errorf("no matching string %s in file %s", regularExpression, filename) + } + if len(contents) > 1 { + return "", fmt.Errorf("there are multiple matching string %s in file %s", regularExpression, filename) + } + return contents[0], nil +} + +// DirExists determines whether the directory exists +func DirExists(dirname string) (bool, error) { + s, err := os.Stat(dirname) + if err != nil { + return false, err + } + if !s.IsDir() { + return false, fmt.Errorf("%s is not a dir", dirname) + } + return true, nil +} + +// CopyFile copys sourceFile to destinationFile +func CopyFile(sourceFile string, destinationFile string) error { + content, err := ioutil.ReadFile(sourceFile) + if err != nil { + return fmt.Errorf("failed to read source file %s: %v", sourceFile, err) + } + err = ioutil.WriteFile(destinationFile, content, 0666) + if err != nil { + return fmt.Errorf("failed to write destination file %s: %v", destinationFile, err) + } + return nil +} + +// ReplaceRegularExpression matchs the regular expression and replace it with the corresponding string +func ReplaceRegularExpression(content string, replace map[string]string) string { + for old, new := range replace { + reg := regexp.MustCompile(old) + content = reg.ReplaceAllString(content, new) + } + return content +} + +// GetNodeName gets the node name based on environment variable, parameters --hostname-override +// in the configuration file or hostname +func GetNodeName() (string, error) { + //1. from env NODE_NAME + nodename := os.Getenv("NODE_NAME") + if nodename != "" { + return nodename, nil + } + + //2. find --hostname-override in 10-kubeadm.conf + nodeName, err := GetSingleContentFromFile(KubeletSvcPath, KubeletHostname) + if nodeName != "" { + nodeName = strings.Split(nodeName, "=")[1] + return nodeName, nil + } + + //3. find --hostname-override in EnvironmentFile + environmentFiles, err := GetContentFormFile(KubeletSvcPath, KubeletEnvironmentFile) + if err != nil { + return "", err + } + for _, ef := range environmentFiles { + ef = strings.Split(ef, "-")[1] + nodeName, err = GetSingleContentFromFile(ef, KubeletHostname) + if nodeName != "" { + nodeName = strings.Split(nodeName, "=")[1] + return nodeName, nil + } + } + + //4. 
read nodeName from /etc/hostname + content, err := ioutil.ReadFile(Hostname) + if err != nil { + return "", err + } + nodeName = strings.Trim(string(content), "\n") + return nodeName, nil +} + +// GenClientSet generates the clientset based on command option, environment variable, +// file in $HOME/.kube or the default kubeconfig file +func GenClientSet(flags *pflag.FlagSet) (*kubernetes.Clientset, error) { + kubeconfigPath, err := PrepareKubeConfigPath(flags) + if err != nil { + return nil, err + } + + restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return nil, err + } + + return kubernetes.NewForConfig(restCfg) +} + +// PrepareKubeConfigPath returns the path of cluster kubeconfig file +func PrepareKubeConfigPath(flags *pflag.FlagSet) (string, error) { + kbCfgPath, err := flags.GetString("kubeconfig") + if err != nil { + return "", err + } + + if kbCfgPath == "" { + kbCfgPath = os.Getenv("KUBECONFIG") + } + + if kbCfgPath == "" { + if home := homedir.HomeDir(); home != "" { + homeKbCfg := filepath.Join(home, ".kube", "config") + if ok, _ := FileExists(kbCfgPath); ok { + kbCfgPath = homeKbCfg + } + } + } + + if kbCfgPath == "" { + kbCfgPath = KubeCondfigPath + } + + return kbCfgPath, nil +} + +// Exec execs the command +func Exec(cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + if err := cmd.Wait(); err != nil { + return err + } + return nil +} diff --git a/pkg/yurtctl/util/kubernetes/util.go b/pkg/yurtctl/util/kubernetes/util.go index d97d34a9bf9..1fb08958154 100644 --- a/pkg/yurtctl/util/kubernetes/util.go +++ b/pkg/yurtctl/util/kubernetes/util.go @@ -274,8 +274,12 @@ func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, p } // RunServantJobs launchs servant jobs on specified edge nodes -func RunServantJobs(cliSet *kubernetes.Clientset, tmplCtx map[string]string, edgeNodeNames []string) error { +func RunServantJobs(cliSet *kubernetes.Clientset, tmplCtx map[string]string, edgeNodeNames []string, convert bool) error { var wg sync.WaitGroup + servantJobTemplate := constants.ConvertServantJobTemplate + if !convert { + servantJobTemplate = constants.RevertServantJobTemplate + } for _, nodeName := range edgeNodeNames { action, exist := tmplCtx["action"] if !exist { @@ -292,7 +296,7 @@ func RunServantJobs(cliSet *kubernetes.Clientset, tmplCtx map[string]string, edg } tmplCtx["nodeName"] = nodeName - jobYaml, err := tmplutil.SubsituteTemplate(constants.ServantJobTemplate, tmplCtx) + jobYaml, err := tmplutil.SubsituteTemplate(servantJobTemplate, tmplCtx) if err != nil { return err } diff --git a/pkg/yurthub/healthchecker/health_checker.go b/pkg/yurthub/healthchecker/health_checker.go index 9f8502dce33..0cc481363ff 100644 --- a/pkg/yurthub/healthchecker/health_checker.go +++ b/pkg/yurthub/healthchecker/health_checker.go @@ -116,7 +116,7 @@ func newChecker(url *url.URL, tp transport.Interface, failedRetry, healthyThresh healthyThreshold: healthyThreshold, } - initHealthyStatus, err := pingClusterHealthz(c.healthzClient, c.serverHealthzAddr) + initHealthyStatus, err := PingClusterHealthz(c.healthzClient, c.serverHealthzAddr) if err != nil { klog.Errorf("cluster(%s) init status: unhealthy, %v", c.serverHealthzAddr, err) } @@ -146,7 +146,7 @@ func (c *checker) healthyCheckLoop(stopCh <-chan struct{}) { return case <-intervalTicker.C: for i := 0; i < c.failedRetry; i++ { - isHealthy, err = pingClusterHealthz(c.healthzClient, c.serverHealthzAddr) + isHealthy, err = PingClusterHealthz(c.healthzClient, 
c.serverHealthzAddr) if err != nil { klog.V(2).Infof("ping cluster healthz with result, %v", err) if !c.clusterHealthy { @@ -189,7 +189,7 @@ func (c *checker) healthyCheckLoop(stopCh <-chan struct{}) { } } -func pingClusterHealthz(client *http.Client, addr string) (bool, error) { +func PingClusterHealthz(client *http.Client, addr string) (bool, error) { if client == nil { return false, fmt.Errorf("http client is invalid") }
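
Usage sketch (illustrative, not part of the diff): the `convert edgenode` and `revert edgenode` sub-commands introduced above can be exercised roughly as follows. The node names are the placeholder examples taken from the flag help text in this change, and the image values are the flag defaults declared here; real clusters would substitute their own node names and image tags.

    # convert two worker nodes into edge nodes (remote mode, dispatched via servant jobs)
    yurtctl convert edgenode \
        --edge-nodes edgenode1,edgenode2 \
        --yurthub-image openyurt/yurthub:latest \
        --yurtctl-servant-image openyurt/yurtctl-servant:latest

    # revert the same nodes back to plain Kubernetes nodes
    yurtctl revert edgenode \
        --edge-nodes edgenode1,edgenode2 \
        --yurtctl-servant-image openyurt/yurtctl-servant:latest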