diff --git a/packages/tkg-clusterclass-vsphere/bundle/config/upstream/base.yaml b/packages/tkg-clusterclass-vsphere/bundle/config/upstream/base.yaml index 1014429bf6..e3ee874e36 100644 --- a/packages/tkg-clusterclass-vsphere/bundle/config/upstream/base.yaml +++ b/packages/tkg-clusterclass-vsphere/bundle/config/upstream/base.yaml @@ -400,7 +400,7 @@ spec: type: string default: [] - name: worker - required: true + required: false schema: openAPIV3Schema: type: object @@ -457,6 +457,12 @@ spec: openAPIV3Schema: type: object properties: {} + - name: controlPlaneTaint + required: false + schema: + openAPIV3Schema: + type: boolean + default: true patches: - name: vsphereClusterTemplate definitions: @@ -1402,6 +1408,21 @@ spec: - level: Metadata omitStages: - "RequestReceived" + - name: controlPlaneTaint + enabledIf: '{{ not .controlPlaneTaint }}' + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/taints + value: [] + - op: add + path: /spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/taints + value: [] - name: windows definitions: - selector: diff --git a/providers/infrastructure-vsphere/v1.4.1/cconly/base.yaml b/providers/infrastructure-vsphere/v1.4.1/cconly/base.yaml index 1014429bf6..e3ee874e36 100644 --- a/providers/infrastructure-vsphere/v1.4.1/cconly/base.yaml +++ b/providers/infrastructure-vsphere/v1.4.1/cconly/base.yaml @@ -400,7 +400,7 @@ spec: type: string default: [] - name: worker - required: true + required: false schema: openAPIV3Schema: type: object @@ -457,6 +457,12 @@ spec: openAPIV3Schema: type: object properties: {} + - name: controlPlaneTaint + required: false + schema: + openAPIV3Schema: + type: boolean + default: true patches: - name: vsphereClusterTemplate definitions: @@ -1402,6 +1408,21 @@ spec: - level: Metadata omitStages: - "RequestReceived" + - name: controlPlaneTaint + enabledIf: '{{ not .controlPlaneTaint }}' + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/taints + value: [] + - op: add + path: /spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/taints + value: [] - name: windows definitions: - selector: diff --git a/tkg/client/client.go b/tkg/client/client.go index 2db1ca8fcb..8d4d50266a 100644 --- a/tkg/client/client.go +++ b/tkg/client/client.go @@ -236,6 +236,7 @@ type TkgClient struct { tkgConfigPathsClient tkgconfigpaths.Client clusterKubeConfig *types.ClusterKubeConfig clusterClientFactory clusterclient.ClusterClientFactory + vcClientFactory vc.VcClientFactory featureFlagClient FeatureFlagClient } @@ -253,6 +254,7 @@ type Options struct { TKGPathsClient tkgconfigpaths.Client ClusterKubeConfig *types.ClusterKubeConfig ClusterClientFactory clusterclient.ClusterClientFactory + VcClientFactory vc.VcClientFactory FeatureFlagClient FeatureFlagClient } @@ -283,6 +285,7 @@ func New(options Options) (*TkgClient, error) { // nolint:gocritic tkgConfigPathsClient: options.TKGPathsClient, clusterKubeConfig: options.ClusterKubeConfig, clusterClientFactory: options.ClusterClientFactory, + vcClientFactory: options.VcClientFactory, featureFlagClient: options.FeatureFlagClient, }, nil } diff --git 
a/tkg/client/cluster.go b/tkg/client/cluster.go index 967d3e1dec..1ab9e65dc2 100644 --- a/tkg/client/cluster.go +++ b/tkg/client/cluster.go @@ -4,15 +4,21 @@ package client import ( + "bytes" "fmt" + "io" "os" "path/filepath" "strconv" "strings" "time" + "gopkg.in/yaml.v3" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/version" + capi "sigs.k8s.io/cluster-api/api/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" clusterctlclient "sigs.k8s.io/cluster-api/cmd/clusterctl/client" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -25,6 +31,7 @@ import ( "github.com/vmware-tanzu/tanzu-framework/tkg/log" "github.com/vmware-tanzu/tanzu-framework/tkg/tkgconfighelper" "github.com/vmware-tanzu/tanzu-framework/tkg/utils" + "github.com/vmware-tanzu/tanzu-framework/util/topology" ) const ( @@ -95,7 +102,7 @@ func (c *TkgClient) CreateCluster(options *CreateClusterOptions, waitForCluster GetClientTimeout: 3 * time.Second, OperationTimeout: c.timeout, } - regionalClusterClient, err := clusterclient.NewClient(options.Kubeconfig.Path, options.Kubeconfig.Context, clusterclientOptions) + regionalClusterClient, err := c.clusterClientFactory.NewClient(options.Kubeconfig.Path, options.Kubeconfig.Context, clusterclientOptions) if err != nil { return false, errors.Wrap(err, "unable to get cluster client while creating cluster") } @@ -141,6 +148,11 @@ func (c *TkgClient) CreateCluster(options *CreateClusterOptions, waitForCluster if err != nil { return false, errors.Wrap(err, "unable to get cluster configuration") } + + err = validateConfigForSingleNodeCluster(bytes, options, c) + if err != nil { + return false, err + } } else { bytes, err = c.getClusterConfigurationBytes(&options.ClusterConfigOptions, infraProviderName, isManagementCluster, options.IsWindowsWorkloadCluster) if err != nil { @@ -862,3 +874,63 @@ func (c *TkgClient) ValidateManagementClusterVersionWithCLI(regionalClusterClien return nil } + +// validateConfigForSingleNodeCluster validates that the controlPlaneTaint CC variable is not set for the single node workload cluster, otherwise returns error +func validateConfigForSingleNodeCluster(stream []byte, options *CreateClusterOptions, tkgClient *TkgClient) error { + cluster, err := getClusterObjectFromYaml(stream) + if err != nil { + return err + } + + if !topology.IsSingleNodeCluster(cluster) { + return nil + } + + controlPlaneTaint := true + err = topology.GetVariable(cluster, "controlPlaneTaint", &controlPlaneTaint) + if err != nil { + return errors.Wrap(err, "failed to get CC variable controlPlaneTaint") + } + + // Do not allow the creation of single node clusters without the feature gate. 
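+ // With the flag disabled, a zero-worker topology is rejected with the standard minimum worker count error below.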
+ if !tkgClient.IsFeatureActivated(constants.FeatureFlagSingleNodeClusters) { + return errors.New("Worker count cannot be 0, minimum worker count required is 1") + } + + if controlPlaneTaint { + return errors.New(fmt.Sprintf("unable to create single node cluster %s as control plane node has taint", options.ClusterName)) + } + + return nil +} + +func getClusterObjectFromYaml(stream []byte) (*capi.Cluster, error) { + clusterYaml, err := findClusterDefinitionIn(stream) + if err != nil { + return nil, err + } + + cluster := &capi.Cluster{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(clusterYaml, cluster) + if err != nil { + return nil, errors.Wrap(err, "unable to convert yaml to structured format") + } + return cluster, nil +} + +func findClusterDefinitionIn(stream []byte) (map[string]interface{}, error) { + clusterYaml := make(map[string]interface{}) + decoder := yaml.NewDecoder(bytes.NewBufferString(string(stream))) + for { + if err := decoder.Decode(&clusterYaml); err != nil { + if err == io.EOF { + break + } + return clusterYaml, errors.Wrap(err, "unable to read cluster yaml") + } + if clusterYaml["kind"] == constants.KindCluster { + break + } + } + return clusterYaml, nil +} diff --git a/tkg/client/cluster_test.go b/tkg/client/cluster_test.go index 0d6dcfb920..47435d3c1c 100644 --- a/tkg/client/cluster_test.go +++ b/tkg/client/cluster_test.go @@ -3,15 +3,24 @@ package client_test import ( + "fmt" + "time" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" "sigs.k8s.io/cluster-api/api/v1alpha3" + capiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterctl "sigs.k8s.io/cluster-api/cmd/clusterctl/client" "sigs.k8s.io/controller-runtime/pkg/client" . 
"github.com/vmware-tanzu/tanzu-framework/tkg/client" + "github.com/vmware-tanzu/tanzu-framework/tkg/constants" "github.com/vmware-tanzu/tanzu-framework/tkg/fakes" "github.com/vmware-tanzu/tanzu-framework/tkg/region" + "github.com/vmware-tanzu/tanzu-framework/tkg/tkgconfigbom" ) var _ = Describe("ValidateManagementClusterVersionWithCLI", func() { @@ -95,4 +104,210 @@ var _ = Describe("ValidateManagementClusterVersionWithCLI", func() { }) }) }) + +}) + +var _ = Describe("CreateCluster", func() { + const ( + clusterName = "regional-cluster-2" + ) + var ( + tkgClient *TkgClient + clusterClientFactory *fakes.ClusterClientFactory + clusterClient *fakes.ClusterClient + featureFlagClient *fakes.FeatureFlagClient + tkgBomClient *fakes.TKGConfigBomClient + tkgConfigUpdaterClient *fakes.TKGConfigUpdaterClient + tkgConfigReaderWriter *fakes.TKGConfigReaderWriter + tkgConfigReaderWriterClient *fakes.TKGConfigReaderWriterClient + vcClientFactory *fakes.VcClientFactory + vcClient *fakes.VCClient + options CreateClusterOptions + ) + BeforeEach(func() { + clusterClientFactory = &fakes.ClusterClientFactory{} + clusterClient = &fakes.ClusterClient{} + clusterClientFactory.NewClientReturns(clusterClient, nil) + featureFlagClient = &fakes.FeatureFlagClient{} + tkgBomClient = &fakes.TKGConfigBomClient{} + tkgConfigUpdaterClient = &fakes.TKGConfigUpdaterClient{} + tkgConfigReaderWriterClient = &fakes.TKGConfigReaderWriterClient{} + tkgConfigReaderWriter = &fakes.TKGConfigReaderWriter{} + vcClientFactory = &fakes.VcClientFactory{} + vcClient = &fakes.VCClient{} + + tkgConfigReaderWriterClient.TKGConfigReaderWriterReturns(tkgConfigReaderWriter) + vcClientFactory.NewClientReturns(vcClient, nil) + + tkgClient, err = CreateTKGClientOptsMutator(configFile2, testingDir, "../fakes/config/bom/tkg-bom-v1.3.1.yaml", 2*time.Second, func(o Options) Options { + o.ClusterClientFactory = clusterClientFactory + o.FeatureFlagClient = featureFlagClient + o.TKGBomClient = tkgBomClient + o.TKGConfigUpdater = tkgConfigUpdaterClient + o.ReaderWriterConfigClient = tkgConfigReaderWriterClient + o.VcClientFactory = vcClientFactory + return o + }) + Expect(err).NotTo(HaveOccurred()) + + tkgBomConfigData := ` +ova: [] +` + tkgBomConfig := &tkgconfigbom.BOMConfiguration{} + err = yaml.Unmarshal([]byte(tkgBomConfigData), tkgBomConfig) + Expect(err).NotTo(HaveOccurred()) + tkgBomClient.GetBOMConfigurationFromTkrVersionReturns(tkgBomConfig, nil) + tkgBomClient.GetDefaultTkgBOMConfigurationReturns(&tkgconfigbom.BOMConfiguration{ + Release: &tkgconfigbom.ReleaseInfo{Version: "v1.23"}, + }, nil) + + clusterClient.GetManagementClusterTKGVersionReturns("v1.2.1-rc.1", nil) + clusterClient.GetRegionalClusterDefaultProviderNameReturns(VSphereProviderName, nil) + tkgBomClient.GetDefaultTKGReleaseVersionReturns("v1.2.1-rc.1", nil) + tkgBomClient.GetDefaultTkrBOMConfigurationReturns(&tkgconfigbom.BOMConfiguration{ + Release: &tkgconfigbom.ReleaseInfo{Version: "v1.3"}, + Components: map[string][]*tkgconfigbom.ComponentInfo{ + "kubernetes": {{Version: "v1.18.0+vmware.2"}}, + }, + }, nil) + clusterClient.ListResourcesCalls(func(clusterList interface{}, options ...client.ListOption) error { + if clusterList, ok := clusterList.(*capiv1alpha3.ClusterList); ok { + clusterList.Items = []capiv1alpha3.Cluster{ + { + ObjectMeta: v1.ObjectMeta{ + Name: clusterName, + Namespace: constants.DefaultNamespace, + }, + }, + } + return nil + } + return nil + }) + clusterClient.IsPacificRegionalClusterReturns(false, nil) + + tkgConfigReaderWriter.GetCalls(func(key string) 
(string, error) { + configMap := populateConfigMap() + if val, ok := configMap[key]; ok { + return val, nil + } + return "192.168.2.1/16", nil + }) + }) + + Context("ValidateConfigForSingleNodeCluster", func() { + When("Feature gate is enabled", func() { + BeforeEach(func() { + featureFlagClient.IsConfigFeatureActivatedStub = func(featureFlagName string) (bool, error) { + if featureFlagName == constants.FeatureFlagSingleNodeClusters { + return true, nil + } + return true, nil + } + }) + + It("Should fail reading the cluster yaml", func() { + options = createClusterOptions(clusterName, "../fakes/config/invalid_config.yaml") + _, err := tkgClient.CreateCluster(&options, false) + Expect(err.Error()).To(ContainSubstring("unable to read cluster yaml")) + }) + + It("Should fail if cluster is single node and controlPlaneTaint exists", func() { + options = createClusterOptions(clusterName, "../fakes/config/cluster_vsphere_snc_cp_taint_true.yaml") + _, err := tkgClient.CreateCluster(&options, false) + + Expect(err).To(MatchError(fmt.Sprintf("unable to create single node cluster %s as control plane node has taint", clusterName))) + Expect(clusterClient.ApplyCallCount()).To(BeZero()) + }) + + It("Should fail validation if control plane taint is invalid", func() { + options = createClusterOptions(clusterName, "../fakes/config/cluster_vsphere_snc_invalid_cp_taint.yaml") + _, err := tkgClient.CreateCluster(&options, false) + + Expect(err).To(MatchError("failed to get CC variable controlPlaneTaint: unmarshalling from JSON into value: json: cannot unmarshal string into Go value of type bool")) + Expect(clusterClient.ApplyCallCount()).To(BeZero()) + }) + + It("Should fail if cluster is single node with workers nil and controlPlaneTaint are set", func() { + options = createClusterOptions(clusterName, "../fakes/config/cluster_vsphere_snc_omit_workers.yaml") + _, err := tkgClient.CreateCluster(&options, false) + Expect(err).To(MatchError(fmt.Sprintf("unable to create single node cluster %s as control plane node has taint", clusterName))) + Expect(clusterClient.ApplyCallCount()).To(BeZero()) + }) + + It("Should successfully create a single node cluster", func() { + options = createClusterOptions(clusterName, "../fakes/config/cluster_vsphere_snc.yaml") + _, err := tkgClient.CreateCluster(&options, false) + Expect(err).ToNot(HaveOccurred()) + Expect(clusterClient.ApplyCallCount()).To(Equal(1)) + }) + + It("Should successfully create a multi node cluster", func() { + options = createClusterOptions(clusterName, "../fakes/config/cluster_vsphere.yaml") + _, err := tkgClient.CreateCluster(&options, false) + Expect(err).ToNot(HaveOccurred()) + Expect(clusterClient.ApplyCallCount()).To(Equal(1)) + }) + }) + When("Feature gate is disabled", func() { + BeforeEach(func() { + featureFlagClient.IsConfigFeatureActivatedStub = func(featureFlagName string) (bool, error) { + if featureFlagName == constants.FeatureFlagSingleNodeClusters { + return false, nil + } + return true, nil + } + }) + It("Should fail if cluster is single node", func() { + options = createClusterOptions(clusterName, "../fakes/config/cluster_vsphere_snc.yaml") + _, err := tkgClient.CreateCluster(&options, false) + + Expect(err).To(MatchError("Worker count cannot be 0, minimum worker count required is 1")) + Expect(clusterClient.ApplyCallCount()).To(BeZero()) + }) + It("Should successfully create a multi node cluster", func() { + options = createClusterOptions(clusterName, "../fakes/config/cluster_vsphere.yaml") + _, err := tkgClient.CreateCluster(&options, 
false) + Expect(err).ToNot(HaveOccurred()) + Expect(clusterClient.ApplyCallCount()).To(Equal(1)) + }) + + }) + }) }) + +func populateConfigMap() map[string]string { + configMap := make(map[string]string, 0) + configMap[constants.ConfigVariableCNI] = "antrea" + configMap[constants.ConfigVariableControlPlaneNodeNameservers] = "8.8.8.8" + configMap[constants.ConfigVariableWorkerNodeNameservers] = "8.8.8.8" + configMap[VsphereNodeCPUVarName[0]] = "2" + configMap[VsphereNodeCPUVarName[1]] = "2" + configMap[VsphereNodeMemVarName[0]] = "4098" + configMap[VsphereNodeMemVarName[1]] = "4098" + configMap[VsphereNodeDiskVarName[0]] = "20" + configMap[VsphereNodeDiskVarName[1]] = "20" + configMap[constants.ConfigVariableVsphereServer] = "10.0.0.1" + configMap[constants.ConfigVariableWorkerMachineCount0] = "0" + configMap[constants.ConfigVariableWorkerMachineCount1] = "0" + configMap[constants.ConfigVariableWorkerMachineCount2] = "0" + return configMap +} + +func createClusterOptions(clusterName, configFile string) CreateClusterOptions { + options := CreateClusterOptions{ + ClusterConfigOptions: ClusterConfigOptions{ + KubernetesVersion: "v1.18.0+vmware.2", + ClusterName: clusterName, + TargetNamespace: constants.DefaultNamespace, + ProviderRepositorySource: &clusterctl.ProviderRepositorySourceOptions{ + InfrastructureProvider: VSphereProviderName, + }, + WorkerMachineCount: pointer.Int64Ptr(0), + }, + IsInputFileClusterClassBased: true, + ClusterConfigFile: configFile, + } + options.Edition = "some edition" + return options +} diff --git a/tkg/client/get_cluster_helper.go b/tkg/client/get_cluster_helper.go index 3d418de1d6..e0828feace 100644 --- a/tkg/client/get_cluster_helper.go +++ b/tkg/client/get_cluster_helper.go @@ -306,7 +306,8 @@ func getClusterStatus(clusterInfo *clusterObjects) TKGClusterPhase { creationCompleteCondition := clusterInfo.cluster.Status.InfrastructureReady && clusterInfo.cluster.Status.ControlPlaneReady && clusterInfo.kcp.Status.ReadyReplicas > 0 && - readyReplicas > 0 + // readyReplicas check will only happen in case the replicas in MD is > 0 such that ClusterStatus is `CREATED` in case of SNC. 
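+ // A single node cluster has no MachineDeployment replicas, so worker readiness is not required for the creation-complete check.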
+ (replicas == 0 || readyReplicas > 0) if !creationCompleteCondition { return getClusterStatusWhileCreating(clusterInfo) diff --git a/tkg/client/get_cluster_test.go b/tkg/client/get_cluster_test.go index d4a951983c..81cf671b9a 100644 --- a/tkg/client/get_cluster_test.go +++ b/tkg/client/get_cluster_test.go @@ -315,6 +315,52 @@ var _ = Describe("Unit tests for get clusters", func() { }) }) + Context("When cluster is in running state: #CP=1 #Worker=0", func() { + BeforeEach(func() { + createClusterOptions = fakehelper.TestAllClusterComponentOptions{ + ClusterName: "cluster-1", + Namespace: constants.DefaultNamespace, + Labels: map[string]string{ + TkgLabelClusterRolePrefix + TkgLabelClusterRoleWorkload: "", + }, + ClusterOptions: fakehelper.TestClusterOptions{ + Phase: "provisioned", + InfrastructureReady: true, + ControlPlaneInitialized: true, + ControlPlaneReady: true, + + OperationType: clusterclient.OperationTypeCreate, + OperationtTimeout: 30 * 60, // 30 minutes + StartTimestamp: time.Now().UTC().Add(-2 * time.Hour).String(), + // when cluster is in running state, operationType & lastObserved state + // should not matter even if more than the timeout time has elapsed + LastObservedTimestamp: time.Now().UTC().Add(-1 * time.Hour).String(), + }, + CPOptions: fakehelper.TestCPOptions{ + SpecReplicas: 1, + ReadyReplicas: 1, + UpdatedReplicas: 1, + Replicas: 1, + K8sVersion: "v1.18.2+vmware.1", + }, + MachineOptions: []fakehelper.TestMachineOptions{ + {Phase: "running", K8sVersion: "v1.18.2+vmware.1", IsCP: true}, + }, + } + }) + It("should not return an error and all status should be correct", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(len(clusterInfo)).To(Equal(1)) + Expect(clusterInfo[0].Name).To(Equal(createClusterOptions.ClusterName)) + Expect(clusterInfo[0].Namespace).To(Equal(createClusterOptions.Namespace)) + Expect(clusterInfo[0].ControlPlaneCount).To(Equal(fmt.Sprintf("%v/%v", createClusterOptions.CPOptions.ReadyReplicas, createClusterOptions.CPOptions.SpecReplicas))) + Expect(clusterInfo[0].WorkerCount).To(Equal("")) + Expect(clusterInfo[0].K8sVersion).To(Equal(createClusterOptions.CPOptions.K8sVersion)) + Expect(clusterInfo[0].Roles).To(Equal([]string{TkgLabelClusterRoleWorkload})) + Expect(clusterInfo[0].Status).To(Equal(string(TKGClusterPhaseRunning))) + }) + }) + Context("When cluster is in running state #CP=3 #Worker=3", func() { BeforeEach(func() { createClusterOptions = fakehelper.TestAllClusterComponentOptions{ diff --git a/tkg/client/machine_deployment.go b/tkg/client/machine_deployment.go index d2e8631dc9..1825f73a8e 100644 --- a/tkg/client/machine_deployment.go +++ b/tkg/client/machine_deployment.go @@ -26,6 +26,7 @@ import ( docker "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" tkgsv1alpha2 "github.com/vmware-tanzu/tanzu-framework/apis/run/v1alpha2" + "github.com/vmware-tanzu/tanzu-framework/util/topology" "github.com/vmware-tanzu/tanzu-framework/tkg/clusterclient" "github.com/vmware-tanzu/tanzu-framework/tkg/constants" @@ -103,12 +104,20 @@ func (c *TkgClient) SetMachineDeployment(options *SetMachineDeploymentOptions) e if err != nil { return errors.Wrap(err, "unable to determine if cluster is clusterclass based") } + if ccBased { - var cluster capi.Cluster - if err = clusterClient.GetResource(&cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { + cluster := &capi.Cluster{} + if err := clusterClient.GetResource(cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { return errors.Wrap(err, "Unable to retrieve 
cluster resource") } - return DoSetMachineDeploymentCC(clusterClient, &cluster, options) + skip, err := skipMDCreation(clusterClient, c, cluster, options) + if err != nil { + return err + } + if skip { + return nil + } + return DoSetMachineDeploymentCC(clusterClient, cluster, options) } isPacific, err := clusterClient.IsPacificRegionalCluster() @@ -718,7 +727,7 @@ func (c *TkgClient) getClusterClient() (clusterclient.Client, error) { GetClientInterval: 1 * time.Second, GetClientTimeout: 3 * time.Second, } - clusterClient, err := clusterclient.NewClient(currentRegion.SourceFilePath, currentRegion.ContextName, clusterclientOptions) + clusterClient, err := c.clusterClientFactory.NewClient(currentRegion.SourceFilePath, currentRegion.ContextName, clusterclientOptions) if err != nil { return nil, errors.Wrap(err, "Unable to create clusterclient") } @@ -762,3 +771,13 @@ func updateAzureSecret(kcTemplate *v1beta1.KubeadmConfigTemplate, machineTemplat } } } + +func skipMDCreation(clusterClient clusterclient.Client, c *TkgClient, cluster *capi.Cluster, options *SetMachineDeploymentOptions) (bool, error) { + if topology.IsSingleNodeCluster(cluster) && c.IsFeatureActivated(constants.FeatureFlagSingleNodeClusters) { + return true, nil + } else if topology.HasWorkerNodes(cluster) { + return false, errors.New("cluster topology workers are not set. please repair your cluster before trying again") + } + + return false, nil +} diff --git a/tkg/client/machine_deployment_cc.go b/tkg/client/machine_deployment_cc.go index 634ecbb2d7..a0ab9a48b2 100644 --- a/tkg/client/machine_deployment_cc.go +++ b/tkg/client/machine_deployment_cc.go @@ -26,10 +26,6 @@ func DoSetMachineDeploymentCC(clusterClient clusterclient.Client, cluster *capi. var update *capi.MachineDeploymentTopology var base *capi.MachineDeploymentTopology - if cluster.Spec.Topology.Workers == nil || len(cluster.Spec.Topology.Workers.MachineDeployments) < 1 { - return errors.New("cluster topology workers are not set. please repair your cluster before trying again") - } - mdIndex := -1 for i := range cluster.Spec.Topology.Workers.MachineDeployments { if cluster.Spec.Topology.Workers.MachineDeployments[i].Name == options.Name { diff --git a/tkg/client/machine_deployment_cc_test.go b/tkg/client/machine_deployment_cc_test.go index be2cb8cd0e..fabe37e278 100644 --- a/tkg/client/machine_deployment_cc_test.go +++ b/tkg/client/machine_deployment_cc_test.go @@ -6,13 +6,17 @@ package client_test import ( "encoding/json" "errors" + "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/utils/pointer" capi "sigs.k8s.io/cluster-api/api/v1beta1" . 
"github.com/vmware-tanzu/tanzu-framework/tkg/client" + "github.com/vmware-tanzu/tanzu-framework/tkg/clusterclient" + "github.com/vmware-tanzu/tanzu-framework/tkg/constants" "github.com/vmware-tanzu/tanzu-framework/tkg/fakes" ) @@ -425,6 +429,7 @@ var _ = Describe("SetMachineDeploymentCC", func() { JustBeforeEach(func() { err = DoSetMachineDeploymentCC(regionalClusterClient, &cluster, &options) + Expect(err).ToNot(HaveOccurred()) }) Context("adding a new MachineDeployment", func() { @@ -654,3 +659,200 @@ var _ = Describe("SetMachineDeploymentCC", func() { }) }) }) + +var _ = Describe("SetMachineDeploymentCC", func() { + Context("SetMachineDeployment", func() { + Context("When Cluster Class is enabled", func() { + var ( + clusterClientFactory *fakes.ClusterClientFactory + clusterClient *fakes.ClusterClient + featureFlagClient *fakes.FeatureFlagClient + options *SetMachineDeploymentOptions + tkgClient *TkgClient + ) + + BeforeEach(func() { + clusterClientFactory = &fakes.ClusterClientFactory{} + clusterClient = &fakes.ClusterClient{} + clusterClientFactory.NewClientReturns(clusterClient, nil) + clusterClient.IsClusterClassBasedReturns(true, nil) + featureFlagClient = &fakes.FeatureFlagClient{} + options = &SetMachineDeploymentOptions{ + ClusterName: "test-cluster", + Namespace: "default", + } + tkgClient, err = CreateTKGClientOptsMutator("../fakes/config/config2.yaml", testingDir, "../fakes/config/bom/tkg-bom-v1.3.1.yaml", 2*time.Second, func(o Options) Options { + o.ClusterClientFactory = clusterClientFactory + o.FeatureFlagClient = featureFlagClient + return o + }) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("When feature toggle is enabled", func() { + BeforeEach(func() { + featureFlagClient.IsConfigFeatureActivatedStub = func(featureFlagName string) (bool, error) { + if featureFlagName == constants.FeatureFlagSingleNodeClusters { + return true, nil + } + return false, nil + } + }) + + It("Should not create machine deployment when creating a single node cluster", func() { + clusterClient.GetResourceCalls(func(c interface{}, name, namespace string, pv clusterclient.PostVerifyrFunc, opt *clusterclient.PollOptions) error { + cc := c.(*capi.Cluster) + fc := singleNodeCluster() + fc.Spec.Topology.Workers = nil + *cc = *fc + return nil + }) + + err = tkgClient.SetMachineDeployment(options) + Expect(err).ToNot(HaveOccurred()) + + Expect(clusterClient.GetResourceCallCount()).To(Equal(1)) + Expect(clusterClient.UpdateResourceCallCount()).To(BeZero()) + + obj, _, _, _, _ := clusterClient.GetResourceArgsForCall(0) + cluster := obj.(*capi.Cluster) + Expect(*cluster.Spec.Topology.ControlPlane.Replicas).To(Equal(int32(1))) + Expect(cluster.Spec.Topology.Workers).To(BeNil()) + }) + + It("Should create machine deployment when creating a multi node cluster", func() { + clusterClient.GetResourceCalls(func(c interface{}, name, namespace string, pv clusterclient.PostVerifyrFunc, opt *clusterclient.PollOptions) error { + cc := c.(*capi.Cluster) + fc := multiNodeCluster() + *cc = *fc + return nil + }) + + err = tkgClient.SetMachineDeployment(multiNodeOptions(options)) + Expect(err).ToNot(HaveOccurred()) + + Expect(clusterClient.GetResourceCallCount()).To(Equal(1)) + Expect(clusterClient.UpdateResourceCallCount()).To(Equal(1)) + obj, _, _, _, _ := clusterClient.GetResourceArgsForCall(0) + cluster := obj.(*capi.Cluster) + Expect(*cluster.Spec.Topology.ControlPlane.Replicas).To(Equal(int32(1))) + Expect(*cluster.Spec.Topology.Workers.MachineDeployments[0].Replicas).To(Equal(int32(1))) + }) + }) + + 
Context("When feature toggle is disabled", func() { + BeforeEach(func() { + featureFlagClient.IsConfigFeatureActivatedStub = func(featureFlagName string) (bool, error) { + if featureFlagName == constants.FeatureFlagSingleNodeClusters { + return false, nil + } + return false, nil + } + }) + + It("Should create machine deployment", func() { + clusterClient.GetResourceCalls(func(c interface{}, name, namespace string, pv clusterclient.PostVerifyrFunc, opt *clusterclient.PollOptions) error { + cc := c.(*capi.Cluster) + fc := multiNodeCluster() + *cc = *fc + return nil + }) + + err = tkgClient.SetMachineDeployment(multiNodeOptions(options)) + Expect(err).ToNot(HaveOccurred()) + + Expect(clusterClient.GetResourceCallCount()).To(Equal(1)) + Expect(clusterClient.UpdateResourceCallCount()).To(Equal(1)) + obj, _, _, _, _ := clusterClient.GetResourceArgsForCall(0) + cluster := obj.(*capi.Cluster) + Expect(*cluster.Spec.Topology.ControlPlane.Replicas).To(Equal(int32(1))) + Expect(*cluster.Spec.Topology.Workers.MachineDeployments[0].Replicas).To(Equal(int32(1))) + }) + + It("Should return error when trying to create a single node cluster", func() { + clusterClient.GetResourceCalls(func(c interface{}, name, namespace string, pv clusterclient.PostVerifyrFunc, opt *clusterclient.PollOptions) error { + cc := c.(*capi.Cluster) + fc := singleNodeCluster() + fc.Spec.Topology.Workers = nil + *cc = *fc + return nil + }) + + err = tkgClient.SetMachineDeployment(options) + Expect(err).To(MatchError("cluster topology workers are not set. please repair your cluster before trying again")) + + Expect(clusterClient.GetResourceCallCount()).To(Equal(1)) + Expect(clusterClient.UpdateResourceCallCount()).To(BeZero()) + + obj, _, _, _, _ := clusterClient.GetResourceArgsForCall(0) + cluster := obj.(*capi.Cluster) + Expect(*cluster.Spec.Topology.ControlPlane.Replicas).To(Equal(int32(1))) + Expect(cluster.Spec.Topology.Workers).To(BeNil()) + }) + }) + }) + }) +}) + +func singleNodeCluster() *capi.Cluster { + return &capi.Cluster{ + Spec: capi.ClusterSpec{ + Topology: &capi.Topology{ + ControlPlane: capi.ControlPlaneTopology{ + Replicas: pointer.Int32(1), + }, + Workers: &capi.WorkersTopology{ + MachineDeployments: []capi.MachineDeploymentTopology{}, + }, + }, + }, + } +} + +func multiNodeCluster() *capi.Cluster { + worker0Raw, _ := json.Marshal(map[string]interface{}{ + "instanceType": "m5.large", + }) + + return &capi.Cluster{ + Spec: capi.ClusterSpec{ + Topology: &capi.Topology{ + ControlPlane: capi.ControlPlaneTopology{ + Replicas: pointer.Int32(1), + }, + Workers: &capi.WorkersTopology{ + MachineDeployments: []capi.MachineDeploymentTopology{ + { + Name: md0Name, + Replicas: pointer.Int32(1), + Class: tkgWorker, + Variables: &capi.MachineDeploymentVariables{ + Overrides: []capi.ClusterVariable{ + { + Name: "worker", + Value: v1.JSON{ + Raw: worker0Raw, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func multiNodeOptions(options *SetMachineDeploymentOptions) *SetMachineDeploymentOptions { + options.NodePool = NodePool{ + Labels: &map[string]string{ + "os": "ubuntu", + "arch": "amd64", + }, + Replicas: pointer.Int32(1), + WorkerClass: tkgWorker, + TKRResolver: osNameUbuntu, + } + return options +} diff --git a/tkg/client/test_utils.go b/tkg/client/test_utils.go index 68df51e483..4555878680 100644 --- a/tkg/client/test_utils.go +++ b/tkg/client/test_utils.go @@ -39,6 +39,10 @@ func createTKGClient(clusterConfigFile, configDir, defaultBomFile string, timeou return createTKGClientOpts(clusterConfigFile, configDir, 
defaultBomFile, timeout, func(options Options) Options { return options }) } +func CreateTKGClientOptsMutator(clusterConfigFile, configDir, defaultBomFile string, timeout time.Duration, optMutator func(options Options) Options) (*TkgClient, error) { + return createTKGClientOpts(clusterConfigFile, configDir, defaultBomFile, timeout, optMutator) +} + func createTKGClientOpts(clusterConfigFile, configDir, defaultBomFile string, timeout time.Duration, optMutator func(options Options) Options) (*TkgClient, error) { setupTestingFiles(clusterConfigFile, configDir, defaultBomFile) appConfig := types.AppConfig{ diff --git a/tkg/client/upgrade_cluster.go b/tkg/client/upgrade_cluster.go index 31009c64af..92dd9cde7e 100644 --- a/tkg/client/upgrade_cluster.go +++ b/tkg/client/upgrade_cluster.go @@ -1021,7 +1021,8 @@ func (c *TkgClient) createVsphereInfrastructureTemplateForUpgrade(regionalCluste vcClient, dcName, err := regionalClusterClient.GetVCClientAndDataCenter( clusterUpgradeConfig.ClusterName, clusterUpgradeConfig.ClusterNamespace, - kcp.Spec.MachineTemplate.InfrastructureRef.Name) + kcp.Spec.MachineTemplate.InfrastructureRef.Name, + c.vcClientFactory) if err != nil { return errors.Wrap(err, "unable to create vsphere client") } diff --git a/tkg/client/validate.go b/tkg/client/validate.go index 85860bd57c..61d0ac8630 100644 --- a/tkg/client/validate.go +++ b/tkg/client/validate.go @@ -467,7 +467,7 @@ func (c *TkgClient) GetVSphereEndpoint(clusterClient clusterclient.Client) (vc.C vsphereInsecure := (vsphereInsecureString == trueString) vsphereThumbprint, _ := c.TKGConfigReaderWriter().Get(constants.ConfigVariableVsphereTLSThumbprint) - return vc.GetAuthenticatedVCClient(server, username, password, vsphereThumbprint, vsphereInsecure) + return vc.GetAuthenticatedVCClient(server, username, password, vsphereThumbprint, vsphereInsecure, c.vcClientFactory) } vcHost, err := c.TKGConfigReaderWriter().Get(constants.ConfigVariableVsphereServer) @@ -500,7 +500,7 @@ func (c *TkgClient) GetVSphereEndpoint(clusterClient clusterclient.Client) (vc.C return nil, errors.Wrap(err, "failed to parse vc host") } vcURL.Path = "/sdk" - vcClient, err := vc.NewClient(vcURL, thumbprint, vsphereInsecure) + vcClient, err := c.vcClientFactory.NewClient(vcURL, thumbprint, vsphereInsecure) if err != nil { return nil, errors.Wrap(err, "failed to create vc client") } diff --git a/tkg/clusterclient/clusterclient.go b/tkg/clusterclient/clusterclient.go index 464f38e4f8..f06135830f 100644 --- a/tkg/clusterclient/clusterclient.go +++ b/tkg/clusterclient/clusterclient.go @@ -71,6 +71,7 @@ import ( runv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/run/v1alpha1" runv1alpha3 "github.com/vmware-tanzu/tanzu-framework/apis/run/v1alpha3" capdiscovery "github.com/vmware-tanzu/tanzu-framework/capabilities/client/pkg/discovery" + "github.com/vmware-tanzu/tanzu-framework/cli/runtime/config" tmcv1alpha1 "github.com/vmware-tanzu/tanzu-framework/tkg/api/tmc/v1alpha1" azureclient "github.com/vmware-tanzu/tanzu-framework/tkg/azure" "github.com/vmware-tanzu/tanzu-framework/tkg/buildinfo" @@ -244,7 +245,7 @@ type Client interface { // CloneWithTimeout returns a new client with the same attributes of the current one except for get client timeout settings CloneWithTimeout(getClientTimeout time.Duration) Client // GetVCClientAndDataCenter returns vsphere client and datacenter name by reading on cluster resources - GetVCClientAndDataCenter(clusterName, clusterNamespace, vsphereMachineTemplateObjectName string) (vc.Client, string, error) + 
GetVCClientAndDataCenter(clusterName, clusterNamespace, vsphereMachineTemplateObjectName string, vcClientFactory vc.VcClientFactory) (vc.Client, string, error) // PatchK8SVersionToPacificCluster patches the Pacific TKC object to update the k8s version on the cluster PatchK8SVersionToPacificCluster(clusterName, namespace string, kubernetesVersion string) error // WaitForPacificClusterK8sVersionUpdate waits for the Pacific TKC cluster to update k8s version @@ -778,6 +779,9 @@ func verifyKubernetesUpgradeForWorkerNodes(clusterStatusInfo *ClusterStatusInfo, } var desiredReplica int32 = 1 + if config.IsFeatureActivated(constants.FeatureFlagSingleNodeClusters) && len(clusterStatusInfo.WorkerMachineObjects) == 0 { + return nil + } errList := []error{} for i := range clusterStatusInfo.MDObjects { @@ -1034,12 +1038,16 @@ func (c *client) GetClusterStatusInfo(clusterName, namespace string, workloadClu errList = append(errList, err) } - if clusterStatusInfo.MDObjects, err = c.GetMDObjectForCluster(clusterName, namespace); err != nil { + if clusterStatusInfo.CPMachineObjects, clusterStatusInfo.WorkerMachineObjects, err = c.GetMachineObjectsForCluster(clusterName, namespace); err != nil { errList = append(errList, err) } - if clusterStatusInfo.CPMachineObjects, clusterStatusInfo.WorkerMachineObjects, err = c.GetMachineObjectsForCluster(clusterName, namespace); err != nil { - errList = append(errList, err) + singleNodeCluster := len(clusterStatusInfo.CPMachineObjects) == 1 && len(clusterStatusInfo.WorkerMachineObjects) == 0 + + if !singleNodeCluster { + if clusterStatusInfo.MDObjects, err = c.GetMDObjectForCluster(clusterName, namespace); err != nil { + errList = append(errList, err) + } } clusterStatusInfo.RetrievalError = kerrors.NewAggregate(errList) diff --git a/tkg/clusterclient/clusterclient_test.go b/tkg/clusterclient/clusterclient_test.go index f8b9971068..3f6454636f 100644 --- a/tkg/clusterclient/clusterclient_test.go +++ b/tkg/clusterclient/clusterclient_test.go @@ -130,6 +130,7 @@ var _ = Describe("Cluster Client", func() { currentNamespace string clientset *fakes.CRTClusterClient discoveryClient *fakes.DiscoveryClient + featureFlagClient *fakes.FeatureFlagClient err error poller *fakes.Poller kubeconfigbytes []byte @@ -161,6 +162,7 @@ var _ = Describe("Cluster Client", func() { crtClientFactory.NewClientReturns(clientset, nil) discoveryClientFactory = &fakes.DiscoveryClientFactory{} discoveryClientFactory.NewDiscoveryClientForConfigReturns(discoveryClient, nil) + featureFlagClient = &fakes.FeatureFlagClient{} poller.PollImmediateWithGetterCalls(func(interval, timeout time.Duration, getterFunc GetterFunc) (interface{}, error) { return getterFunc() }) @@ -532,6 +534,47 @@ var _ = Describe("Cluster Client", func() { Expect(err).NotTo(HaveOccurred()) }) }) + Context("Waits for single node cluster initialization", func() { + JustBeforeEach(func() { + machineObjects = append(machineObjects, getDummyMachine("fake-machine-1", "fake-new-version", true)) + clientset.GetCalls(func(ctx context.Context, namespace types.NamespacedName, cluster crtclient.Object) error { + conditions := capi.Conditions{} + conditions = append(conditions, capi.Condition{ + Type: capi.InfrastructureReadyCondition, + Status: corev1.ConditionTrue, + }) + conditions = append(conditions, capi.Condition{ + Type: capi.ControlPlaneReadyCondition, + Status: corev1.ConditionTrue, + }) + cluster.(*capi.Cluster).Status.Conditions = conditions + cluster.(*capi.Cluster).Spec.Topology = &capi.Topology{ + Workers: nil, + ControlPlane: 
capi.ControlPlaneTopology{ + Replicas: pointer.Int32(1), + }, + } + return nil + }) + clientset.ListCalls(func(ctx context.Context, o crtclient.ObjectList, opts ...crtclient.ListOption) error { + switch o := o.(type) { + case *capi.MachineList: + o.Items = append(o.Items, machineObjects...) + case *capi.MachineDeploymentList: + o.Items = []capi.MachineDeployment{} + case *controlplanev1.KubeadmControlPlaneList: + o.Items = append(o.Items, getDummyKCP(kcpReplicas.SpecReplica, kcpReplicas.Replicas, kcpReplicas.ReadyReplicas, kcpReplicas.UpdatedReplicas)) + default: + return errors.New("invalid object type") + } + return nil + }) + err = clstClient.WaitForClusterInitialized("fake-clusterName", "fake-namespace") + }) + It("should not return an error", func() { + Expect(err).NotTo(HaveOccurred()) + }) + }) }) Describe("Wait For Control plane available", func() { @@ -1516,6 +1559,7 @@ var _ = Describe("Cluster Client", func() { BeforeEach(func() { reInitialize() kubeConfigPath := getConfigFilePath("config1.yaml") + clstClient, err = NewClient(kubeConfigPath, "", clusterClientOptions) Expect(err).NotTo(HaveOccurred()) @@ -1614,6 +1658,33 @@ var _ = Describe("Cluster Client", func() { Expect(err).NotTo(HaveOccurred()) }) }) + + Context("When no replicas of worker machines exists", func() { + It("should not return an error", func() { + mdReplicas = Replicas{SpecReplica: 0, Replicas: 0, ReadyReplicas: 0, UpdatedReplicas: 0} + machineObjects = append(machineObjects, getDummyMachine("fake-machine-1", "fake-new-version", true)) + clientset.ListCalls(func(ctx context.Context, o crtclient.ObjectList, opts ...crtclient.ListOption) error { + switch o := o.(type) { + case *capi.MachineList: + o.Items = append(o.Items, machineObjects...) + case *capi.MachineDeploymentList: + o.Items = []capi.MachineDeployment{} + case *controlplanev1.KubeadmControlPlaneList: + o.Items = append(o.Items, getDummyKCP(kcpReplicas.SpecReplica, kcpReplicas.Replicas, kcpReplicas.ReadyReplicas, kcpReplicas.UpdatedReplicas)) + default: + return errors.New("invalid object type") + } + return nil + }) + featureFlagClient.IsConfigFeatureActivatedStub = func(featureFlagName string) (bool, error) { + if featureFlagName == constants.FeatureFlagSingleNodeClusters { + return true, nil + } + return true, nil + } + Expect(err).NotTo(HaveOccurred()) + }) + }) }) Describe("Wait for Pacific cluster Kubernetes version update ", func() { diff --git a/tkg/clusterclient/templates.go b/tkg/clusterclient/templates.go index 4d1b78a48f..d2630d75fe 100644 --- a/tkg/clusterclient/templates.go +++ b/tkg/clusterclient/templates.go @@ -10,7 +10,7 @@ import ( "github.com/vmware-tanzu/tanzu-framework/tkg/vc" ) -func (c *client) GetVCClientAndDataCenter(clusterName, clusterNamespace, vsphereMachineTemplateObjectName string) (vc.Client, string, error) { +func (c *client) GetVCClientAndDataCenter(clusterName, clusterNamespace, vsphereMachineTemplateObjectName string, vcClientFactory vc.VcClientFactory) (vc.Client, string, error) { if c.verificationClientFactory != nil && c.verificationClientFactory.GetVCClientAndDataCenter != nil { return c.verificationClientFactory.GetVCClientAndDataCenter(clusterName, clusterNamespace, vsphereMachineTemplateObjectName) } @@ -30,7 +30,7 @@ func (c *client) GetVCClientAndDataCenter(clusterName, clusterNamespace, vsphere // TODO: Read `vsphereInsecure`, `vsphereThumbprint` from cluster object // if this values are not available for old cluster use Insecure 'true' by default vsphereInsecure := true - vcClient, err := 
vc.GetAuthenticatedVCClient(vsphereServer, vsphereUsername, vspherePassword, "", vsphereInsecure) + vcClient, err := vc.GetAuthenticatedVCClient(vsphereServer, vsphereUsername, vspherePassword, "", vsphereInsecure, vcClientFactory) if err != nil { return nil, "", errors.Wrap(err, "unable to retrieve vSphere Client to retrieve VM Template") } diff --git a/tkg/constants/featureflags.go b/tkg/constants/featureflags.go index 28880ab722..9c4a9d86a6 100644 --- a/tkg/constants/featureflags.go +++ b/tkg/constants/featureflags.go @@ -35,4 +35,7 @@ const ( // based cluster even if user has done any customization to the provider templates // Note: This is a hidden feature-flag that doesn't get persisted to config.yaml by default FeatureFlagForceDeployClusterWithClusterClass = "features.cluster.force-deploy-cluster-with-clusterclass" + // FeatureFlagSingleNodeClusters is to enable Single Node Cluster deployment via tanzu CLI. + // Setting the feature flag to true will allow the creation of Single Node Clusters. + FeatureFlagSingleNodeClusters = "features.cluster.single-node-clusters" ) diff --git a/tkg/fakes/clusterclient.go b/tkg/fakes/clusterclient.go index 79467f6fc0..9ecbd7b04d 100644 --- a/tkg/fakes/clusterclient.go +++ b/tkg/fakes/clusterclient.go @@ -576,12 +576,13 @@ type ClusterClient struct { result1 []v1alpha1a.TanzuKubernetesRelease result2 error } - GetVCClientAndDataCenterStub func(string, string, string) (vc.Client, string, error) + GetVCClientAndDataCenterStub func(string, string, string, vc.VcClientFactory) (vc.Client, string, error) getVCClientAndDataCenterMutex sync.RWMutex getVCClientAndDataCenterArgsForCall []struct { arg1 string arg2 string arg3 string + arg4 vc.VcClientFactory } getVCClientAndDataCenterReturns struct { result1 vc.Client @@ -3942,20 +3943,21 @@ func (fake *ClusterClient) GetTanzuKubernetesReleasesReturnsOnCall(i int, result }{result1, result2} } -func (fake *ClusterClient) GetVCClientAndDataCenter(arg1 string, arg2 string, arg3 string) (vc.Client, string, error) { +func (fake *ClusterClient) GetVCClientAndDataCenter(arg1 string, arg2 string, arg3 string, arg4 vc.VcClientFactory) (vc.Client, string, error) { fake.getVCClientAndDataCenterMutex.Lock() ret, specificReturn := fake.getVCClientAndDataCenterReturnsOnCall[len(fake.getVCClientAndDataCenterArgsForCall)] fake.getVCClientAndDataCenterArgsForCall = append(fake.getVCClientAndDataCenterArgsForCall, struct { arg1 string arg2 string arg3 string - }{arg1, arg2, arg3}) + arg4 vc.VcClientFactory + }{arg1, arg2, arg3, arg4}) stub := fake.GetVCClientAndDataCenterStub fakeReturns := fake.getVCClientAndDataCenterReturns - fake.recordInvocation("GetVCClientAndDataCenter", []interface{}{arg1, arg2, arg3}) + fake.recordInvocation("GetVCClientAndDataCenter", []interface{}{arg1, arg2, arg3, arg4}) fake.getVCClientAndDataCenterMutex.Unlock() if stub != nil { - return stub(arg1, arg2, arg3) + return stub(arg1, arg2, arg3, arg4) } if specificReturn { return ret.result1, ret.result2, ret.result3 @@ -3969,17 +3971,17 @@ func (fake *ClusterClient) GetVCClientAndDataCenterCallCount() int { return len(fake.getVCClientAndDataCenterArgsForCall) } -func (fake *ClusterClient) GetVCClientAndDataCenterCalls(stub func(string, string, string) (vc.Client, string, error)) { +func (fake *ClusterClient) GetVCClientAndDataCenterCalls(stub func(string, string, string, vc.VcClientFactory) (vc.Client, string, error)) { fake.getVCClientAndDataCenterMutex.Lock() defer fake.getVCClientAndDataCenterMutex.Unlock() fake.GetVCClientAndDataCenterStub = stub 
} -func (fake *ClusterClient) GetVCClientAndDataCenterArgsForCall(i int) (string, string, string) { +func (fake *ClusterClient) GetVCClientAndDataCenterArgsForCall(i int) (string, string, string, vc.VcClientFactory) { fake.getVCClientAndDataCenterMutex.RLock() defer fake.getVCClientAndDataCenterMutex.RUnlock() argsForCall := fake.getVCClientAndDataCenterArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } func (fake *ClusterClient) GetVCClientAndDataCenterReturns(result1 vc.Client, result2 string, result3 error) { diff --git a/tkg/fakes/config/cluster_vsphere_snc.yaml b/tkg/fakes/config/cluster_vsphere_snc.yaml new file mode 100644 index 0000000000..ce4c31abaa --- /dev/null +++ b/tkg/fakes/config/cluster_vsphere_snc.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: Secret +metadata: + name: workload-vsphere-credential + namespace: default +stringData: + password: Admin!23 + username: administrator@vsphere.local +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + annotations: + osInfo: ",," + labels: + cluster-role.tkg.tanzu.vmware.com/management: "" + tanzuKubernetesRelease: v1.23.5---vmware.1-tkg.1-zshippable + tkg.tanzu.vmware.com/cluster-name: c1 + name: vsphere-workload-cluster1 # CLUSTER_NAME + namespace: namespace-test1 +spec: + clusterNetwork: + pods: + cidrBlocks: # CLUSTER_CIDR + - 10.10.10.10/18 + services: + cidrBlocks: # SERVICE_CIDR + - 100.64.0.0/18 + topology: + class: tkg-vsphere-default # CLUSTER_CLASS + controlPlane: + replicas: 1 # CONTROL_PLANE_MACHINE_COUNT + variables: + - name: controlPlaneTaint + value: false + version: v1.21.2 + workers: + machineDeployments: +--- +apiVersion: v1 +kind: Secret +metadata: + name: workload-vsphere-credential + namespace: default +stringData: + password: Admin!23 + username: administrator@vsphere.local diff --git a/tkg/fakes/config/cluster_vsphere_snc_cp_taint_true.yaml b/tkg/fakes/config/cluster_vsphere_snc_cp_taint_true.yaml new file mode 100644 index 0000000000..b709291194 --- /dev/null +++ b/tkg/fakes/config/cluster_vsphere_snc_cp_taint_true.yaml @@ -0,0 +1,27 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + annotations: + osInfo: ",," + labels: + cluster-role.tkg.tanzu.vmware.com/management: "" + tanzuKubernetesRelease: v1.23.5---vmware.1-tkg.1-zshippable + tkg.tanzu.vmware.com/cluster-name: c1 + name: vsphere-workload-cluster1 # CLUSTER_NAME + namespace: namespace-test1 +spec: + clusterNetwork: + pods: + cidrBlocks: # CLUSTER_CIDR + - 10.10.10.10/18 + services: + cidrBlocks: # SERVICE_CIDR + - 100.64.0.0/18 + topology: + class: tkg-vsphere-default # CLUSTER_CLASS + controlPlane: + replicas: 1 # CONTROL_PLANE_MACHINE_COUNT + variables: + - name: controlPlaneTaint + value: true + version: v1.21.2 diff --git a/tkg/fakes/config/cluster_vsphere_snc_invalid_cp_taint.yaml b/tkg/fakes/config/cluster_vsphere_snc_invalid_cp_taint.yaml new file mode 100644 index 0000000000..a0e94a23f1 --- /dev/null +++ b/tkg/fakes/config/cluster_vsphere_snc_invalid_cp_taint.yaml @@ -0,0 +1,27 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + annotations: + osInfo: ",," + labels: + cluster-role.tkg.tanzu.vmware.com/management: "" + tanzuKubernetesRelease: v1.23.5---vmware.1-tkg.1-zshippable + tkg.tanzu.vmware.com/cluster-name: c1 + name: vsphere-workload-cluster1 # CLUSTER_NAME + namespace: namespace-test1 +spec: + clusterNetwork: + pods: + cidrBlocks: # CLUSTER_CIDR + - 10.10.10.10/18 + services: + 
cidrBlocks: # SERVICE_CIDR + - 100.64.0.0/18 + topology: + class: tkg-vsphere-default # CLUSTER_CLASS + controlPlane: + replicas: 1 # CONTROL_PLANE_MACHINE_COUNT + variables: + - name: controlPlaneTaint + value: invalid control plane taint + version: v1.21.2 diff --git a/tkg/fakes/config/cluster_vsphere_snc_omit_workers.yaml b/tkg/fakes/config/cluster_vsphere_snc_omit_workers.yaml new file mode 100644 index 0000000000..ad76618f0c --- /dev/null +++ b/tkg/fakes/config/cluster_vsphere_snc_omit_workers.yaml @@ -0,0 +1,28 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + annotations: + osInfo: ",," + labels: + cluster-role.tkg.tanzu.vmware.com/management: "" + tanzuKubernetesRelease: v1.23.5---vmware.1-tkg.1-zshippable + tkg.tanzu.vmware.com/cluster-name: c1 + name: vsphere-workload-cluster1 # CLUSTER_NAME + namespace: namespace-test1 +spec: + clusterNetwork: + pods: + cidrBlocks: # CLUSTER_CIDR + - 10.10.10.10/18 + services: + cidrBlocks: # SERVICE_CIDR + - 100.64.0.0/18 + topology: + class: tkg-vsphere-default # CLUSTER_CLASS + controlPlane: + replicas: 1 # CONTROL_PLANE_MACHINE_COUNT + variables: + - name: controlPlaneTaint + value: true + + version: v1.21.2 diff --git a/tkg/fakes/tkgconfigreaderwriterclient.go b/tkg/fakes/tkgconfigreaderwriterclient.go new file mode 100644 index 0000000000..70ce3b82b6 --- /dev/null +++ b/tkg/fakes/tkgconfigreaderwriterclient.go @@ -0,0 +1,169 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package fakes + +import ( + "sync" + + "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" + + "github.com/vmware-tanzu/tanzu-framework/tkg/tkgconfigreaderwriter" +) + +type TKGConfigReaderWriterClient struct { + ClusterConfigClientStub func() config.Client + clusterConfigClientMutex sync.RWMutex + clusterConfigClientArgsForCall []struct { + } + clusterConfigClientReturns struct { + result1 config.Client + } + clusterConfigClientReturnsOnCall map[int]struct { + result1 config.Client + } + TKGConfigReaderWriterStub func() tkgconfigreaderwriter.TKGConfigReaderWriter + tKGConfigReaderWriterMutex sync.RWMutex + tKGConfigReaderWriterArgsForCall []struct { + } + tKGConfigReaderWriterReturns struct { + result1 tkgconfigreaderwriter.TKGConfigReaderWriter + } + tKGConfigReaderWriterReturnsOnCall map[int]struct { + result1 tkgconfigreaderwriter.TKGConfigReaderWriter + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *TKGConfigReaderWriterClient) ClusterConfigClient() config.Client { + fake.clusterConfigClientMutex.Lock() + ret, specificReturn := fake.clusterConfigClientReturnsOnCall[len(fake.clusterConfigClientArgsForCall)] + fake.clusterConfigClientArgsForCall = append(fake.clusterConfigClientArgsForCall, struct { + }{}) + stub := fake.ClusterConfigClientStub + fakeReturns := fake.clusterConfigClientReturns + fake.recordInvocation("ClusterConfigClient", []interface{}{}) + fake.clusterConfigClientMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *TKGConfigReaderWriterClient) ClusterConfigClientCallCount() int { + fake.clusterConfigClientMutex.RLock() + defer fake.clusterConfigClientMutex.RUnlock() + return len(fake.clusterConfigClientArgsForCall) +} + +func (fake *TKGConfigReaderWriterClient) ClusterConfigClientCalls(stub func() config.Client) { + fake.clusterConfigClientMutex.Lock() + defer fake.clusterConfigClientMutex.Unlock() + fake.ClusterConfigClientStub = stub +} + +func (fake *TKGConfigReaderWriterClient) 
ClusterConfigClientReturns(result1 config.Client) { + fake.clusterConfigClientMutex.Lock() + defer fake.clusterConfigClientMutex.Unlock() + fake.ClusterConfigClientStub = nil + fake.clusterConfigClientReturns = struct { + result1 config.Client + }{result1} +} + +func (fake *TKGConfigReaderWriterClient) ClusterConfigClientReturnsOnCall(i int, result1 config.Client) { + fake.clusterConfigClientMutex.Lock() + defer fake.clusterConfigClientMutex.Unlock() + fake.ClusterConfigClientStub = nil + if fake.clusterConfigClientReturnsOnCall == nil { + fake.clusterConfigClientReturnsOnCall = make(map[int]struct { + result1 config.Client + }) + } + fake.clusterConfigClientReturnsOnCall[i] = struct { + result1 config.Client + }{result1} +} + +func (fake *TKGConfigReaderWriterClient) TKGConfigReaderWriter() tkgconfigreaderwriter.TKGConfigReaderWriter { + fake.tKGConfigReaderWriterMutex.Lock() + ret, specificReturn := fake.tKGConfigReaderWriterReturnsOnCall[len(fake.tKGConfigReaderWriterArgsForCall)] + fake.tKGConfigReaderWriterArgsForCall = append(fake.tKGConfigReaderWriterArgsForCall, struct { + }{}) + stub := fake.TKGConfigReaderWriterStub + fakeReturns := fake.tKGConfigReaderWriterReturns + fake.recordInvocation("TKGConfigReaderWriter", []interface{}{}) + fake.tKGConfigReaderWriterMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *TKGConfigReaderWriterClient) TKGConfigReaderWriterCallCount() int { + fake.tKGConfigReaderWriterMutex.RLock() + defer fake.tKGConfigReaderWriterMutex.RUnlock() + return len(fake.tKGConfigReaderWriterArgsForCall) +} + +func (fake *TKGConfigReaderWriterClient) TKGConfigReaderWriterCalls(stub func() tkgconfigreaderwriter.TKGConfigReaderWriter) { + fake.tKGConfigReaderWriterMutex.Lock() + defer fake.tKGConfigReaderWriterMutex.Unlock() + fake.TKGConfigReaderWriterStub = stub +} + +func (fake *TKGConfigReaderWriterClient) TKGConfigReaderWriterReturns(result1 tkgconfigreaderwriter.TKGConfigReaderWriter) { + fake.tKGConfigReaderWriterMutex.Lock() + defer fake.tKGConfigReaderWriterMutex.Unlock() + fake.TKGConfigReaderWriterStub = nil + fake.tKGConfigReaderWriterReturns = struct { + result1 tkgconfigreaderwriter.TKGConfigReaderWriter + }{result1} +} + +func (fake *TKGConfigReaderWriterClient) TKGConfigReaderWriterReturnsOnCall(i int, result1 tkgconfigreaderwriter.TKGConfigReaderWriter) { + fake.tKGConfigReaderWriterMutex.Lock() + defer fake.tKGConfigReaderWriterMutex.Unlock() + fake.TKGConfigReaderWriterStub = nil + if fake.tKGConfigReaderWriterReturnsOnCall == nil { + fake.tKGConfigReaderWriterReturnsOnCall = make(map[int]struct { + result1 tkgconfigreaderwriter.TKGConfigReaderWriter + }) + } + fake.tKGConfigReaderWriterReturnsOnCall[i] = struct { + result1 tkgconfigreaderwriter.TKGConfigReaderWriter + }{result1} +} + +func (fake *TKGConfigReaderWriterClient) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.clusterConfigClientMutex.RLock() + defer fake.clusterConfigClientMutex.RUnlock() + fake.tKGConfigReaderWriterMutex.RLock() + defer fake.tKGConfigReaderWriterMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *TKGConfigReaderWriterClient) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if 
fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ tkgconfigreaderwriter.Client = new(TKGConfigReaderWriterClient) diff --git a/tkg/fakes/vcclientfactory.go b/tkg/fakes/vcclientfactory.go new file mode 100644 index 0000000000..ee7de5a815 --- /dev/null +++ b/tkg/fakes/vcclientfactory.go @@ -0,0 +1,121 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package fakes + +import ( + "net/url" + "sync" + + "github.com/vmware-tanzu/tanzu-framework/tkg/vc" +) + +type VcClientFactory struct { + NewClientStub func(*url.URL, string, bool) (vc.Client, error) + newClientMutex sync.RWMutex + newClientArgsForCall []struct { + arg1 *url.URL + arg2 string + arg3 bool + } + newClientReturns struct { + result1 vc.Client + result2 error + } + newClientReturnsOnCall map[int]struct { + result1 vc.Client + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *VcClientFactory) NewClient(arg1 *url.URL, arg2 string, arg3 bool) (vc.Client, error) { + fake.newClientMutex.Lock() + ret, specificReturn := fake.newClientReturnsOnCall[len(fake.newClientArgsForCall)] + fake.newClientArgsForCall = append(fake.newClientArgsForCall, struct { + arg1 *url.URL + arg2 string + arg3 bool + }{arg1, arg2, arg3}) + stub := fake.NewClientStub + fakeReturns := fake.newClientReturns + fake.recordInvocation("NewClient", []interface{}{arg1, arg2, arg3}) + fake.newClientMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *VcClientFactory) NewClientCallCount() int { + fake.newClientMutex.RLock() + defer fake.newClientMutex.RUnlock() + return len(fake.newClientArgsForCall) +} + +func (fake *VcClientFactory) NewClientCalls(stub func(*url.URL, string, bool) (vc.Client, error)) { + fake.newClientMutex.Lock() + defer fake.newClientMutex.Unlock() + fake.NewClientStub = stub +} + +func (fake *VcClientFactory) NewClientArgsForCall(i int) (*url.URL, string, bool) { + fake.newClientMutex.RLock() + defer fake.newClientMutex.RUnlock() + argsForCall := fake.newClientArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *VcClientFactory) NewClientReturns(result1 vc.Client, result2 error) { + fake.newClientMutex.Lock() + defer fake.newClientMutex.Unlock() + fake.NewClientStub = nil + fake.newClientReturns = struct { + result1 vc.Client + result2 error + }{result1, result2} +} + +func (fake *VcClientFactory) NewClientReturnsOnCall(i int, result1 vc.Client, result2 error) { + fake.newClientMutex.Lock() + defer fake.newClientMutex.Unlock() + fake.NewClientStub = nil + if fake.newClientReturnsOnCall == nil { + fake.newClientReturnsOnCall = make(map[int]struct { + result1 vc.Client + result2 error + }) + } + fake.newClientReturnsOnCall[i] = struct { + result1 vc.Client + result2 error + }{result1, result2} +} + +func (fake *VcClientFactory) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.newClientMutex.RLock() + defer fake.newClientMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *VcClientFactory) recordInvocation(key 
+	fake.invocationsMutex.Lock()
+	defer fake.invocationsMutex.Unlock()
+	if fake.invocations == nil {
+		fake.invocations = map[string][][]interface{}{}
+	}
+	if fake.invocations[key] == nil {
+		fake.invocations[key] = [][]interface{}{}
+	}
+	fake.invocations[key] = append(fake.invocations[key], args)
+}
+
+var _ vc.VcClientFactory = new(VcClientFactory)
diff --git a/tkg/tkgconfigreaderwriter/client.go b/tkg/tkgconfigreaderwriter/client.go
index bb63228006..310534f471 100644
--- a/tkg/tkgconfigreaderwriter/client.go
+++ b/tkg/tkgconfigreaderwriter/client.go
@@ -9,6 +9,8 @@ import (
 	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
 )
 
+//go:generate counterfeiter -o ../fakes/tkgconfigreaderwriterclient.go --fake-name TKGConfigReaderWriterClient . Client
+
 // Client implements tkg config client interface
 type Client interface {
 	// ClusterConfigClient returns clusterctl config client
diff --git a/tkg/tkgctl/client.go b/tkg/tkgctl/client.go
index a04cf9669d..3de6daf525 100644
--- a/tkg/tkgctl/client.go
+++ b/tkg/tkgctl/client.go
@@ -25,6 +25,7 @@ import (
 	providergetterclient "github.com/vmware-tanzu/tanzu-framework/tkg/tkgctl/client"
 	"github.com/vmware-tanzu/tanzu-framework/tkg/types"
 	"github.com/vmware-tanzu/tanzu-framework/tkg/utils"
+	"github.com/vmware-tanzu/tanzu-framework/tkg/vc"
 )
 
 type tkgctl struct {
@@ -136,6 +137,7 @@ func New(options Options) (TKGClient, error) { //nolint:gocritic
 		TKGPathsClient:       allClients.TKGConfigPathsClient,
 		ClusterKubeConfig:    clusterKubeConfig,
 		ClusterClientFactory: clusterclient.NewClusterClientFactory(),
+		VcClientFactory:      vc.NewVcClientFactory(),
 		FeatureFlagClient:    allClients.FeatureFlagClient,
 	})
 	if err != nil {
diff --git a/tkg/tkgctl/create_cluster.go b/tkg/tkgctl/create_cluster.go
index bb85763115..c4b7a6b6c5 100644
--- a/tkg/tkgctl/create_cluster.go
+++ b/tkg/tkgctl/create_cluster.go
@@ -481,3 +481,9 @@ func (t *tkgctl) getAndDownloadTkrIfNeeded(tkrVersion string) (string, string, e
 
 	return tkrVersion, k8sVersion, nil
 }
+
+// # val 1 - a single node cluster cannot be created without the feature toggle
+//   - if the feature toggle is true  -> allow a control plane count of 1 and a worker count of 0
+//   - if the feature toggle is false -> the worker count cannot be 0; fail with an error stating the minimum worker count is 1
+// # val 2 - the control plane taint must be removed for a single node cluster
+//   - if the feature toggle is true  -> the controlPlaneTaint variable must be set to false for a single node cluster, otherwise creation fails
diff --git a/tkg/vc/client.go b/tkg/vc/client.go
index 9024e497e6..536a4736d9 100644
--- a/tkg/vc/client.go
+++ b/tkg/vc/client.go
@@ -52,7 +52,7 @@ type DefaultClient struct {
 }
 
 // GetAuthenticatedVCClient returns authenticated VC client
-func GetAuthenticatedVCClient(vcHost, vcUsername, vcPassword, thumbprint string, insecure bool) (Client, error) {
+func GetAuthenticatedVCClient(vcHost, vcUsername, vcPassword, thumbprint string, insecure bool, vcClientFactory VcClientFactory) (Client, error) {
 	host := strings.TrimSpace(vcHost)
 	if !strings.HasPrefix(host, "http") {
 		host = "https://" + host
@@ -62,7 +62,7 @@ func GetAuthenticatedVCClient(vcHost, vcUsername, vcPassword, thumbprint string,
 		return nil, errors.Wrap(err, "failed to parse vc host")
 	}
 	vcURL.Path = "/sdk"
-	vcClient, err := NewClient(vcURL, thumbprint, insecure)
+	vcClient, err := vcClientFactory.NewClient(vcURL, thumbprint, insecure)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create vc client")
 	}
diff --git a/tkg/vc/interface.go b/tkg/vc/interface.go
index 31655d9ae1..59e911f0bb 100644
--- a/tkg/vc/interface.go
+++ b/tkg/vc/interface.go
@@ -6,6 +6,7 @@ package vc
 
 import (
 	"context"
+	"net/url"
"github.com/vmware-tanzu/tanzu-framework/tkg/tkgconfigreaderwriter" tkgtypes "github.com/vmware-tanzu/tanzu-framework/tkg/types" @@ -29,6 +30,25 @@ const ( TypeVirtualMachine = "VirtualMachine" ) +//go:generate counterfeiter -o ../fakes/vcclientfactory.go --fake-name VcClientFactory . VcClientFactory + +// VcClientFactory a factory for creating VC clients +type VcClientFactory interface { + NewClient(vcURL *url.URL, thumbprint string, insecure bool) (Client, error) +} + +type vcClientFactory struct{} + +// NewClient creates new clusterclient +func (c *vcClientFactory) NewClient(vcURL *url.URL, thumbprint string, insecure bool) (Client, error) { //nolint:gocritic + return NewClient(vcURL, thumbprint, insecure) +} + +// NewVcClientFactory creates new vcclient factory +func NewVcClientFactory() VcClientFactory { + return &vcClientFactory{} +} + //go:generate counterfeiter -o ../fakes/vcclient.go --fake-name VCClient . Client // Client represents a vCenter client diff --git a/util/topology/cluster.go b/util/topology/cluster.go index c88020b7c0..019f23d7ad 100644 --- a/util/topology/cluster.go +++ b/util/topology/cluster.go @@ -9,6 +9,7 @@ import ( "github.com/pkg/errors" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -76,6 +77,18 @@ func GetMDVariable(cluster *clusterv1.Cluster, mdIndex int, name string, value i return errors.Wrap(err, "unmarshalling from JSON into value") } +// IsSingleNodeCluster checks if the cluster topology is single node cluster(with CP count as 1 and worker count as 0). +// Pre-reqs: cluster.Spec.Topology != nil +func IsSingleNodeCluster(cluster *clusterv1.Cluster) bool { + return *cluster.Spec.Topology.ControlPlane.Replicas == *pointer.Int32(1) && HasWorkerNodes(cluster) +} + +// HasWorkerNodes checks if the cluster topology has worker nodes. 
+// Pre-req: cluster.Spec.Topology != nil
+func HasWorkerNodes(cluster *clusterv1.Cluster) bool {
+	return cluster.Spec.Topology.Workers == nil || len(cluster.Spec.Topology.Workers.MachineDeployments) == 0
+}
+
 func jsonValue(value interface{}) (*apiextensionsv1.JSON, error) {
 	data, err := json.Marshal(value)
 	if err != nil {
diff --git a/util/topology/cluster_test.go b/util/topology/cluster_test.go
index 05e83193af..7fc65eac1c 100644
--- a/util/topology/cluster_test.go
+++ b/util/topology/cluster_test.go
@@ -149,4 +149,48 @@ var _ = Describe("Cluster variable getters and setters", func() {
 			})
 		})
 	})
+
+	Describe("IsSingleNodeCluster()", func() {
+		It("should return true only for a topology with one control plane replica and no workers", func() {
+			cluster.Spec.Topology = &clusterv1.Topology{
+				ControlPlane: clusterv1.ControlPlaneTopology{
+					Replicas: pointer.Int32(1),
+				},
+				Workers: &clusterv1.WorkersTopology{
+					MachineDeployments: []clusterv1.MachineDeploymentTopology{
+						{
+							Replicas: pointer.Int32(1),
+						},
+					},
+				},
+			}
+			Expect(IsSingleNodeCluster(cluster)).To(BeFalse())
+			cluster.Spec.Topology = &clusterv1.Topology{
+				ControlPlane: clusterv1.ControlPlaneTopology{
+					Replicas: pointer.Int32(1),
+				},
+				Workers: nil,
+			}
+			Expect(IsSingleNodeCluster(cluster)).To(BeTrue())
+		})
+	})
+
+	Describe("HasWorkerNodes()", func() {
+		It("should return true only when no worker machine deployments are defined", func() {
+			cluster.Spec.Topology = &clusterv1.Topology{
+				Workers: &clusterv1.WorkersTopology{
+					MachineDeployments: []clusterv1.MachineDeploymentTopology{
+						{
+							Replicas: pointer.Int32(1),
+						},
+					},
+				},
+			}
+			Expect(HasWorkerNodes(cluster)).To(BeFalse())
+			cluster.Spec.Topology = &clusterv1.Topology{
+				Workers: nil,
+			}
+			Expect(HasWorkerNodes(cluster)).To(BeTrue())
+		})
+	})
 })
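
For reviewers who want to exercise the new topology helpers and the controlPlaneTaint class variable outside the patched code paths, here is a minimal, hypothetical usage sketch (not part of the patch). It only uses the helpers added in util/topology/cluster.go and the existing topology.GetVariable; the package name, literal values, and the choice to set the variable to false are illustrative assumptions.

package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/utils/pointer"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

	"github.com/vmware-tanzu/tanzu-framework/util/topology"
)

func main() {
	// A topology with one control plane replica and no worker machine
	// deployments, i.e. the single node shape targeted by this change.
	cluster := &clusterv1.Cluster{}
	cluster.Spec.Topology = &clusterv1.Topology{
		ControlPlane: clusterv1.ControlPlaneTopology{Replicas: pointer.Int32(1)},
		Workers:      nil,
		Variables: []clusterv1.ClusterVariable{
			// Hypothetical value; the ClusterClass defaults controlPlaneTaint to true.
			{Name: "controlPlaneTaint", Value: apiextensionsv1.JSON{Raw: []byte("false")}},
		},
	}

	fmt.Println(topology.IsSingleNodeCluster(cluster)) // true

	// Start from the ClusterClass default (true) and overwrite it with
	// whatever the cluster spec carries for the variable.
	controlPlaneTaint := true
	if err := topology.GetVariable(cluster, "controlPlaneTaint", &controlPlaneTaint); err != nil {
		fmt.Println("failed to read controlPlaneTaint:", err)
		return
	}
	fmt.Println(controlPlaneTaint) // false
}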