diff --git a/docs/data-sources/aws_nodepool.md b/docs/data-sources/aws_nodepool.md
index f532f5f..254c32c 100644
--- a/docs/data-sources/aws_nodepool.md
+++ b/docs/data-sources/aws_nodepool.md
@@ -1,12 +1,12 @@
 ---
-page_title: "TMC: tmc_nodepool"
+page_title: "TMC: tmc_aws_nodepool"
 layout: "tmc"
 subcategory: "TKG Cluster"
 description: |-
-  Get information on a specific nodepool of a AWS cluster in Tanzu Mission Control (TMC)
+  Get information on a specific nodepool of an AWS cluster in Tanzu Mission Control (TMC)
 ---
 
-# Data Source: tmc_cluster
+# Data Source: tmc_aws_nodepool
 
-The TMC Nodepool data resource can be used to get the information of a nodepool for a AWS cluster in Tanzu Mission Control (TMC).
+The TMC AWS Nodepool data source can be used to get the information of a nodepool for an AWS cluster in Tanzu Mission Control (TMC).
@@ -31,9 +31,9 @@ The following arguments are supported:
 ## Attributes Reference
 
 * `id` - The UID of the Tanzu Cluster.
-* `description` - (Optional) The description of the nodepool.
-* `node_labels` - (Optional) A map of node labels to assign to the resource.
-* `cloud_labels` - (Optional) A map of cloud labels to assign to the resource.
+* `description` - The description of the nodepool.
+* `node_labels` - A map of node labels to assign to the resource.
+* `cloud_labels` - A map of cloud labels to assign to the resource.
 * `availability_zone` - The AWS availability zone for the cluster's worker nodes.
 * `instance_type` - Instance type of the EC2 nodes to be used as part of the nodepool.
 * `version` - Version of Kubernetes to be used in the cluster.
diff --git a/docs/data-sources/management_cluster.md b/docs/data-sources/management_cluster.md
new file mode 100644
index 0000000..5e4e6af
--- /dev/null
+++ b/docs/data-sources/management_cluster.md
@@ -0,0 +1,32 @@
+---
+page_title: "TMC: tmc_management_cluster"
+layout: "tmc"
+subcategory: "Tanzu Management Clusters"
+description: |-
+  Get information on a Management Cluster in the TMC platform
+---
+
+# Data Source: tmc_management_cluster
+
+Use this data source to get the details about a management cluster in the TMC platform.
+
+## Example Usage
+```terraform
+# Get details of a management cluster in the TMC platform.
+data "tmc_management_cluster" "example" {
+  name = "example-cluster"
+}
+```
+
+## Argument Reference
+
+* `name` - (Required) The name of the management cluster to look up in the TMC platform.
+
+## Attributes Reference
+
+* `id` - Unique identifier (UID) of the management cluster in the TMC platform.
+* `description` - Description of the management cluster.
+* `labels` - A mapping of labels of the resource.
+* `kubernetes_provider_type` - Type of cluster registered into TMC. Can be one of `tkg`, `tkgservice`, `tkghosted` or `other`.
+* `default_cluster_group` - Default cluster group for the workload clusters.
+* `registration_url` - A URL to fetch the Tanzu Agent installation YAML needed to establish a connection to the registered cluster (if available).
diff --git a/docs/data-sources/namespace.md b/docs/data-sources/namespace.md
new file mode 100644
index 0000000..fa28962
--- /dev/null
+++ b/docs/data-sources/namespace.md
@@ -0,0 +1,36 @@
+---
+page_title: "TMC: tmc_namespace"
+layout: "tmc"
+subcategory: "Tanzu Namespace"
+description: |-
+  Get information on a specific namespace of a cluster in Tanzu Mission Control (TMC)
+---
+
+# Data Source: tmc_namespace
+
+The TMC Namespace data source can be used to get the information of a namespace for a cluster in Tanzu Mission Control (TMC).
+
+```terraform
+data "tmc_namespace" "example" {
+  cluster_name       = "example-cluster"
+  management_cluster = "example-hosted"
+  provisioner_name   = "example-provisioner"
+  name               = "example-ns"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the namespace.
+* `cluster_name` - (Required) The name of the Tanzu Cluster the namespace belongs to.
+* `management_cluster` - (Required) Name of the management cluster used to provision the cluster.
+* `provisioner_name` - (Required) Name of the provisioner used.
+
+## Attributes Reference
+
+* `id` - The UID of the Tanzu Namespace.
+* `description` - The description of the namespace.
+* `workspace_name` - Name of the workspace of the namespace.
+* `labels` - A map of labels assigned to the resource.
diff --git a/docs/resources/aws_nodepool.md b/docs/resources/aws_nodepool.md
index 7c46401..554d497 100644
--- a/docs/resources/aws_nodepool.md
+++ b/docs/resources/aws_nodepool.md
@@ -1,12 +1,12 @@
 ---
-page_title: "TMC: tmc_nodepool"
+page_title: "TMC: tmc_aws_nodepool"
 layout: "tmc"
 subcategory: "TKG Cluster"
 description: |-
-  Creates and manages a nodepool for a AWS cluster in the TMC platform
+  Creates and manages a nodepool for an AWS cluster in the TMC platform
 ---
 
-# Resource: tmc_cluster
+# Resource: tmc_aws_nodepool
 
-The TMC Cluster resource allows requesting the creation of a nodepool for a AWS cluster in Tanzu Mission Control (TMC).
+The TMC AWS Nodepool resource allows requesting the creation of a nodepool for an AWS cluster in Tanzu Mission Control (TMC).
diff --git a/docs/resources/management_cluster.md b/docs/resources/management_cluster.md
new file mode 100644
index 0000000..1832053
--- /dev/null
+++ b/docs/resources/management_cluster.md
@@ -0,0 +1,39 @@
+---
+page_title: "TMC: tmc_management_cluster"
+layout: "tmc"
+subcategory: "Tanzu Management Clusters"
+description: |-
+  Creates and registers a Management Cluster in the TMC platform
+---
+
+# Resource: tmc_management_cluster
+
+The TMC Management Cluster resource allows requesting the creation of a management cluster in Tanzu Mission Control (TMC).
+
+!> **Note**: This resource does not support the `update` operation; any change forces it to be destroyed and recreated.
+
+```terraform
+resource "tmc_management_cluster" "example" {
+  name                     = "tf-mgmt-cluster"
+  description              = "terraform created mgmt cluster"
+  kubernetes_provider_type = "tkg"
+  default_cluster_group    = "default"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the Tanzu Management Cluster. Changing the name forces recreation of this resource.
+* `description` - (Optional) The description of the Tanzu Management Cluster.
+* `labels` - (Optional) A map of labels to assign to the resource.
+* `kubernetes_provider_type` - (Required) Type of cluster to be registered into TMC. Can be one of `tkg`, `tkgservice`, `tkghosted` or `other`.
+* `default_cluster_group` - (Required) Default cluster group for the workload clusters.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The UID of the Tanzu Management Cluster.
+* `registration_url` - A URL to fetch the Tanzu Agent installation YAML needed to establish a connection to the registered cluster.
diff --git a/docs/resources/namespace.md b/docs/resources/namespace.md
new file mode 100644
index 0000000..86b51ce
--- /dev/null
+++ b/docs/resources/namespace.md
@@ -0,0 +1,43 @@
+---
+page_title: "TMC: tmc_namespace"
+layout: "tmc"
+subcategory: "Tanzu Namespace"
+description: |-
+  Creates and manages a namespace for a cluster in the TMC platform
+---
+
+# Resource: tmc_namespace
+
+The TMC Namespace resource allows requesting the creation of a namespace for a cluster in Tanzu Mission Control (TMC).
+
+```terraform
+resource "tmc_namespace" "example" {
+  name               = "example-ns"
+  description        = "terraform created namespace"
+  cluster_name       = "example-cluster"
+  management_cluster = "example-hosted"
+  provisioner_name   = "example-provisioner"
+  workspace_name     = "default"
+  labels = {
+    "CreatedBy" = "terraform"
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the Namespace. Changing the name forces recreation of this resource.
+* `description` - (Optional) The description of the namespace.
+* `cluster_name` - (Required) The name of the Tanzu Cluster in which the namespace is to be created.
+* `management_cluster` - (Required) Name of the management cluster used to provision the cluster.
+* `provisioner_name` - (Required) Name of the provisioner to be used.
+* `workspace_name` - (Required) Name of the workspace for the created namespace.
+* `labels` - (Optional) A map of labels to assign to the resource.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attribute is exported:
+
+* `id` - The UID of the Tanzu Namespace.
\ No newline at end of file
diff --git a/docs/resources/tmc_vsphere_cluster.md b/docs/resources/tmc_vsphere_cluster.md
new file mode 100644
index 0000000..b95c563
--- /dev/null
+++ b/docs/resources/tmc_vsphere_cluster.md
@@ -0,0 +1,77 @@
+---
+page_title: "TMC: tmc_vsphere_cluster"
+layout: "tmc"
+subcategory: "TKG Cluster"
+description: |-
+  Creates and manages a vSphere Cluster in the TMC platform
+---
+
+# Resource: tmc_vsphere_cluster
+
+The TMC vSphere Cluster resource allows requesting the creation of a vSphere cluster in Tanzu Mission Control (TMC). It also manages the attributes and lifecycle of the cluster.
+
+```terraform
+resource "tmc_vsphere_cluster" "example" {
+  name               = "example-vsphere-cluster"
+  management_cluster = "example-vsphere-mgmt-cluster"
+  provisioner_name   = "example-provisioner"
+
+  version       = "v1.21.6+vmware.1-tkg.1.b3d708a"
+  cluster_group = "default"
+
+  control_plane_spec {
+    class         = "best-effort-xsmall"
+    storage_class = "vsphere-tanzu-example-storage-policy"
+  }
+
+  nodepool {
+    nodepool_name      = "example-nodepool"
+    worker_node_count  = 1
+    node_class         = "best-effort-small"
+    node_storage_class = "vsphere-tanzu-example-storage-policy"
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) (Forces Replacement) The name of the Tanzu Cluster.
+* `description` - (Optional) (Forces Replacement) The description of the Tanzu Cluster.
+* `labels` - (Optional) A map of labels to assign to the resource.
+* `cluster_group` - (Required) Name of the cluster group the cluster belongs to.
+* `management_cluster` - (Required) (Forces Replacement) Name of the management cluster used to provision the cluster.
+* `provisioner_name` - (Required) (Forces Replacement) Name of the provisioner to be used.
+* `version` - (Required) (Forces Replacement) Version of Kubernetes to be used in the cluster.
+* `pod_cidrblock` - (Optional) (Forces Replacement) Pod CIDR for Kubernetes pods. Defaults to 192.168.0.0/16.
+* `service_cidrblock` - (Optional) (Forces Replacement) Service CIDR for Kubernetes services. Defaults to 10.96.0.0/12.
+* [`control_plane_spec`](#control_plane_spec) - (Required) (Forces Replacement) Contains information related to the Control Plane of the cluster.
+* [`nodepool`](#nodepool) - (Required) (Forces Replacement) Contains information related to the Nodepool of the cluster.
+
+## Nested Blocks
+
+#### `control_plane_spec`
+
+#### Arguments
+
+* `class` - (Required) Indicates the size of the VMs to be provisioned.
+* `storage_class` - (Required) Storage Class to be used for storage of the disks which store the root filesystems of the nodes.
+
+#### `nodepool`
+
+#### Arguments
+
+* `nodepool_name` - (Required) Determines the name of the nodepool.
+* `worker_node_count` - (Required) Determines the number of worker nodes provisioned.
+* `node_class` - (Required) Determines the class of the worker node.
+* `node_storage_class` - (Required) Determines the storage policy used for the worker node.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The UID of the Tanzu Cluster.
+* `resource_version` - An identifier used to track changes to the resource.
diff --git a/tanzuclient/mgmt_cluster.go b/tanzuclient/mgmt_cluster.go
new file mode 100644
index 0000000..2f58f9c
--- /dev/null
+++ b/tanzuclient/mgmt_cluster.go
@@ -0,0 +1,107 @@
+package tanzuclient
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+var k8sProviderTypeMap = map[string]string{
+	"tkg":        "VMWARE_TANZU_KUBERNETES_GRID",
+	"tkgservice": "VMWARE_TANZU_KUBERNETES_GRID_SERVICE",
+	"tkghosted":  "VMWARE_TANZU_KUBERNETES_GRID_HOSTED",
+	"other":      "KUBERNETES_PROVIDER_UNSPECIFIED",
+}
+
+type MgmtClusterSpec struct {
+	KubernetesProviderType string `json:"kubernetesProviderType"`
+	DefaultClusterGroup    string `json:"defaultClusterGroup"`
+}
+
+type ManagementCluster struct {
+	FullName *FullName        `json:"fullName"`
+	Meta     *MetaData        `json:"meta"`
+	Spec     *MgmtClusterSpec `json:"spec"`
+	Status   struct {
+		RegistrationURL string `json:"registrationUrl,omitempty"`
+	}
+}
+
+type MgmtClusterJsonObject struct {
+	MgmtCluster ManagementCluster `json:"managementCluster"`
+}
+
+func (c *Client) CreateMgmtCluster(name string, defaultCg string, k8sProviderType string, description string, labels map[string]interface{}) (*ManagementCluster, error) {
+	requestURL := fmt.Sprintf("%s/v1alpha1/managementclusters", c.baseURL)
+
+	newMgmtCluster := &ManagementCluster{
+		FullName: &FullName{
+			Name: name,
+		},
+		Meta: &MetaData{
+			Description: description,
+			Labels:      labels,
+		},
+		Spec: &MgmtClusterSpec{
+			KubernetesProviderType: k8sProviderTypeMap[k8sProviderType],
+			DefaultClusterGroup:    defaultCg,
+		},
+	}
+
+	newMgmtClusterObject := &MgmtClusterJsonObject{
+		MgmtCluster: *newMgmtCluster,
+	}
+
+	json_data, err := json.Marshal(newMgmtClusterObject) // returns []byte
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(json_data))
+	if err != nil {
+		return nil, err
+	}
+
+	res := MgmtClusterJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return nil, err
+	}
+
+	return &res.MgmtCluster, nil
+}
+
+func (c *Client) GetMgmtCluster(name string) (*ManagementCluster, error) {
+	requestURL := fmt.Sprintf("%s/v1alpha1/managementclusters/%s", c.baseURL, name)
+
+	req, err := http.NewRequest("GET", requestURL, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	res := MgmtClusterJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return nil, err
+	}
+
+	return &res.MgmtCluster, nil
+}
+
+func (c *Client) DeleteMgmtCluster(name string) error {
+	requestURL := fmt.Sprintf("%s/v1alpha1/managementclusters/%s", c.baseURL, name)
+
+	req, err := http.NewRequest("DELETE", requestURL, nil)
+	if err != nil {
+		return err
+	}
+
+	res := MgmtClusterJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/tanzuclient/namespace.go b/tanzuclient/namespace.go
new file mode 100644
index 0000000..caa3005
--- /dev/null
+++ b/tanzuclient/namespace.go
@@ -0,0 +1,107 @@
+package tanzuclient
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+type NamespaceSpec struct {
+	WorkspaceName string `json:"workspaceName"`
+}
+
+type Namespace struct {
+	FullName *FullName      `json:"fullName"`
+	Meta     *MetaData      `json:"meta"`
+	Spec     *NamespaceSpec `json:"spec"`
+}
+
+type NamespaceJsonObject struct {
+	Namespace Namespace `json:"namespace"`
+}
+
+type NamespaceOpts struct {
+	Description       string
+	Labels            map[string]interface{}
+	ManagementCluster string
+	ProvisionerName   string
+	ClusterName       string
+	WorkspaceName     string
+}
+
+func (c *Client) CreateNamespace(name string, opts NamespaceOpts) (*Namespace, error) {
+	requestURL := fmt.Sprintf("%s/v1alpha1/clusters/%s/namespaces?fullName.managementClusterName=%s&fullName.provisionerName=%s", c.baseURL, opts.ClusterName, opts.ManagementCluster, opts.ProvisionerName)
+
+	newNamespace := &Namespace{
+		FullName: &FullName{
+			Name:                  name,
+			ProvisionerName:       opts.ProvisionerName,
+			ManagementClusterName: opts.ManagementCluster,
+			ClusterName:           opts.ClusterName,
+		},
+		Meta: &MetaData{
+			Description: opts.Description,
+			Labels:      opts.Labels,
+		},
+		Spec: &NamespaceSpec{
+			WorkspaceName: opts.WorkspaceName,
+		},
+	}
+
+	newNamespaceObject := &NamespaceJsonObject{
+		Namespace: *newNamespace,
+	}
+
+	json_data, err := json.Marshal(newNamespaceObject) // returns []byte
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(json_data))
+	if err != nil {
+		return nil, err
+	}
+
+	res := NamespaceJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return nil, err
+	}
+
+	return &res.Namespace, nil
+}
+
+func (c *Client) GetNamespace(name string, clusterName string, managementClusterName string, provisionerName string) (*Namespace, error) {
+	requestURL := fmt.Sprintf("%s/v1alpha1/clusters/%s/namespaces/%s?fullName.managementClusterName=%s&fullName.provisionerName=%s", c.baseURL, clusterName, name, managementClusterName, provisionerName)
+
+	req, err := http.NewRequest("GET", requestURL, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	res := NamespaceJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return nil, err
+	}
+
+	return &res.Namespace, nil
+}
+
+func (c *Client) DeleteNamespace(name string, clusterName string, managementClusterName string, provisionerName string) error {
+	requestURL := fmt.Sprintf("%s/v1alpha1/clusters/%s/namespaces/%s?fullName.managementClusterName=%s&fullName.provisionerName=%s", c.baseURL, clusterName, name, managementClusterName, provisionerName)
+
+	req, err := http.NewRequest("DELETE", requestURL, nil)
+	if err != nil {
+		return err
+	}
+
+	res := NamespaceJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
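Reviewer note: for readers new to the client layer, a minimal sketch of how `CreateNamespace` and its `NamespaceOpts` are meant to be exercised. The `*tanzuclient.Client` construction is assumed; its constructor is not part of this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/codaglobal/terraform-provider-tmc/tanzuclient"
)

// createExampleNamespace is a usage sketch only: it assumes a ready
// *tanzuclient.Client, which is built elsewhere in the provider.
func createExampleNamespace(client *tanzuclient.Client) {
	opts := tanzuclient.NamespaceOpts{
		Description:       "team namespace",
		Labels:            map[string]interface{}{"CreatedBy": "terraform"},
		ManagementCluster: "example-hosted",
		ProvisionerName:   "example-provisioner",
		ClusterName:       "example-cluster",
		WorkspaceName:     "default",
	}

	ns, err := client.CreateNamespace("example-ns", opts)
	if err != nil {
		log.Fatalf("creating namespace: %v", err)
	}

	// The UID assigned by TMC is what the resource later uses as its ID.
	fmt.Println(ns.Meta.UID)
}
```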
!= nil {
+		return err
+	}
+
+	res := NamespaceJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/tanzuclient/vsphere.go b/tanzuclient/vsphere.go
new file mode 100644
index 0000000..11059d5
--- /dev/null
+++ b/tanzuclient/vsphere.go
@@ -0,0 +1,235 @@
+package tanzuclient
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+)
+
+type Volume struct {
+	Name         string `json:"name"`
+	MountPath    string `json:"mountPath"`
+	Capacity     int    `json:"capacity"`
+	StorageClass string `json:"storageClass"`
+}
+
+type VsphereControlPlane struct {
+	Volumes      []Volume `json:"volumes,omitempty"`
+	Class        string   `json:"class"`
+	StorageClass string   `json:"storageClass"`
+}
+
+type Vsphere struct {
+	Settings struct {
+		Network struct {
+			Pods struct {
+				CidrBlocks []string `json:"cidrBlocks"`
+			} `json:"pods"`
+			Services struct {
+				CidrBlocks []string `json:"cidrBlocks"`
+			} `json:"services"`
+		} `json:"network"`
+	} `json:"settings"`
+	Distribution struct {
+		Version string `json:"version"`
+	} `json:"distribution"`
+	Topology struct {
+		ControlPlane VsphereControlPlane `json:"controlPlane"`
+		NodePools    []VsphereNodepool   `json:"nodePools"`
+	} `json:"topology"`
+}
+
+type VsphereNodepool struct {
+	Spec VsphereNodeSpec `json:"spec"`
+	Info struct {
+		Name string `json:"name"`
+	} `json:"info"`
+}
+
+type VsphereNodeSpec struct {
+	NodeCount string              `json:"workerNodeCount"`
+	NodeSpec  VsphereControlPlane `json:"tkgServiceVsphere"`
+}
+
+type VsphereSpec struct {
+	ClusterGroupName  string  `json:"clusterGroupName"`
+	TkgVsphereService Vsphere `json:"tkgServiceVsphere"`
+}
+
+type VsphereCluster struct {
+	FullName *FullName    `json:"fullName"`
+	Meta     *MetaData    `json:"meta"`
+	Spec     *VsphereSpec `json:"spec"`
+}
+
+type VsphereJsonObject struct {
+	Cluster VsphereCluster `json:"cluster"`
+}
+
+type VsphereNodepoolOpts struct {
+	Name            string
+	Class           string
+	StorageClass    string
+	WorkerNodeCount int
+}
+
+type VsphereOpts struct {
+	Version          string
+	Class            string
+	StorageClass     string
+	PodCidrBlock     string
+	ServiceCidrBlock string
+	NodepoolOpts     []VsphereNodepoolOpts
+}
+
+func (c *Client) GetVsphereCluster(fullName string, managementClusterName string, provisionerName string) (*VsphereCluster, error) {
+	requestURL := fmt.Sprintf("%s/v1alpha1/clusters/%s?fullName.managementClusterName=%s&fullName.provisionerName=%s", c.baseURL, fullName, managementClusterName, provisionerName)
+
+	req, err := http.NewRequest("GET", requestURL, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	res := VsphereJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return nil, err
+	}
+
+	return &res.Cluster, nil
+}
+
+func (c *Client) CreateVsphereCluster(name string, managementClusterName string, provisionerName string, cluster_group string, description string, labels map[string]interface{}, opts *VsphereOpts) (*VsphereCluster, error) {
+
+	nodePoolSpec := makeNodePoolSpec(opts.NodepoolOpts)
+
+	newCluster := &VsphereCluster{
+		FullName: &FullName{
+			Name:                  name,
+			ManagementClusterName: managementClusterName,
+			ProvisionerName:       provisionerName,
+		},
+		Meta: &MetaData{
+			Description: description,
+			Labels:      labels,
+		},
+		Spec: &VsphereSpec{
+			ClusterGroupName: cluster_group,
+			TkgVsphereService: Vsphere{
+				Settings: struct {
+					Network struct {
+						Pods struct {
+							CidrBlocks []string "json:\"cidrBlocks\""
+						} "json:\"pods\""
+						Services struct {
+							CidrBlocks []string "json:\"cidrBlocks\""
+						} "json:\"services\""
+					} "json:\"network\""
+				}{
+					Network: struct {
+						Pods struct {
+							CidrBlocks []string "json:\"cidrBlocks\""
+						} "json:\"pods\""
+						Services struct {
+							CidrBlocks []string "json:\"cidrBlocks\""
+						} "json:\"services\""
+					}{
+						Pods: struct {
+							CidrBlocks []string "json:\"cidrBlocks\""
+						}{
+							CidrBlocks: []string{opts.PodCidrBlock},
+						},
+						Services: struct {
+							CidrBlocks []string "json:\"cidrBlocks\""
+						}{
+							CidrBlocks: []string{opts.ServiceCidrBlock},
+						},
+					},
+				},
+				Distribution: struct {
+					Version string "json:\"version\""
+				}{
+					Version: opts.Version,
+				},
+				Topology: struct {
+					ControlPlane VsphereControlPlane "json:\"controlPlane\""
+					NodePools    []VsphereNodepool   "json:\"nodePools\""
+				}{
+					ControlPlane: VsphereControlPlane{
+						Class:        opts.Class,
+						StorageClass: opts.StorageClass,
+					},
+					NodePools: nodePoolSpec,
+				},
+			},
+		},
+	}
+
+	newClusterObject := &VsphereJsonObject{
+		Cluster: *newCluster,
+	}
+
+	json_data, err := json.Marshal(newClusterObject) // returns []byte
+	if err != nil {
+		return nil, err
+	}
+
+	requestURL := fmt.Sprintf("%s/v1alpha1/clusters", c.baseURL)
+
+	req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(json_data))
+	if err != nil {
+		return nil, err
+	}
+
+	res := VsphereJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return nil, err
+	}
+
+	return &res.Cluster, nil
+}
+
+func (c *Client) DeleteVsphereCluster(name string, managementClusterName string, provisionerName string) error {
+	requestURL := fmt.Sprintf("%s/v1alpha1/clusters/%s?fullName.managementClusterName=%s&fullName.provisionerName=%s", c.baseURL, name, managementClusterName, provisionerName)
+
+	req, err := http.NewRequest("DELETE", requestURL, nil)
+	if err != nil {
+		return err
+	}
+
+	res := VsphereJsonObject{}
+
+	if err := c.sendRequest(req, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func makeNodePoolSpec(vsphereNodepoolOpts []VsphereNodepoolOpts) []VsphereNodepool {
+	npSpec := make([]VsphereNodepool, 0)
+
+	for i := 0; i < len(vsphereNodepoolOpts); i++ {
+		toAppend := &VsphereNodepool{
+			Spec: VsphereNodeSpec{
+				NodeCount: strconv.Itoa(vsphereNodepoolOpts[i].WorkerNodeCount),
+				NodeSpec: VsphereControlPlane{
+					Class:        vsphereNodepoolOpts[i].Class,
+					StorageClass: vsphereNodepoolOpts[i].StorageClass,
+				},
+			},
+			Info: struct {
+				Name string "json:\"name\""
+			}{
+				Name: vsphereNodepoolOpts[i].Name,
+			},
+		}
+
+		npSpec = append(npSpec, *toAppend)
+	}
+
+	return npSpec
+}
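Reviewer note: the anonymous nested structs in `Vsphere` force the string-literal struct tags seen in `CreateVsphereCluster`, which makes the composite literal hard to follow. A behavior-preserving alternative is named types that marshal to exactly the same JSON; the names below are suggestions, not part of this change.

```go
package tanzuclient

// Hypothetical named equivalents of the anonymous structs in Vsphere.
// They produce identical JSON, so the wire format would not change.
type CidrBlocks struct {
	CidrBlocks []string `json:"cidrBlocks"`
}

type VsphereNetwork struct {
	Pods     CidrBlocks `json:"pods"`
	Services CidrBlocks `json:"services"`
}

type VsphereSettings struct {
	Network VsphereNetwork `json:"network"`
}

// The literal in CreateVsphereCluster would then collapse to:
//
//	Settings: VsphereSettings{
//		Network: VsphereNetwork{
//			Pods:     CidrBlocks{CidrBlocks: []string{opts.PodCidrBlock}},
//			Services: CidrBlocks{CidrBlocks: []string{opts.ServiceCidrBlock}},
//		},
//	},
```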
number")) + } + return + }, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "Optional Description for the management cluster", + }, + "labels": labelsSchemaComputed(), + "kubernetes_provider_type": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates the k8s provider Type", + }, + "default_cluster_group": { + Type: schema.TypeString, + Computed: true, + Description: "Default cluster group for the workload clusters", + }, + "registration_url": { + Type: schema.TypeString, + Computed: true, + Description: "URL to fetch the TMC registration YAML.", + }, + }, + } +} + +func dataSourceTmcManagementClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*tanzuclient.Client) + + var diags diag.Diagnostics + + mgmtClusterName := d.Get("name").(string) + + mgmtCluster, err := client.GetMgmtCluster(mgmtClusterName) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read management cluster", + Detail: fmt.Sprintf("Error reading resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("description", mgmtCluster.Meta.Description) + if err := d.Set("labels", mgmtCluster.Meta.Labels); err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read cluster group", + Detail: fmt.Sprintf("Error setting labels for resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("kubernetes_provider_type", mgmtCluster.Spec.KubernetesProviderType) + d.Set("default_cluster_group", mgmtCluster.Spec.DefaultClusterGroup) + d.Set("registration_url", mgmtCluster.Status.RegistrationURL) + + d.SetId(string(mgmtCluster.Meta.UID)) + + return diags +} diff --git a/tmc/data_source_tmc_namespace.go b/tmc/data_source_tmc_namespace.go new file mode 100644 index 0000000..4b09b80 --- /dev/null +++ b/tmc/data_source_tmc_namespace.go @@ -0,0 +1,98 @@ +package tmc + +import ( + "context" + "fmt" + + "github.com/codaglobal/terraform-provider-tmc/tanzuclient" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceTmcNamespace() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceTmcNamespaceRead, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Unique ID of the Cluster Group", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Unique Name of the Namespace in your Org", + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v := val.(string) + if !IsValidTanzuName(v) { + errs = append(errs, fmt.Errorf("name should contain only lowercase letters, numbers or hyphens and should begin with either an alphabet or number")) + } + return + }, + }, + "cluster_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the cluster in which the namespace is to be created", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "Description of the Namespace", + }, + "management_cluster": { + Type: schema.TypeString, + Required: true, + Description: "Name of the management cluster used", + }, + "provisioner_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the provisioner", + }, + "labels": labelsSchemaComputed(), + "workspace_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the workspace for the 
namespace", + }, + }, + } +} + +func dataSourceTmcNamespaceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + client := m.(*tanzuclient.Client) + + NamespaceName := d.Get("name").(string) + clusterName := d.Get("cluster_name").(string) + managementClusterName := d.Get("management_cluster").(string) + provisionerName := d.Get("provisioner_name").(string) + + Namespace, err := client.GetNamespace(NamespaceName, clusterName, managementClusterName, provisionerName) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read namespace", + Detail: fmt.Sprintf("Error reading resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("description", Namespace.Meta.Description) + if err := d.Set("labels", Namespace.Meta.Labels); err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read namespace", + Detail: fmt.Sprintf("Error setting labels for resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("workspace_name", Namespace.Spec.WorkspaceName) + + d.SetId(Namespace.Meta.UID) + + return nil +} diff --git a/tmc/provider.go b/tmc/provider.go index 7d0604a..69b4d0e 100644 --- a/tmc/provider.go +++ b/tmc/provider.go @@ -43,6 +43,8 @@ func Provider() *schema.Provider { "tmc_aws_storage_credential": dataSourceTmcAwsStorageCredential(), "tmc_observability_credential": dataSourceTmcObservabilityCredential(), "tmc_cluster_backup": dataSourceTmcClusterBackup(), + "tmc_namespace": dataSourceTmcNamespace(), + "tmc_management_cluster": dataSourceTmcManagementCluster(), }, // List of Resources supported by the provider @@ -56,6 +58,9 @@ func Provider() *schema.Provider { "tmc_aws_storage_credential": resourceTmcAwsStorageCredential(), "tmc_observability_credential": resourceTmcObservabilityCredential(), "tmc_cluster_backup": resourceTmcClusterBackup(), + "tmc_vsphere_cluster": resourceVsphereCluster(), + "tmc_namespace": resourceTmcNamespace(), + "tmc_management_cluster": resourceTmcManagementCluster(), }, } diff --git a/tmc/resource_tmc_management_cluster.go b/tmc/resource_tmc_management_cluster.go new file mode 100644 index 0000000..4be9fc1 --- /dev/null +++ b/tmc/resource_tmc_management_cluster.go @@ -0,0 +1,153 @@ +package tmc + +import ( + "context" + "fmt" + + "github.com/codaglobal/terraform-provider-tmc/tanzuclient" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceTmcManagementCluster() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceTmcManagementClusterCreate, + ReadContext: resourceTmcManagementClusterRead, + DeleteContext: resourceTmcManagementClusterDelete, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Unique ID of the Tanzu Cluster Group", + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Unique Name of the Tanzu Management Cluster in your Org", + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v := val.(string) + if !IsValidTanzuName(v) { + errs = append(errs, fmt.Errorf("name should contain only lowercase letters, numbers or hyphens and should begin with either an alphabet or number")) + } + return + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional Description for the management cluster", + }, 
+ "labels": labelsSchemaImmutable(), + "kubernetes_provider_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Indicates the k8s provider Type", + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v := val.(string) + if !isValidK8sProviderType(v) { + errs = append(errs, fmt.Errorf("invalid kubernetes_provider_type specified. it can be one of tkg, tkgservice, tkghosted or other")) + } + return + }, + }, + "default_cluster_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Default cluster group for the workload clusters", + }, + "registration_url": { + Type: schema.TypeString, + Computed: true, + Description: "URL to fetch the TMC registration YAML.", + }, + }, + } +} + +func resourceTmcManagementClusterCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + + client := m.(*tanzuclient.Client) + + mgmtClusterName := d.Get("name").(string) + k8sProviderType := d.Get("kubernetes_provider_type").(string) + defaultCg := d.Get("default_cluster_group").(string) + description := d.Get("description").(string) + labels := d.Get("labels").(map[string]interface{}) + + mgmtCluster, err := client.CreateMgmtCluster(mgmtClusterName, defaultCg, k8sProviderType, description, labels) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to create Management cluster", + Detail: fmt.Sprintf("Error creating resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.SetId(mgmtCluster.Meta.UID) + + return resourceTmcManagementClusterRead(ctx, d, m) +} + +func resourceTmcManagementClusterRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + client := m.(*tanzuclient.Client) + + mgmtClusterName := d.Get("name").(string) + + mgmtCluster, err := client.GetMgmtCluster(mgmtClusterName) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read Management cluster", + Detail: fmt.Sprintf("Error creating resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("description", mgmtCluster.Meta.Description) + if err := d.Set("labels", mgmtCluster.Meta.Labels); err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read AWS cluster", + Detail: fmt.Sprintf("Error getting labels for resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("kubernetes_provider_type", mgmtCluster.Spec.KubernetesProviderType) + d.Set("default_cluster_group", mgmtCluster.Spec.DefaultClusterGroup) + d.Set("registration_url", mgmtCluster.Status.RegistrationURL) + + return nil +} + +func resourceTmcManagementClusterDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + + client := m.(*tanzuclient.Client) + + mgmtClusterName := d.Get("name").(string) + + err := client.DeleteMgmtCluster(mgmtClusterName) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to delete management cluster", + Detail: fmt.Sprintf("Error deleting resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.SetId("") + + return nil +} diff --git a/tmc/resource_tmc_namespace.go b/tmc/resource_tmc_namespace.go new file mode 100644 index 0000000..8955a68 --- /dev/null +++ b/tmc/resource_tmc_namespace.go @@ -0,0 +1,172 @@ +package tmc + +import ( + 
"context" + "fmt" + + "github.com/codaglobal/terraform-provider-tmc/tanzuclient" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceTmcNamespace() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceTmcNamespaceCreate, + ReadContext: resourceTmcNamespaceRead, + DeleteContext: resourceTmcNamespaceDelete, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Unique ID of the Namespace", + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Unique Name of the Namespace in your Org", + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v := val.(string) + if !IsValidTanzuName(v) { + errs = append(errs, fmt.Errorf("name should contain only lowercase letters, numbers or hyphens and should begin with either an alphabet or number")) + } + return + }, + }, + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the cluster in which the namespace is to be created", + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Description of the Namespace", + }, + "management_cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the management cluster used", + }, + "provisioner_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the provisioner", + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // Ignore changes to the creator label added automatically added by TMC and + // also ignore changes when the labels field itself is deleted when updating + return k == "labels.tmc.cloud.vmware.com/creator" || k == "labels.%" || k == "labels.tmc.cloud.vmware.com/managed" || k == "labels.tmc.cloud.vmware.com/workspace" + }, + }, + "workspace_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the workspace for the namespace", + }, + }, + } +} + +func resourceTmcNamespaceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + + client := m.(*tanzuclient.Client) + + NamespaceName := d.Get("name").(string) + + opts := &tanzuclient.NamespaceOpts{ + Description: d.Get("description").(string), + Labels: d.Get("labels").(map[string]interface{}), + ManagementCluster: d.Get("management_cluster").(string), + ProvisionerName: d.Get("provisioner_name").(string), + ClusterName: d.Get("cluster_name").(string), + WorkspaceName: d.Get("workspace_name").(string), + } + + Namespace, err := client.CreateNamespace(NamespaceName, *opts) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to create namespace", + Detail: fmt.Sprintf("Error creating resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.SetId(Namespace.Meta.UID) + + return resourceTmcNamespaceRead(ctx, d, m) +} + +func resourceTmcNamespaceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + + client := m.(*tanzuclient.Client) + + NamespaceName := d.Get("name").(string) + clusterName := d.Get("cluster_name").(string) + managementClusterName := d.Get("management_cluster").(string) + provisionerName := 
d.Get("provisioner_name").(string) + + Namespace, err := client.GetNamespace(NamespaceName, clusterName, managementClusterName, provisionerName) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read namespace", + Detail: fmt.Sprintf("Error reading resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("description", Namespace.Meta.Description) + if err := d.Set("labels", Namespace.Meta.Labels); err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read namespace", + Detail: fmt.Sprintf("Error setting labels for resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.Set("workspace_name", Namespace.Spec.WorkspaceName) + + return diags +} + +func resourceTmcNamespaceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + + client := m.(*tanzuclient.Client) + + NamespaceName := d.Get("name").(string) + clusterName := d.Get("cluster_name").(string) + managementClusterName := d.Get("management_cluster").(string) + provisionerName := d.Get("provisioner_name").(string) + + err := client.DeleteNamespace(NamespaceName, clusterName, managementClusterName, provisionerName) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to delete namespace", + Detail: fmt.Sprintf("Error deleting resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.SetId("") + + return diags +} diff --git a/tmc/resource_tmc_vsphere_cluster.go b/tmc/resource_tmc_vsphere_cluster.go new file mode 100644 index 0000000..5e653c8 --- /dev/null +++ b/tmc/resource_tmc_vsphere_cluster.go @@ -0,0 +1,319 @@ +package tmc + +import ( + "context" + "fmt" + "strconv" + + "github.com/codaglobal/terraform-provider-tmc/tanzuclient" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceVsphereCluster() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceVsphereClusterCreate, + ReadContext: resourceVsphereClusterRead, + DeleteContext: resourceVsphereClusterDelete, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Unique ID of the Cluster", + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + Description: "Resource version of the Cluster", + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the Cluster", + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v := val.(string) + if !IsValidTanzuName(v) { + errs = append(errs, fmt.Errorf("name should contain only lowercase letters, numbers or hyphens and should begin with either an alphabet or number")) + } + return + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Description of the Cluster", + }, + "management_cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of an existing management cluster to be used", + }, + "provisioner_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of an existing provisioner to be used", + }, + "cluster_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the cluster group", + }, + "labels": labelsSchemaImmutable(), + "pod_cidrblock": { + Type: schema.TypeString, + 
Description: "CIDR block used by the Cluster's Pods", + Optional: true, + ForceNew: true, + Default: "192.168.0.0/16", + }, + "service_cidrblock": { + Type: schema.TypeString, + Description: "CIDR block used by the Cluster's Services", + Optional: true, + ForceNew: true, + Default: "10.96.0.0/12", + }, + "version": { + Type: schema.TypeString, + Description: "Kubernetes version to be used", + ForceNew: true, + Required: true, + }, + "control_plane_spec": { + Type: schema.TypeList, + Description: "Contains information related to the Control Plane of the cluster", + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "class": { + Type: schema.TypeString, + Description: "Indicates the size of the VMs to be provisioned", + Required: true, + ForceNew: true, + }, + "storage_class": { + Type: schema.TypeString, + Description: "Storage Class to be used for storage of the disks which store the root filesystems of the nodes", + Required: true, + ForceNew: true, + }, + }, + }, + }, + "nodepool": { + Type: schema.TypeList, + Description: "Contains specifications for a nodepool which is part of the cluster", + Required: true, + ForceNew: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nodepool_name": { + Type: schema.TypeString, + Description: "Determines the name of the nodepool", + Required: true, + ForceNew: true, + }, + "worker_node_count": { + Type: schema.TypeInt, + Description: "Determines the number of worker nodes provisioned", + Required: true, + ForceNew: true, + }, + "node_class": { + Type: schema.TypeString, + Description: "Determines the class of the worker node", + Required: true, + ForceNew: true, + }, + "node_storage_class": { + Type: schema.TypeString, + Description: "Determines the storage policy used for the worker node", + Required: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func resourceVsphereClusterCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + + client := m.(*tanzuclient.Client) + + clusterName := d.Get("name").(string) + managementClusterName := d.Get("management_cluster").(string) + provisionerName := d.Get("provisioner_name").(string) + description := d.Get("description").(string) + labels := d.Get("labels").(map[string]interface{}) + cluster_group := d.Get("cluster_group").(string) + controlPlaneSpec := d.Get("control_plane_spec").([]interface{})[0].(map[string]interface{}) + + nodePoolOpts := makeNodepoolOpts(d.Get("nodepool").([]interface{})) + + opts := &tanzuclient.VsphereOpts{ + Version: d.Get("version").(string), + Class: controlPlaneSpec["class"].(string), + StorageClass: controlPlaneSpec["storage_class"].(string), + PodCidrBlock: d.Get("pod_cidrblock").(string), + ServiceCidrBlock: d.Get("service_cidrblock").(string), + NodepoolOpts: nodePoolOpts, + } + + vSphereCluster, err := client.CreateVsphereCluster(clusterName, managementClusterName, provisionerName, cluster_group, description, labels, opts) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to create vSphere Cluster", + Detail: fmt.Sprintf("Error creating resource %s: %s", d.Get("name"), err), + }) + return diags + } + + d.SetId(vSphereCluster.Meta.UID) + + resourceVsphereClusterRead(ctx, d, m) + + return diags + +} + +func resourceVsphereClusterRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + + var diags diag.Diagnostics + + client := 
+
+func resourceVsphereClusterRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+
+	var diags diag.Diagnostics
+
+	client := m.(*tanzuclient.Client)
+
+	clusterName := d.Get("name").(string)
+	managementClusterName := d.Get("management_cluster").(string)
+	provisionerName := d.Get("provisioner_name").(string)
+
+	cluster, err := client.GetVsphereCluster(clusterName, managementClusterName, provisionerName)
+	if err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Failed to read vSphere Cluster",
+			Detail:   fmt.Sprintf("Error reading resource %s: %s", d.Get("name"), err),
+		})
+		return diags
+	}
+
+	d.Set("resource_version", cluster.Meta.ResourceVersion)
+	d.Set("description", cluster.Meta.Description)
+	d.Set("cluster_group", cluster.Spec.ClusterGroupName)
+	if err := d.Set("labels", cluster.Meta.Labels); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Failed to read vSphere cluster",
+			Detail:   fmt.Sprintf("Error getting labels for resource %s: %s", d.Get("name"), err),
+		})
+		return diags
+	}
+
+	d.Set("version", cluster.Spec.TkgVsphereService.Distribution.Version)
+	d.Set("pod_cidrblock", cluster.Spec.TkgVsphereService.Settings.Network.Pods.CidrBlocks[0])
+	d.Set("service_cidrblock", cluster.Spec.TkgVsphereService.Settings.Network.Services.CidrBlocks[0])
+
+	spec := make([]map[string]interface{}, 0)
+	cp_spec := flatten_vsphere_control_plane_spec(&cluster.Spec.TkgVsphereService.Topology.ControlPlane)
+	spec = append(spec, cp_spec)
+	np_spec := flatten_vsphere_nodepool_spec(&cluster.Spec.TkgVsphereService.Topology.NodePools)
+
+	if err := d.Set("control_plane_spec", spec); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Failed to read vSphere cluster",
+			Detail:   fmt.Sprintf("Error getting control plane information for resource %s: %s", d.Get("name"), err),
+		})
+		return diags
+	}
+
+	if err := d.Set("nodepool", np_spec); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Failed to read vSphere cluster",
+			Detail:   fmt.Sprintf("Error getting nodepool information for resource %s: %s", d.Get("name"), err),
+		})
+		return diags
+	}
+
+	return diags
+}
+
+func resourceVsphereClusterDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+
+	var diags diag.Diagnostics
+
+	client := m.(*tanzuclient.Client)
+
+	clusterName := d.Get("name").(string)
+	managementClusterName := d.Get("management_cluster").(string)
+	provisionerName := d.Get("provisioner_name").(string)
+
+	if err := client.DeleteVsphereCluster(clusterName, managementClusterName, provisionerName); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Failed to delete vSphere Cluster",
+			Detail:   fmt.Sprintf("Error deleting resource %s: %s", d.Get("name"), err),
+		})
+		return diags
+	}
+
+	d.SetId("")
+
+	return nil
+}
+
+func flatten_vsphere_control_plane_spec(vsphereSpec *tanzuclient.VsphereControlPlane) map[string]interface{} {
+	cp_spec := make(map[string]interface{})
+
+	cp_spec["class"] = vsphereSpec.Class
+	cp_spec["storage_class"] = vsphereSpec.StorageClass
+
+	return cp_spec
+}
+
+func flatten_vsphere_nodepool_spec(vsphereNodepool *[]tanzuclient.VsphereNodepool) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0)
+
+	for i := 0; i < len(*vsphereNodepool); i++ {
+		toAppend := make(map[string]interface{})
+
+		toAppend["nodepool_name"] = (*vsphereNodepool)[i].Info.Name
+		toAppend["worker_node_count"], _ = strconv.Atoi((*vsphereNodepool)[i].Spec.NodeCount)
+		toAppend["node_class"] = (*vsphereNodepool)[i].Spec.NodeSpec.Class
+		toAppend["node_storage_class"] = (*vsphereNodepool)[i].Spec.NodeSpec.StorageClass
+
+		result = append(result, toAppend)
+	}
+
+	return result
+}
+
+func makeNodepoolOpts(arrayOfNodePoolSpec []interface{}) []tanzuclient.VsphereNodepoolOpts {
+
+	npSpec := make([]tanzuclient.VsphereNodepoolOpts, 0)
+
+	for i := 0; i < len(arrayOfNodePoolSpec); i++ {
+		toAppend := &tanzuclient.VsphereNodepoolOpts{
+			Name:            arrayOfNodePoolSpec[i].(map[string]interface{})["nodepool_name"].(string),
+			Class:           arrayOfNodePoolSpec[i].(map[string]interface{})["node_class"].(string),
+			StorageClass:    arrayOfNodePoolSpec[i].(map[string]interface{})["node_storage_class"].(string),
+			WorkerNodeCount: arrayOfNodePoolSpec[i].(map[string]interface{})["worker_node_count"].(int),
+		}
+
+		npSpec = append(npSpec, *toAppend)
+	}
+
+	return npSpec
+}
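`makeNodepoolOpts` must map each `nodepool` block from its own list element, which is easy to regress when every field is fetched through an index. A unit-test sketch (hypothetical, not included in this change) that pins the multi-nodepool case:

```go
package tmc

import "testing"

// TestMakeNodepoolOpts checks that each nodepool block is converted from
// its own list element rather than the first one.
func TestMakeNodepoolOpts(t *testing.T) {
	in := []interface{}{
		map[string]interface{}{
			"nodepool_name":      "np-1",
			"node_class":         "best-effort-small",
			"node_storage_class": "policy-a",
			"worker_node_count":  1,
		},
		map[string]interface{}{
			"nodepool_name":      "np-2",
			"node_class":         "best-effort-medium",
			"node_storage_class": "policy-b",
			"worker_node_count":  3,
		},
	}

	got := makeNodepoolOpts(in)

	if len(got) != 2 {
		t.Fatalf("expected 2 nodepools, got %d", len(got))
	}
	if got[1].Name != "np-2" || got[1].WorkerNodeCount != 3 {
		t.Errorf("second nodepool not taken from second element: %+v", got[1])
	}
}
```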
+ toAppend["node_storage_class"] = (*vsphereNodepool)[i].Spec.NodeSpec.StorageClass + + result = append(result, toAppend) + } + + return result +} + +func makeNodepoolOpts(arrayOfNodePoolSpec []interface{}) []tanzuclient.VpshereNodepoolOpts { + + npSpec := make([]tanzuclient.VpshereNodepoolOpts, 0) + + for i := 0; i < len(arrayOfNodePoolSpec); i++ { + toAppend := &tanzuclient.VpshereNodepoolOpts{ + Name: arrayOfNodePoolSpec[0].(map[string]interface{})["nodepool_name"].(string), + Class: arrayOfNodePoolSpec[0].(map[string]interface{})["node_class"].(string), + StorageClass: arrayOfNodePoolSpec[0].(map[string]interface{})["node_storage_class"].(string), + WorkerNodeCount: arrayOfNodePoolSpec[0].(map[string]interface{})["worker_node_count"].(int), + } + + npSpec = append(npSpec, *toAppend) + } + + return npSpec +} diff --git a/tmc/utils.go b/tmc/utils.go index e07b39d..406eefa 100644 --- a/tmc/utils.go +++ b/tmc/utils.go @@ -24,3 +24,15 @@ func InvalidTanzuNameError(resourceName string) diag.Diagnostics { return diags } + +func isValidK8sProviderType(lookup string) bool { + switch lookup { + case + "tkg", + "tkgservice", + "tkghosted", + "other": + return true + } + return false +}