Fixed errors in the Kubernetes resource and remove unnecessary fields (#129)

* Fixed errors in the Kubernetes resource and remove unnecessary fields
* Reformat code
* Fixed all the tests to use the new fields
* Fix indentation
alejandrojnm committed Apr 5, 2022
1 parent 0cd7675 commit 98fb75c
Showing 6 changed files with 66 additions and 240 deletions.
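
In user-facing terms, this change removes the computed instances attribute from both the cluster resource and the data source, and standardizes node configuration on a pools block with node_count and size. A minimal sketch of the new configuration shape, adapted from the updated acceptance tests below (cluster name and size values are illustrative):

resource "civo_kubernetes_cluster" "example" {
  name = "example-cluster"

  # pools replaces the old num_target_nodes / target_nodes_size arguments
  pools {
    node_count = 2
    size       = "g2.small"
  }
}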
95 changes: 1 addition & 94 deletions civo/datasource_kubernetes_cluster.go
@@ -73,7 +73,6 @@ func dataSourceKubernetesCluster() *schema.Resource {
 				Computed:    true,
 				Description: "A list of application installed",
 			},
-			"instances":              dataSourceInstanceSchema(),
 			"installed_applications": dataSourceApplicationSchema(),
 			"pools":                  dataSourcenodePoolSchema(),
 			"status": {
@@ -115,54 +114,6 @@ func dataSourceKubernetesCluster() *schema.Resource {
 	}
 }
 
-// schema for the instances
-func dataSourceInstanceSchema() *schema.Schema {
-	return &schema.Schema{
-		Type:     schema.TypeList,
-		Computed: true,
-		Elem: &schema.Resource{
-			Schema: map[string]*schema.Schema{
-				"hostname": {
-					Type:        schema.TypeString,
-					Computed:    true,
-					Description: "The hostname of the instance",
-				},
-				"size": {
-					Type:        schema.TypeString,
-					Computed:    true,
-					Description: "The size of the instance",
-				},
-				"cpu_cores": {
-					Type:        schema.TypeInt,
-					Computed:    true,
-					Description: "Total CPU of the instance",
-				},
-				"ram_mb": {
-					Type:        schema.TypeInt,
-					Computed:    true,
-					Description: "Total RAM of the instance",
-				},
-				"disk_gb": {
-					Type:        schema.TypeInt,
-					Computed:    true,
-					Description: "The size of the instance disk",
-				},
-				"status": {
-					Type:        schema.TypeString,
-					Computed:    true,
-					Description: "The status of the instance",
-				},
-				"tags": {
-					Type:        schema.TypeSet,
-					Computed:    true,
-					Description: "The tag of the instance",
-					Elem:        &schema.Schema{Type: schema.TypeString},
-				},
-			},
-		},
-	}
-}
-
 // schema for the node pool in the cluster
 func dataSourcenodePoolSchema() *schema.Schema {
 	return &schema.Schema{
@@ -191,7 +142,6 @@ func dataSourcenodePoolSchema() *schema.Schema {
 					Description: "A list of the instance in the pool",
 					Elem:        &schema.Schema{Type: schema.TypeString},
 				},
-				"instances": dataSourceInstanceSchema(),
 			},
 		},
 	}
@@ -271,56 +221,13 @@ func dataSourceKubernetesClusterRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	d.Set("dns_entry", foundCluster.DNSEntry)
 	d.Set("created_at", foundCluster.CreatedAt.UTC().String())
 
-	if err := d.Set("pools", dsflattenNodePool(foundCluster)); err != nil {
+	if err := d.Set("pools", flattenNodePool(foundCluster)); err != nil {
 		return diag.Errorf("[ERR] error retrieving the pools for kubernetes cluster error: %#v", err)
 	}
 
-	if err := d.Set("instances", flattenInstances(foundCluster.Instances)); err != nil {
-		return diag.Errorf("[ERR] error retrieving the instances for kubernetes cluster error: %#v", err)
-	}
-
 	if err := d.Set("installed_applications", flattenInstalledApplication(foundCluster.InstalledApplications)); err != nil {
 		return diag.Errorf("[ERR] error retrieving the installed application for kubernetes cluster error: %#v", err)
 	}
 
 	return nil
 }
-
-// function to flatten all instances inside the cluster
-func dsflattenNodePool(cluster *civogo.KubernetesCluster) []interface{} {
-
-	if cluster.Pools == nil {
-		return nil
-	}
-
-	flattenedPool := make([]interface{}, 0)
-	for _, pool := range cluster.Pools {
-		flattenedPoolInstance := make([]interface{}, 0)
-		for _, v := range pool.Instances {
-
-			rawPoolInstance := map[string]interface{}{
-				"hostname":  v.Hostname,
-				"size":      pool.Size,
-				"cpu_cores": v.CPUCores,
-				"ram_mb":    v.RAMMegabytes,
-				"disk_gb":   v.DiskGigabytes,
-				"status":    v.Status,
-				"tags":      v.Tags,
-			}
-			flattenedPoolInstance = append(flattenedPoolInstance, rawPoolInstance)
-		}
-		instanceName := append(pool.InstanceNames, pool.InstanceNames...)
-
-		rawPool := map[string]interface{}{
-			"id":             pool.ID,
-			"node_count":     pool.Count,
-			"size":           pool.Size,
-			"instance_names": instanceName,
-			"instances":      flattenedPoolInstance,
-		}
-
-		flattenedPool = append(flattenedPool, rawPool)
-	}
-
-	return flattenedPool
-}
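
With the per-instance fields gone from the data source, pool details are read through the pools attribute instead. A hedged sketch of the new access pattern (output names are illustrative; the indexed paths mirror the pools.0.node_count and pools.0.size checks in the updated tests below):

data "civo_kubernetes_cluster" "example" {
  name = "example-cluster"
}

# First pool's node count and size, replacing the removed instances.0.* attributes
output "pool_node_count" {
  value = data.civo_kubernetes_cluster.example.pools[0].node_count
}

output "pool_size" {
  value = data.civo_kubernetes_cluster.example.pools[0].size
}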
24 changes: 12 additions & 12 deletions civo/datasource_kubernetes_cluster_test.go
@@ -20,11 +20,8 @@ func TestAccDataSourceCivoKubernetesCluster_basic(t *testing.T) {
 				Config: testAccDataSourceCivoKubernetesClusterConfig(name),
 				Check: resource.ComposeAggregateTestCheckFunc(
 					resource.TestCheckResourceAttr(datasourceName, "name", name),
-					resource.TestCheckResourceAttr(datasourceName, "num_target_nodes", "2"),
-					resource.TestCheckResourceAttr(datasourceName, "target_nodes_size", "g2.small"),
-					resource.TestCheckResourceAttr(datasourceName, "instances.0.cpu_cores", "1"),
-					resource.TestCheckResourceAttr(datasourceName, "instances.0.ram_mb", "2048"),
-					resource.TestCheckResourceAttr(datasourceName, "instances.0.disk_gb", "25"),
+					resource.TestCheckResourceAttr(datasourceName, "pools.0.node_count", "2"),
+					resource.TestCheckResourceAttr(datasourceName, "pools.0.size", "g2.small"),
 					resource.TestCheckResourceAttrSet(datasourceName, "kubeconfig"),
 					resource.TestCheckResourceAttrSet(datasourceName, "api_endpoint"),
 					resource.TestCheckResourceAttrSet(datasourceName, "master_ip"),
@@ -46,11 +43,8 @@ func TestAccDataSourceCivoKubernetesClusterByID_basic(t *testing.T) {
 				Config: testAccDataSourceCivoKubernetesClusterByIDConfig(name),
 				Check: resource.ComposeAggregateTestCheckFunc(
 					resource.TestCheckResourceAttr(datasourceName, "name", name),
-					resource.TestCheckResourceAttr(datasourceName, "num_target_nodes", "2"),
-					resource.TestCheckResourceAttr(datasourceName, "target_nodes_size", "g2.small"),
-					resource.TestCheckResourceAttr(datasourceName, "instances.0.cpu_cores", "1"),
-					resource.TestCheckResourceAttr(datasourceName, "instances.0.ram_mb", "2048"),
-					resource.TestCheckResourceAttr(datasourceName, "instances.0.disk_gb", "25"),
+					resource.TestCheckResourceAttr(datasourceName, "pools.0.node_count", "2"),
+					resource.TestCheckResourceAttr(datasourceName, "pools.0.size", "g2.small"),
 					resource.TestCheckResourceAttrSet(datasourceName, "kubeconfig"),
 					resource.TestCheckResourceAttrSet(datasourceName, "api_endpoint"),
 					resource.TestCheckResourceAttrSet(datasourceName, "master_ip"),
@@ -64,7 +58,10 @@ func testAccDataSourceCivoKubernetesClusterConfig(name string) string {
 	return fmt.Sprintf(`
 resource "civo_kubernetes_cluster" "my-cluster" {
 	name = "%s"
-	num_target_nodes = 2
+	pools {
+		node_count = 2
+		size = "g2.small"
+	}
 }
 
 data "civo_kubernetes_cluster" "foobar" {
@@ -77,7 +74,10 @@ func testAccDataSourceCivoKubernetesClusterByIDConfig(name string) string {
 	return fmt.Sprintf(`
 resource "civo_kubernetes_cluster" "my-cluster" {
 	name = "%s"
-	num_target_nodes = 2
+	pools {
+		node_count = 2
+		size = "g2.small"
+	}
 }
 
 data "civo_kubernetes_cluster" "foobar" {
120 changes: 10 additions & 110 deletions civo/resource_kubernetes_cluster.go
@@ -90,7 +90,6 @@ func resourceKubernetesCluster() *schema.Resource {
 				Description: "The existing firewall ID to use for this cluster",
 			},
 			// Computed resource
-			"instances":              instanceSchema(),
 			"installed_applications": applicationSchema(),
 			"pools":                  nodePoolSchema(),
 			"status": {
@@ -140,54 +139,6 @@ func resourceKubernetesCluster() *schema.Resource {
 	}
 }
 
-// schema for the instances
-func instanceSchema() *schema.Schema {
-	return &schema.Schema{
-		Type:     schema.TypeList,
-		Computed: true,
-		Elem: &schema.Resource{
-			Schema: map[string]*schema.Schema{
-				"hostname": {
-					Type:        schema.TypeString,
-					Computed:    true,
-					Description: "Instance's hostname",
-				},
-				"size": {
-					Type:        schema.TypeString,
-					Computed:    true,
-					Description: "Instance's size",
-				},
-				"cpu_cores": {
-					Type:        schema.TypeInt,
-					Computed:    true,
-					Description: "Instance's CPU cores",
-				},
-				"ram_mb": {
-					Type:        schema.TypeInt,
-					Computed:    true,
-					Description: "Instance's RAM (MB)",
-				},
-				"disk_gb": {
-					Type:        schema.TypeInt,
-					Computed:    true,
-					Description: "Instance's disk (GB)",
-				},
-				"status": {
-					Type:        schema.TypeString,
-					Computed:    true,
-					Description: "Instance's status",
-				},
-				"tags": {
-					Type:        schema.TypeSet,
-					Computed:    true,
-					Elem:        &schema.Schema{Type: schema.TypeString},
-					Description: "Instance's tags",
-				},
-			},
-		},
-	}
-}
-
 // schema for the node pool in the cluster
 func nodePoolSchema() *schema.Schema {
 	return &schema.Schema{
@@ -218,7 +169,6 @@ func nodePoolSchema() *schema.Schema {
 					Elem:        &schema.Schema{Type: schema.TypeString},
 					Description: "Instance names in the nodepool",
 				},
-				"instances": instanceSchema(),
 			},
 		},
 	}
@@ -359,7 +309,6 @@ func resourceKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	}
 
 	return resourceKubernetesClusterRead(ctx, d, m)
-
 }
 
 // function to read the kubernetes cluster
@@ -399,11 +348,7 @@ func resourceKubernetesClusterRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	d.Set("created_at", resp.CreatedAt.UTC().String())
 	d.Set("firewall_id", resp.FirewallID)
 
-	if err := d.Set("instances", flattenInstances(resp.Instances)); err != nil {
-		return diag.Errorf("[ERR] error retrieving the instances for kubernetes cluster error: %#v", err)
-	}
-
-	if err := d.Set("pools", flattenNodePool(resp, d)); err != nil {
+	if err := d.Set("pools", flattenNodePool(resp)); err != nil {
 		return diag.Errorf("[ERR] error retrieving the pool for kubernetes cluster error: %#v", err)
 	}
 
@@ -536,67 +481,22 @@ func resourceKubernetesClusterDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 }
 
 // function to flatten all instances inside the cluster
-func flattenInstances(instances []civogo.KubernetesInstance) []interface{} {
-	if instances == nil {
-		return nil
-	}
-
-	flattenedInstances := make([]interface{}, 0)
-	for _, instance := range instances {
-		rawInstance := map[string]interface{}{
-			"hostname":  instance.Hostname,
-			"cpu_cores": instance.CPUCores,
-			"ram_mb":    instance.RAMMegabytes,
-			"disk_gb":   instance.DiskGigabytes,
-			"status":    instance.Status,
-		}
-
-		flattenedInstances = append(flattenedInstances, rawInstance)
-	}
-
-	return flattenedInstances
-}
-
-// function to flatten all instances inside the cluster
-func flattenNodePool(cluster *civogo.KubernetesCluster, d *schema.ResourceData) []interface{} {
-
+func flattenNodePool(cluster *civogo.KubernetesCluster) []interface{} {
 	if cluster.Pools == nil {
 		return nil
 	}
 
-	currentPools := d.Get("pools").([]interface{})[0].(map[string]interface{})
-
 	flattenedPool := make([]interface{}, 0)
-	for _, pool := range cluster.Pools {
-		if currentPools["id"].(string) == pool.ID {
-			flattenedPoolInstance := make([]interface{}, 0)
-			for _, v := range pool.Instances {
-
-				rawPoolInstance := map[string]interface{}{
-					"hostname":  v.Hostname,
-					"size":      pool.Size,
-					"cpu_cores": v.CPUCores,
-					"ram_mb":    v.RAMMegabytes,
-					"disk_gb":   v.DiskGigabytes,
-					"status":    v.Status,
-					"tags":      v.Tags,
-				}
-				flattenedPoolInstance = append(flattenedPoolInstance, rawPoolInstance)
-			}
-			instanceName := append(pool.InstanceNames, pool.InstanceNames...)
-
-			rawPool := map[string]interface{}{
-				"id":             pool.ID,
-				"node_count":     pool.Count,
-				"size":           pool.Size,
-				"instance_names": instanceName,
-				"instances":      flattenedPoolInstance,
-			}
-
-			flattenedPool = append(flattenedPool, rawPool)
-		}
-	}
+	instanceName := append(cluster.Pools[0].InstanceNames, cluster.Pools[0].InstanceNames...)
+	rawPool := map[string]interface{}{
+		"id":             cluster.Pools[0].ID,
+		"node_count":     cluster.Pools[0].Count,
+		"size":           cluster.Pools[0].Size,
+		"instance_names": instanceName,
+	}
+
+	flattenedPool = append(flattenedPool, rawPool)
 
 	return flattenedPool
 }
8 changes: 8 additions & 0 deletions civo/resource_kubernetes_cluster_nodepool.go
@@ -57,6 +57,12 @@ func resourceKubernetesClusterNodePool() *schema.Resource {
 				Computed:    true,
 				Description: "the size of each node (optional, the default is currently g4s.kube.medium)",
 			},
+			"instance_names": {
+				Type:        schema.TypeSet,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "Instance names in the nodepool",
+			},
 		},
 		CreateContext: resourceKubernetesClusterNodePoolCreate,
 		ReadContext:   resourceKubernetesClusterNodePoolRead,
@@ -154,9 +160,11 @@ func resourceKubernetesClusterNodePoolRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 
 	d.Set("cluster_id", resp.ID)
 	for _, v := range resp.Pools {
+		instanceName := append(v.InstanceNames, v.InstanceNames...)
 		if v.ID == d.Id() {
 			d.Set("node_count", v.Count)
 			d.Set("size", v.Size)
+			d.Set("instance_names", instanceName)
 		}
 	}
 
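
The node pool resource now also exposes the computed instance_names set. A small usage sketch, assuming the civo_kubernetes_node_pool.foobar pool defined in the test file below:

# Hostnames of the nodes in this pool, as reported by the provider
output "pool_instance_names" {
  value = civo_kubernetes_node_pool.foobar.instance_names
}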
7 changes: 4 additions & 3 deletions civo/resource_kubernetes_cluster_nodepool_test.go
@@ -45,8 +45,8 @@ func TestAccCivoKubernetesClusterNodePool_basic(t *testing.T) {
 					testAccCheckCivoKubernetesClusterNodePoolValues(&kubernetesNodePool, "g4s.kube.medium"),
 					// verify local values
 					// resource.TestCheckResourceAttr(resPoolName, "cluster_id", kubernetes.ID),
-					resource.TestCheckResourceAttr(resPoolName, "num_target_nodes", "3"),
-					resource.TestCheckResourceAttr(resPoolName, "target_nodes_size", "g4s.kube.medium"),
+					resource.TestCheckResourceAttr(resPoolName, "node_count", "3"),
+					resource.TestCheckResourceAttr(resPoolName, "size", "g4s.kube.medium"),
 				),
 			},
 		},
Expand Down Expand Up @@ -98,7 +98,8 @@ func testAccCheckCivoKubernetesClusterNodePoolConfigBasic() string {
return `
resource "civo_kubernetes_node_pool" "foobar" {
cluster_id = civo_kubernetes_cluster.foobar.id
num_target_nodes = 3
node_count = 3
size = "g4s.kube.medium"
depends_on = [civo_kubernetes_cluster.foobar]
}`
}
(diff for the sixth changed file not shown)