add orphan_resource_on_delete argument to worker pool resources (#5705)
* add orphan_resource_on_delete argument to ibm_container_vpc_worker_pool

* Update website/docs/r/container_vpc_worker_pool.html.markdown

Co-authored-by: lewiseevans <lewis.evans@uk.ibm.com>

* rename orphan_resource_on_delete to orphan_on_delete

* add orphan_on_delete to the classic workerpool resource

* Apply suggestions from code review for orphan_on_delete doc

Co-authored-by: lewiseevans <lewis.evans@uk.ibm.com>

* rename vars to orphan_on_delete

---------

Co-authored-by: Zoltan Illes <zoltan.illes@ibm.com>
Co-authored-by: lewiseevans <lewis.evans@uk.ibm.com>
3 people authored Oct 18, 2024
1 parent 650b4c1 commit 2b5fc0f
Showing 6 changed files with 201 additions and 373 deletions.
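The change adds an orphan_on_delete argument to the worker pool resources; when it is set, deleting the resource only removes it from Terraform state and leaves the worker pool running. A minimal usage sketch in Terraform configuration follows; the resource and attribute values (cluster, flavor, VPC, zones) are illustrative assumptions, not taken from this commit, and only orphan_on_delete and its behavior come from the diff below.

resource "ibm_container_vpc_worker_pool" "example" {
  cluster          = "my-vpc-cluster"       # assumed cluster name or ID
  worker_pool_name = "example-pool"         # assumed pool name
  flavor           = "bx2.4x16"             # assumed worker flavor
  vpc_id           = ibm_is_vpc.example.id  # assumed VPC resource
  worker_count     = 2

  zones {
    name      = "us-south-1"                # assumed zone
    subnet_id = ibm_is_subnet.example.id    # assumed subnet resource
  }

  # When true, "terraform destroy" removes this resource from Terraform
  # state without calling the worker pool delete API; the pool itself
  # keeps running in IBM Cloud and the provider only logs a warning.
  orphan_on_delete = true
}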
32 changes: 23 additions & 9 deletions ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go
@@ -211,7 +211,13 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource {
 				Type:             schema.TypeBool,
 				Optional:         true,
 				DiffSuppressFunc: flex.ApplyOnce,
-				Description:      "Import an existing WorkerPool from the cluster, instead of creating a new",
+				Description:      "Import an existing workerpool from the cluster instead of creating a new",
 			},
 
+			"orphan_on_delete": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: "Orphan the workerpool resource instead of deleting it",
+			},
+
 			"autoscale_enabled": {
@@ -715,14 +721,22 @@ func resourceIBMContainerVpcWorkerPoolDelete(d *schema.ResourceData, meta interf
 	if err != nil {
 		return err
 	}
-
-	err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv)
-	if err != nil {
-		return err
+	var orphan_on_delete bool = false
+	if orod, ok := d.GetOk("orphan_on_delete"); ok {
+		orphan_on_delete = orod.(bool)
 	}
-	_, err = WaitForVpcWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutDelete), targetEnv)
-	if err != nil {
-		return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err)
+
+	if orphan_on_delete {
+		log.Printf("[WARN] orphaning %s workerpool", workerPoolNameorID)
+	} else {
+		err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv)
+		if err != nil {
+			return err
+		}
+		_, err = WaitForVpcWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutDelete), targetEnv)
+		if err != nil {
+			return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err)
+		}
 	}
 	d.SetId("")
 	return nil
@@ -788,7 +802,7 @@ func WaitForWorkerPoolAvailable(d *schema.ResourceData, meta interface{}, cluste
 
 func vpcWorkerPoolStateRefreshFunc(client v2.Workers, instanceID string, workerPoolNameOrID string, target v2.ClusterTargetHeader) resource.StateRefreshFunc {
 	return func() (interface{}, string, error) {
-		workerFields, err := client.ListByWorkerPool(instanceID, "", false, target)
+		workerFields, err := client.ListByWorkerPool(instanceID, workerPoolNameOrID, false, target)
 		if err != nil {
 			return nil, "", fmt.Errorf("[ERROR] Error retrieving workers for cluster: %s", err)
 		}
