azurerm_batch_pool - support target_node_communication_mode #22094

Merged (1 commit) on Jun 13, 2023
21 changes: 21 additions & 0 deletions internal/services/batch/batch_pool_resource.go
@@ -709,6 +709,13 @@ func resourceBatchPool() *pluginsdk.Resource {
string(pool.InterNodeCommunicationStateDisabled),
}, false),
},

"target_node_communication_mode": {
Type: pluginsdk.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice(pool.PossibleValuesForNodeCommunicationMode(), false),
},

"task_scheduling_policy": {
Type: pluginsdk.TypeList,
Optional: true,
@@ -912,6 +919,10 @@ func resourceBatchPoolCreate(d *pluginsdk.ResourceData, meta interface{}) error
return fmt.Errorf("expanding `network_configuration`: %+v", err)
}

if v, ok := d.GetOk("target_node_communication_mode"); ok {
parameters.Properties.TargetNodeCommunicationMode = pointer.To(pool.NodeCommunicationMode(v.(string)))
}

_, err = client.Create(ctx, id, parameters, pool.CreateOperationOptions{})
if err != nil {
return fmt.Errorf("creating %s: %+v", id, err)
@@ -1040,6 +1051,10 @@ func resourceBatchPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) error
}
parameters.Properties.MountConfiguration = mountConfiguration

if d.HasChange("target_node_communication_mode") {
parameters.Properties.TargetNodeCommunicationMode = pointer.To(pool.NodeCommunicationMode(d.Get("target_node_communication_mode").(string)))
}

result, err := client.Update(ctx, *id, parameters, pool.UpdateOperationOptions{})
if err != nil {
return fmt.Errorf("updating %s: %+v", *id, err)
@@ -1237,6 +1252,12 @@ func resourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error {
d.Set("mount", mountConfigs)
}

targetNodeCommunicationMode := ""
if props.TargetNodeCommunicationMode != nil {
targetNodeCommunicationMode = string(*props.TargetNodeCommunicationMode)
}
d.Set("target_node_communication_mode", targetNodeCommunicationMode)

if err := d.Set("network_configuration", flattenBatchPoolNetworkConfiguration(props.NetworkConfiguration)); err != nil {
return fmt.Errorf("setting `network_configuration`: %v", err)
}
62 changes: 62 additions & 0 deletions internal/services/batch/batch_pool_resource_test.go
@@ -622,6 +622,28 @@ func TestAccBatchPool_mountConfigurationNFS(t *testing.T) {
})
}

func TestAccBatchPool_targetNodeCommunicationMode(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_batch_pool", "test")
r := BatchPoolResource{}

data.ResourceTest(t, r, []acceptance.TestStep{
{
Config: r.targetNodeCommunicationMode(data, "Default"),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep("stop_pending_resize_operation"),
{
Config: r.targetNodeCommunicationMode(data, "Simplified"),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep("stop_pending_resize_operation"),
})
}

func TestAccBatchPool_diskSettings(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_batch_pool", "test")
r := BatchPoolResource{}
@@ -2227,6 +2249,46 @@ resource "azurerm_batch_pool" "test" {
`, template, data.RandomString, data.RandomString)
}

func (BatchPoolResource) targetNodeCommunicationMode(data acceptance.TestData, targetNodeCommunicationMode string) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "testaccRG-batch-%d"
location = "%s"
}

resource "azurerm_batch_account" "test" {
name = "testaccbatch%s"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}

resource "azurerm_batch_pool" "test" {
name = "testaccpool%s"
resource_group_name = azurerm_resource_group.test.name
account_name = azurerm_batch_account.test.name
node_agent_sku_id = "batch.node.ubuntu 18.04"
vm_size = "Standard_A1"

target_node_communication_mode = "%s"

fixed_scale {
target_dedicated_nodes = 1
}

storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-lts"
version = "latest"
}
}
`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString, targetNodeCommunicationMode)
}

func (BatchPoolResource) extensions(data acceptance.TestData) string {
template := BatchPoolResource{}.template(data)
return fmt.Sprintf(`
2 changes: 2 additions & 0 deletions website/docs/r/batch_pool.html.markdown
@@ -165,6 +165,8 @@ The following arguments are supported:

* `os_disk_placement` - (Optional) Specifies the ephemeral disk placement for the operating system disk for all VMs in the pool. This property can be used by the user in the request to choose which location the operating system should be in, e.g. cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at <https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements> and Linux VMs at <https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements>. The only possible value is `CacheDisk`.

* `target_node_communication_mode` - (Optional) The desired node communication mode for the pool. Possible values are `Classic`, `Default` and `Simplified` (a minimal example configuration is shown below this diff).

* `task_scheduling_policy` - (Optional) A `task_scheduling_policy` block that describes how tasks are distributed across compute nodes in a pool. If not specified, the default is spread as defined below.

* `user_accounts` - (Optional) A `user_accounts` block that describes the list of user accounts to be created on each node in the pool as defined below.
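For reference, a minimal sketch of a pool configuration that sets the new argument, adapted from the acceptance test config in this PR. The resource names, location, and the choice of `Simplified` are illustrative only; `Default` is the other value exercised by the test.

```hcl
provider "azurerm" {
  features {}
}

resource "azurerm_resource_group" "example" {
  name     = "example-batch-rg"
  location = "West Europe"
}

resource "azurerm_batch_account" "example" {
  name                = "examplebatchacct"
  resource_group_name = azurerm_resource_group.example.name
  location            = azurerm_resource_group.example.location
}

resource "azurerm_batch_pool" "example" {
  name                = "examplepool"
  resource_group_name = azurerm_resource_group.example.name
  account_name        = azurerm_batch_account.example.name
  node_agent_sku_id   = "batch.node.ubuntu 18.04"
  vm_size             = "Standard_A1"

  # New optional argument added by this PR; omit it to keep the
  # service default behaviour.
  target_node_communication_mode = "Simplified"

  fixed_scale {
    target_dedicated_nodes = 1
  }

  storage_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "18.04-lts"
    version   = "latest"
  }
}
```

Because the update path only sends the field when `d.HasChange("target_node_communication_mode")` is true, changing this value on an existing pool should be applied in place rather than forcing a new resource.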