Skip to content

Commit

Permalink
azurerm_kubernetes_cluster_node_pool: Support for `gpu_instance_prof…
Browse files Browse the repository at this point in the history
…ile` argument

Fixes hashicorp#23880
  • Loading branch information
favoretti committed Nov 17, 2023
1 parent 4c70668 commit 4a5a09b
Show file tree
Hide file tree
Showing 3 changed files with 91 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,19 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema {
}, false),
},

"gpu_instance_profile": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{
string(agentpools.GPUInstanceProfileMIGOneg),
string(agentpools.GPUInstanceProfileMIGTwog),
string(agentpools.GPUInstanceProfileMIGThreeg),
string(agentpools.GPUInstanceProfileMIGFourg),
string(agentpools.GPUInstanceProfileMIGSeveng),
}, false),
},

"kubelet_config": schemaNodePoolKubeletConfigForceNew(),

"linux_os_config": schemaNodePoolLinuxOSConfigForceNew(),
Expand Down Expand Up @@ -455,6 +468,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int
EnableEncryptionAtHost: utils.Bool(d.Get("enable_host_encryption").(bool)),
EnableUltraSSD: utils.Bool(d.Get("ultra_ssd_enabled").(bool)),
EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)),
GpuInstanceProfile: utils.ToPtr(agentpools.GPUInstanceProfile(d.Get("gpu_instance_profile").(string))),
KubeletDiskType: utils.ToPtr(agentpools.KubeletDiskType(d.Get("kubelet_disk_type").(string))),
Mode: utils.ToPtr(mode),
ScaleSetPriority: utils.ToPtr(agentpools.ScaleSetPriority(d.Get("priority").(string))),
Expand Down Expand Up @@ -840,6 +854,7 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter
d.Set("custom_ca_trust_enabled", props.EnableCustomCATrust)
d.Set("fips_enabled", props.EnableFIPS)
d.Set("ultra_ssd_enabled", props.EnableUltraSSD)
d.Set("gpu_instance_profile", props.GpuInstanceProfile)

if v := props.KubeletDiskType; v != nil {
d.Set("kubelet_disk_type", string(*v))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1034,6 +1034,21 @@ func TestAccKubernetesClusterNodePool_snapshotId(t *testing.T) {
})
}

// TestAccKubernetesClusterNodePool_GPUInstanceProfile provisions a node pool
// with the `gpu_instance_profile` argument set, checks the pool exists in
// Azure, and confirms the attribute round-trips through an import step.
func TestAccKubernetesClusterNodePool_GPUInstanceProfile(t *testing.T) {
	data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
	r := KubernetesClusterNodePoolResource{}

	// Create the GPU node pool, then verify state parity via import.
	steps := []acceptance.TestStep{
		{
			Config: r.gpuInstanceProfile(data),
			Check: acceptance.ComposeTestCheckFunc(
				check.That(data.ResourceName).ExistsInAzure(r),
			),
		},
		data.ImportStep(),
	}

	data.ResourceTest(t, r, steps)
}

func (t KubernetesClusterNodePoolResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) {
id, err := agentpools.ParseAgentPoolID(state.ID)
if err != nil {
Expand Down Expand Up @@ -1365,6 +1380,65 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger)
}

// gpuInstanceProfile renders a test configuration containing a cluster with a
// default node pool plus an additional GPU node pool ("Standard_NC96ads_A100_v4")
// whose `gpu_instance_profile` is set to "MIG7g".
func (KubernetesClusterNodePoolResource) gpuInstanceProfile(data acceptance.TestData) string {
	// Placeholders consume, in order: random integer (RG name), primary
	// location, then four more random integers for vnet/subnet/cluster names.
	template := `
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%d"
location = "%s"
}
resource "azurerm_virtual_network" "test" {
name = "acctestvirtnet%d"
address_space = ["10.1.0.0/16"]
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
}
resource "azurerm_subnet" "test" {
name = "acctestsubnet%d"
resource_group_name = azurerm_resource_group.test.name
virtual_network_name = azurerm_virtual_network.test.name
address_prefixes = ["10.1.0.0/24"]
}
resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
vnet_subnet_id = azurerm_subnet.test.id
}
identity {
type = "SystemAssigned"
}
network_profile {
network_plugin = "azure"
load_balancer_sku = "standard"
}
}
resource "azurerm_kubernetes_cluster_node_pool" "test" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
vm_size = "Standard_NC96ads_A100_v4"
node_count = 1
vnet_subnet_id = azurerm_subnet.test.id
gpu_instance_profile = "MIG7g"
}
`
	return fmt.Sprintf(template, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger)
}

func (KubernetesClusterNodePoolResource) capacityReservationGroup(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
Expand Down
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster_node_pool.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,8 @@ The following arguments are supported:

* `eviction_policy` - (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created.

~> **Note:** An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.

* `gpu_instance_profile` - (Optional) Specifies the GPU instance profile for this node pool. Possible values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.

* `host_group_id` - (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
Expand Down

0 comments on commit 4a5a09b

Please sign in to comment.