diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/internal/services/containers/kubernetes_cluster_node_pool_resource.go
index c8e80152ee07..c0433ceb6ceb 100644
--- a/internal/services/containers/kubernetes_cluster_node_pool_resource.go
+++ b/internal/services/containers/kubernetes_cluster_node_pool_resource.go
@@ -1,6 +1,7 @@
 package containers
 
 import (
+	"encoding/base64"
 	"fmt"
 	"log"
 	"time"
@@ -155,6 +156,13 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource {
 				ForceNew: true,
 			},
 
+			"message_of_the_day": {
+				Type:         pluginsdk.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				ValidateFunc: validation.StringIsNotEmpty,
+			},
+
 			"mode": {
 				Type:     pluginsdk.TypeString,
 				Optional: true,
@@ -454,6 +462,14 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int
 		profile.NodeTaints = nodeTaints
 	}
 
+	if v := d.Get("message_of_the_day").(string); v != "" {
+		if profile.OsType == containerservice.OSTypeWindows {
+			return fmt.Errorf("`message_of_the_day` cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script)")
+		}
+		messageOfTheDayEncoded := base64.StdEncoding.EncodeToString([]byte(v))
+		profile.MessageOfTheDay = &messageOfTheDayEncoded
+	}
+
 	if osDiskSizeGB := d.Get("os_disk_size_gb").(int); osDiskSizeGB > 0 {
 		profile.OsDiskSizeGB = utils.Int32(int32(osDiskSizeGB))
 	}
@@ -772,6 +788,16 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter
 	}
 	d.Set("max_count", maxCount)
 
+	messageOfTheDay := ""
+	if props.MessageOfTheDay != nil {
+		messageOfTheDayDecoded, err := base64.StdEncoding.DecodeString(*props.MessageOfTheDay)
+		if err != nil {
+			return fmt.Errorf("setting `message_of_the_day`: %+v", err)
+		}
+		messageOfTheDay = string(messageOfTheDayDecoded)
+	}
+	d.Set("message_of_the_day", messageOfTheDay)
+
 	maxPods := 0
 	if props.MaxPods != nil {
 		maxPods = int(*props.MaxPods)
diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go
index 7a92eec905e3..7067512ba5d0 100644
--- a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go
+++ b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go
@@ -1996,6 +1996,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
   node_count        = 3
   fips_enabled      = true
   kubelet_disk_type = "OS"
+  message_of_the_day = "daily message"
 }
 `, r.templateConfig(data))
 }
diff --git a/internal/services/containers/kubernetes_cluster_other_resource_test.go b/internal/services/containers/kubernetes_cluster_other_resource_test.go
index 0b5bbd368a1e..ffb3fed2df63 100644
--- a/internal/services/containers/kubernetes_cluster_other_resource_test.go
+++ b/internal/services/containers/kubernetes_cluster_other_resource_test.go
@@ -283,6 +283,28 @@ func TestAccKubernetesCluster_upgrade(t *testing.T) {
 	})
 }
 
+func TestAccKubernetesCluster_scaleDownMode(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
+	r := KubernetesClusterResource{}
+
+	data.ResourceTest(t, r, []acceptance.TestStep{
+		{
+			Config: r.scaleDownMode(data, "Delete"),
+			Check: acceptance.ComposeTestCheckFunc(
+				check.That(data.ResourceName).ExistsInAzure(r),
+			),
+		},
+		data.ImportStep(),
+		{
+			Config: r.scaleDownMode(data, "Deallocate"),
+			Check: acceptance.ComposeTestCheckFunc(
+				check.That(data.ResourceName).ExistsInAzure(r),
+			),
+		},
+		data.ImportStep(),
+	})
+}
+
 func TestAccKubernetesCluster_tags(t *testing.T) {
 	data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
 	r := KubernetesClusterResource{}
@@ -1197,11 +1219,13 @@ resource "azurerm_kubernetes_cluster" "test" {
   dns_prefix          = "acctestaks%d"
 
   default_node_pool {
-    name              = "default"
-    node_count        = 1
-    vm_size           = "Standard_DS2_v2"
-    fips_enabled      = true
-    kubelet_disk_type = "OS"
+    name               = "default"
+    node_count         = 1
+    vm_size            = "Standard_DS2_v2"
+    fips_enabled       = true
+    kubelet_disk_type  = "OS"
+    message_of_the_day = "daily message"
+    workload_runtime   = "OCIContainer"
   }
 
   identity {
@@ -1890,6 +1914,37 @@ resource "azurerm_kubernetes_cluster" "test" {
 `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, ultraSSDEnabled)
 }
 
+func (KubernetesClusterResource) scaleDownMode(data acceptance.TestData, scaleDownMode string) string {
+	return fmt.Sprintf(`
+provider "azurerm" {
+  features {}
+}
+
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-aks-%d"
+  location = "%s"
+}
+
+resource "azurerm_kubernetes_cluster" "test" {
+  name                = "acctestaks%d"
+  location            = azurerm_resource_group.test.location
+  resource_group_name = azurerm_resource_group.test.name
+  dns_prefix          = "acctestaks%d"
+
+  default_node_pool {
+    name            = "default"
+    node_count      = 1
+    vm_size         = "Standard_DS2_v2"
+    scale_down_mode = "%s"
+  }
+
+  identity {
+    type = "SystemAssigned"
+  }
+}
+`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, scaleDownMode)
+}
+
 func (KubernetesClusterResource) privateClusterPublicFqdn(data acceptance.TestData, privateClusterPublicFqdnEnabled bool) string {
 	return fmt.Sprintf(`
 provider "azurerm" {
diff --git a/internal/services/containers/kubernetes_cluster_resource.go b/internal/services/containers/kubernetes_cluster_resource.go
index 89b8b463a9a6..dcaae10c5b93 100644
--- a/internal/services/containers/kubernetes_cluster_resource.go
+++ b/internal/services/containers/kubernetes_cluster_resource.go
@@ -563,6 +563,13 @@ func resourceKubernetesCluster() *pluginsdk.Resource {
 							ValidateFunc:  validation.IntBetween(1, 100),
 							ConflictsWith: []string{"network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids", "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"},
 						},
+						"managed_outbound_ipv6_count": {
+							Type:          pluginsdk.TypeInt,
+							Optional:      true,
+							Computed:      true,
+							ValidateFunc:  validation.IntBetween(1, 100),
+							ConflictsWith: []string{"network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids", "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"},
+						},
 						"outbound_ip_prefix_ids": {
 							Type:     pluginsdk.TypeSet,
 							Optional: true,
@@ -1485,6 +1492,18 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{}
 			loadBalancerProfile.OutboundIPPrefixes = nil
 		}
 
+		if key := "network_profile.0.load_balancer_profile.0.managed_outbound_ipv6_count"; d.HasChange(key) {
+			managedOutboundIPV6Count := d.Get(key).(int)
+			if loadBalancerProfile.ManagedOutboundIPs == nil {
+				loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{}
+			}
+			loadBalancerProfile.ManagedOutboundIPs.CountIPv6 = utils.Int32(int32(managedOutboundIPV6Count))
+
+			// fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.
+			loadBalancerProfile.OutboundIPs = nil
+			loadBalancerProfile.OutboundIPPrefixes = nil
+		}
+
 		if key := "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"; d.HasChange(key) {
 			outboundIPAddress := d.Get(key)
 			if v := outboundIPAddress.(*pluginsdk.Set).List(); len(v) == 0 {
@@ -2310,6 +2329,15 @@ func expandLoadBalancerProfile(d []interface{}) *containerservice.ManagedCluster
 		}
 	}
 
+	if ipv6Count := config["managed_outbound_ipv6_count"]; ipv6Count != nil {
+		if c := int32(ipv6Count.(int)); c > 0 {
+			if profile.ManagedOutboundIPs == nil {
+				profile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{}
+			}
+			profile.ManagedOutboundIPs.CountIPv6 = &c
+		}
+	}
+
 	if ipPrefixes := idsToResourceReferences(config["outbound_ip_prefix_ids"]); ipPrefixes != nil {
 		profile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes}
 	}
@@ -2441,6 +2469,10 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro
 			if count := ips.Count; count != nil {
 				lb["managed_outbound_ip_count"] = count
 			}
+
+			if countIPv6 := ips.CountIPv6; countIPv6 != nil {
+				lb["managed_outbound_ipv6_count"] = countIPv6
+			}
 		}
 
 		if oip := lbp.OutboundIPs; oip != nil {
diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go
index 3171c321bc19..ea38792c48d0 100644
--- a/internal/services/containers/kubernetes_nodepool.go
+++ b/internal/services/containers/kubernetes_nodepool.go
@@ -1,6 +1,7 @@
 package containers
 
 import (
+	"encoding/base64"
 	"fmt"
 	"regexp"
 	"strconv"
@@ -117,6 +118,13 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
 			ForceNew: true,
 		},
 
+		"message_of_the_day": {
+			Type:         pluginsdk.TypeString,
+			Optional:     true,
+			ForceNew:     true,
+			ValidateFunc: validation.StringIsNotEmpty,
+		},
+
 		"min_count": {
 			Type:     pluginsdk.TypeInt,
 			Optional: true,
@@ -226,6 +234,17 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
 			ForceNew: true,
 		},
 
+		"scale_down_mode": {
+			Type:     pluginsdk.TypeString,
+			Optional: true,
+			ForceNew: true,
+			Default:  string(containerservice.ScaleDownModeDelete),
+			ValidateFunc: validation.StringInSlice([]string{
+				string(containerservice.ScaleDownModeDeallocate),
+				string(containerservice.ScaleDownModeDelete),
+			}, false),
+		},
+
 		"host_group_id": {
 			Type:     pluginsdk.TypeString,
 			Optional: true,
@@ -234,6 +253,15 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
 		},
 
 		"upgrade_settings": upgradeSettingsSchema(),
+
+		"workload_runtime": {
+			Type:     pluginsdk.TypeString,
+			Optional: true,
+			Computed: true,
+			ValidateFunc: validation.StringInSlice([]string{
+				string(containerservice.WorkloadRuntimeOCIContainer),
+			}, false),
+		},
 	}
 
 	s["zones"] = commonschema.ZonesMultipleOptionalForceNew()
@@ -605,6 +633,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
 			MaxPods:             defaultCluster.MaxPods,
 			OsType:              defaultCluster.OsType,
 			MaxCount:            defaultCluster.MaxCount,
+			MessageOfTheDay:     defaultCluster.MessageOfTheDay,
 			MinCount:            defaultCluster.MinCount,
 			EnableAutoScaling:   defaultCluster.EnableAutoScaling,
 			EnableFIPS:          defaultCluster.EnableFIPS,
@@ -622,8 +651,10 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
 			NodeLabels:          defaultCluster.NodeLabels,
 			NodeTaints:          defaultCluster.NodeTaints,
 			PodSubnetID:         defaultCluster.PodSubnetID,
+			ScaleDownMode:       defaultCluster.ScaleDownMode,
 			Tags:                defaultCluster.Tags,
 			UpgradeSettings:     defaultCluster.UpgradeSettings,
+			WorkloadRuntime:     defaultCluster.WorkloadRuntime,
 		},
 	}
 }
@@ -688,6 +719,11 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
 		profile.MaxPods = utils.Int32(maxPods)
 	}
 
+	if v := raw["message_of_the_day"].(string); v != "" {
+		messageOfTheDayEncoded := base64.StdEncoding.EncodeToString([]byte(v))
+		profile.MessageOfTheDay = &messageOfTheDayEncoded
+	}
+
 	if prefixID := raw["node_public_ip_prefix_id"].(string); prefixID != "" {
 		profile.NodePublicIPPrefixID = utils.String(prefixID)
 	}
@@ -709,6 +745,11 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
 		profile.PodSubnetID = utils.String(podSubnetID)
 	}
 
+	profile.ScaleDownMode = containerservice.ScaleDownModeDelete
+	if scaleDownMode := raw["scale_down_mode"].(string); scaleDownMode != "" {
+		profile.ScaleDownMode = containerservice.ScaleDownMode(scaleDownMode)
+	}
+
 	if ultraSSDEnabled, ok := raw["ultra_ssd_enabled"]; ok {
 		profile.EnableUltraSSD = utils.Bool(ultraSSDEnabled.(bool))
 	}
@@ -729,6 +770,10 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
 		profile.ProximityPlacementGroupID = utils.String(proximityPlacementGroupId)
 	}
 
+	if workloadRunTime := raw["workload_runtime"].(string); workloadRunTime != "" {
+		profile.WorkloadRuntime = containerservice.WorkloadRuntime(workloadRunTime)
+	}
+
 	if capacityReservationGroupId := raw["capacity_reservation_group_id"].(string); capacityReservationGroupId != "" {
 		profile.CapacityReservationGroupID = utils.String(capacityReservationGroupId)
 	}
@@ -1013,6 +1058,15 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
 		maxPods = int(*agentPool.MaxPods)
 	}
 
+	messageOfTheDay := ""
+	if agentPool.MessageOfTheDay != nil {
+		messageOfTheDayDecoded, err := base64.StdEncoding.DecodeString(*agentPool.MessageOfTheDay)
+		if err != nil {
+			return nil, err
+		}
+		messageOfTheDay = string(messageOfTheDayDecoded)
+	}
+
 	minCount := 0
 	if agentPool.MinCount != nil {
 		minCount = int(*agentPool.MinCount)
@@ -1080,6 +1134,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
 		proximityPlacementGroupId = *agentPool.ProximityPlacementGroupID
 	}
 
+	scaleDownMode := containerservice.ScaleDownModeDelete
+	if agentPool.ScaleDownMode != "" {
+		scaleDownMode = agentPool.ScaleDownMode
+	}
+
 	vmSize := ""
 	if agentPool.VMSize != nil {
 		vmSize = *agentPool.VMSize
@@ -1089,6 +1148,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
 		capacityReservationGroupId = *agentPool.CapacityReservationGroupID
 	}
 
+	workloadRunTime := ""
+	if agentPool.WorkloadRuntime != "" {
+		workloadRunTime = string(agentPool.WorkloadRuntime)
+	}
+
 	upgradeSettings := flattenUpgradeSettings(agentPool.UpgradeSettings)
 	linuxOSConfig, err := flattenAgentPoolLinuxOSConfig(agentPool.LinuxOSConfig)
 	if err != nil {
@@ -1104,6 +1168,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
 		"kubelet_disk_type":            string(agentPool.KubeletDiskType),
 		"max_count":                    maxCount,
 		"max_pods":                     maxPods,
+		"message_of_the_day":           messageOfTheDay,
 		"min_count":                    minCount,
 		"name":                         name,
 		"node_count":                   count,
@@ -1113,10 +1178,12 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
 		"os_disk_size_gb":              osDiskSizeGB,
 		"os_disk_type":                 string(osDiskType),
 		"os_sku":                       string(agentPool.OsSKU),
+		"scale_down_mode":              string(scaleDownMode),
 		"tags":                         tags.Flatten(agentPool.Tags),
 		"type":                         string(agentPool.Type),
 		"ultra_ssd_enabled":            enableUltraSSD,
 		"vm_size":                      vmSize,
+		"workload_runtime":             workloadRunTime,
 		"pod_subnet_id":                podSubnetId,
 		"orchestrator_version":         orchestratorVersion,
 		"proximity_placement_group_id": proximityPlacementGroupId,
diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown
index ed4a585bc235..26dd062c20c6 100644
--- a/website/docs/r/kubernetes_cluster.html.markdown
+++ b/website/docs/r/kubernetes_cluster.html.markdown
@@ -336,6 +336,8 @@ A `default_node_pool` block supports the following:
 
 * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created.
 
+* `message_of_the_day` - (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
+
 * `node_public_ip_prefix_id` - (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
 
 * `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool.
@@ -354,6 +356,10 @@ A `default_node_pool` block supports the following:
 
 * `pod_subnet_id` - (Optional) The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
 
+-> **Note:** This requires that the Preview Feature `Microsoft.ContainerService/PodSubnetPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://docs.microsoft.com/azure/aks/configure-azure-cni#register-the-podsubnetpreview-preview-feature) for more information.
+
+* `scale_down_mode` - (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. Possible values are `Delete` and `Deallocate`. Defaults to `Delete`. Changing this forces a new resource to be created.
+
 * `type` - (Optional) The type of Node Pool which should be created. Possible values are `AvailabilitySet` and `VirtualMachineScaleSets`. Defaults to `VirtualMachineScaleSets`.
 
 * `tags` - (Optional) A mapping of tags to assign to the Node Pool.
@@ -384,6 +390,8 @@ If `enable_auto_scaling` is set to `false`, then the following fields can also b
 
 -> **Note:** If `enable_auto_scaling` is set to `false` both `min_count` and `max_count` fields need to be set to `null` or omitted from the configuration.
 
+* `workload_runtime` - (Optional) Specifies the workload runtime used by the node pool. Possible values are `OCIContainer`.
+
 * `zones` - (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster should be located. Changing this forces a new Kubernetes Cluster to be created.
 
 -> **Note:** This requires that the `type` is set to `VirtualMachineScaleSets` and that `load_balancer_sku` is set to `standard`.
@@ -552,6 +560,10 @@ A `load_balancer_profile` block supports the following:
 
 * `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive.
 
+* `managed_outbound_ipv6_count` - (Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be between `1` and `100` inclusive. The default value is `0` for single-stack and `1` for dual-stack.
+
+~> **Note:** `managed_outbound_ipv6_count` requires dual-stack networking. To enable dual-stack networking the Preview Feature `Microsoft.ContainerService/AKS-EnableDualStack` needs to be enabled and the Resource Provider re-registered, see [the documentation](https://docs.microsoft.com/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature) for more information.
+
 * `outbound_ip_address_ids` - (Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer.
 
 -> **Note:** Set `outbound_ip_address_ids` to an empty slice `[]` in order to unlink it from the cluster. Unlinking a `outbound_ip_address_ids` will revert the load balancing for the cluster back to a managed one.
diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
index 879928d37a23..3b1ab3e442ad 100644
--- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown
+++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -99,6 +99,8 @@ The following arguments are supported:
 
 * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created.
 
+* `message_of_the_day` - (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
+
 * `mode` - (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
 
 * `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.