diff --git a/.changelog/40717.txt b/.changelog/40717.txt new file mode 100644 index 000000000000..ba669b03c210 --- /dev/null +++ b/.changelog/40717.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_instance_type: Add `bandwidth_weightings`, `boot_modes`, `default_network_card_index`, `efa_maximum_interfaces`, `inference_accelerators.#.memory_size`, `media_accelerators.#.count`, `media_accelerators.#.manufacturer`, `media_accelerators.#.memory_size`, `media_accelerators.#.name`, `network_cards.#.baseline_bandwidth`, `network_cards.#.index`, `network_cards.#.maximum_interfaces`, `network_cards.#.performance`, `network_cards.#.peak_bandwidth`, `neuron_devices.#.core_count`, `neuron_devices.#.count`, `neuron_devices.#.memory_size`, `neuron_devices.#.name`, `neuron_devices.#.version`, `nitro_enclaves_support`, `nitro_tpm_support`, `nitro_tpm_supported_versions`, `phc_support`, `srd_supported`, `supported_cpu_features`, `total_inference_memory`, `total_media_memory`, and `total_neuron_device_memory` attributes. 
+``` diff --git a/internal/service/ec2/ec2_instance_type_data_source.go b/internal/service/ec2/ec2_instance_type_data_source.go index 0e20423fe231..d83b9536bc5b 100644 --- a/internal/service/ec2/ec2_instance_type_data_source.go +++ b/internal/service/ec2/ec2_instance_type_data_source.go @@ -30,10 +30,20 @@ func dataSourceInstanceType() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "bandwidth_weightings": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "bare_metal": { Type: schema.TypeBool, Computed: true, }, + "boot_modes": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "burstable_performance_supported": { Type: schema.TypeBool, Computed: true, @@ -50,6 +60,10 @@ func dataSourceInstanceType() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "default_network_card_index": { + Type: schema.TypeInt, + Computed: true, + }, "default_threads_per_core": { Type: schema.TypeInt, Computed: true, @@ -98,6 +112,10 @@ func dataSourceInstanceType() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "efa_maximum_interfaces": { + Type: schema.TypeInt, + Computed: true, + }, "ena_support": { Type: schema.TypeString, Computed: true, @@ -179,6 +197,10 @@ func dataSourceInstanceType() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "memory_size": { + Type: schema.TypeInt, + Computed: true, + }, names.AttrName: { Type: schema.TypeString, Computed: true, @@ -234,19 +256,125 @@ func dataSourceInstanceType() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "media_accelerators": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Computed: true, + }, + "manufacturer": { + Type: schema.TypeString, + Computed: true, + }, + "memory_size": { + Type: schema.TypeInt, + Computed: true, + }, + names.AttrName: { + Type: 
schema.TypeString, + Computed: true, + }, + }, + }, + }, "memory_size": { Type: schema.TypeInt, Computed: true, }, + "network_cards": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "baseline_bandwidth": { + Type: schema.TypeFloat, + Computed: true, + }, + "index": { + Type: schema.TypeInt, + Computed: true, + }, + "maximum_interfaces": { + Type: schema.TypeInt, + Computed: true, + }, + "performance": { + Type: schema.TypeString, + Computed: true, + }, + "peak_bandwidth": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, "network_performance": { Type: schema.TypeString, Computed: true, }, + "neuron_devices": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "core_count": { + Type: schema.TypeInt, + Computed: true, + }, + "count": { + Type: schema.TypeInt, + Computed: true, + }, + "memory_size": { + Type: schema.TypeInt, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrVersion: { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "nitro_enclaves_support": { + Type: schema.TypeString, + Computed: true, + }, + "nitro_tpm_support": { + Type: schema.TypeString, + Computed: true, + }, + "nitro_tpm_supported_versions": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "phc_support": { + Type: schema.TypeString, + Computed: true, + }, + "srd_supported": { + Type: schema.TypeBool, + Computed: true, + }, "supported_architectures": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "supported_cpu_features": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "supported_placement_strategies": { Type: schema.TypeList, Computed: true, @@ -279,10 +407,22 @@ func dataSourceInstanceType() *schema.Resource { Type: schema.TypeInt, Computed: true, 
}, + "total_inference_memory": { + Type: schema.TypeInt, + Computed: true, + }, "total_instance_storage": { Type: schema.TypeInt, Computed: true, }, + "total_media_memory": { + Type: schema.TypeInt, + Computed: true, + }, + "total_neuron_device_memory": { + Type: schema.TypeInt, + Computed: true, + }, "valid_cores": { Type: schema.TypeList, Computed: true, @@ -310,10 +450,13 @@ func dataSourceInstanceTypeRead(ctx context.Context, d *schema.ResourceData, met d.SetId(string(v.InstanceType)) d.Set("auto_recovery_supported", v.AutoRecoverySupported) d.Set("bare_metal", v.BareMetal) + d.Set("bandwidth_weightings", v.NetworkInfo.BandwidthWeightings) + d.Set("boot_modes", v.SupportedBootModes) d.Set("burstable_performance_supported", v.BurstablePerformanceSupported) d.Set("current_generation", v.CurrentGeneration) d.Set("dedicated_hosts_supported", v.DedicatedHostsSupported) d.Set("default_cores", v.VCpuInfo.DefaultCores) + d.Set("default_network_card_index", v.NetworkInfo.DefaultNetworkCardIndex) d.Set("default_threads_per_core", v.VCpuInfo.DefaultThreadsPerCore) d.Set("default_vcpus", v.VCpuInfo.DefaultVCpus) d.Set("ebs_encryption_support", v.EbsInfo.EncryptionSupport) @@ -328,6 +471,9 @@ func dataSourceInstanceTypeRead(ctx context.Context, d *schema.ResourceData, met d.Set("ebs_performance_maximum_iops", v.EbsInfo.EbsOptimizedInfo.MaximumIops) } d.Set("efa_supported", v.NetworkInfo.EfaSupported) + if v.NetworkInfo.EfaInfo != nil { + d.Set("efa_maximum_interfaces", v.NetworkInfo.EfaInfo.MaximumEfaInterfaces) + } d.Set("ena_support", v.NetworkInfo.EnaSupport) d.Set("encryption_in_transit_supported", v.NetworkInfo.EncryptionInTransitSupported) if v.FpgaInfo != nil { @@ -361,17 +507,19 @@ func dataSourceInstanceTypeRead(ctx context.Context, d *schema.ResourceData, met } d.Set("hibernation_supported", v.HibernationSupported) d.Set("hypervisor", v.Hypervisor) - if v.InferenceAcceleratorInfo != nil { - acceleratorList := make([]interface{}, 
len(v.InferenceAcceleratorInfo.Accelerators)) - for i, accl := range v.InferenceAcceleratorInfo.Accelerators { + if info := v.InferenceAcceleratorInfo; info != nil { + acceleratorList := make([]interface{}, len(info.Accelerators)) + for i, accl := range info.Accelerators { accelerator := map[string]interface{}{ "count": aws.ToInt32(accl.Count), "manufacturer": aws.ToString(accl.Manufacturer), + "memory_size": aws.ToInt32(accl.MemoryInfo.SizeInMiB), names.AttrName: aws.ToString(accl.Name), } acceleratorList[i] = accelerator } d.Set("inference_accelerators", acceleratorList) + d.Set("total_inference_memory", info.TotalInferenceMemoryInMiB) } if v.InstanceStorageInfo != nil { if v.InstanceStorageInfo.Disks != nil { @@ -395,9 +543,64 @@ func dataSourceInstanceTypeRead(ctx context.Context, d *schema.ResourceData, met d.Set("maximum_ipv6_addresses_per_interface", v.NetworkInfo.Ipv6AddressesPerInterface) d.Set("maximum_network_cards", v.NetworkInfo.MaximumNetworkCards) d.Set("maximum_network_interfaces", v.NetworkInfo.MaximumNetworkInterfaces) + if info := v.MediaAcceleratorInfo; info != nil { + acceleratorList := make([]interface{}, len(info.Accelerators)) + for i, accl := range info.Accelerators { + accelerator := map[string]interface{}{ + "count": aws.ToInt32(accl.Count), + "manufacturer": aws.ToString(accl.Manufacturer), + "memory_size": aws.ToInt32(accl.MemoryInfo.SizeInMiB), + names.AttrName: aws.ToString(accl.Name), + } + acceleratorList[i] = accelerator + } + d.Set("media_accelerators", acceleratorList) + d.Set("total_media_memory", info.TotalMediaMemoryInMiB) + } d.Set("memory_size", v.MemoryInfo.SizeInMiB) + if info := v.NeuronInfo; info != nil { + deviceList := make([]interface{}, len(info.NeuronDevices)) + for i, d := range info.NeuronDevices { + device := map[string]interface{}{ + "count": aws.ToInt32(d.Count), + "core_count": aws.ToInt32(d.CoreInfo.Count), + "memory_size": aws.ToInt32(d.MemoryInfo.SizeInMiB), + names.AttrName: aws.ToString(d.Name), + 
names.AttrVersion: aws.ToInt32(d.CoreInfo.Version), + } + deviceList[i] = device + } + d.Set("neuron_devices", deviceList) + d.Set("total_neuron_device_memory", info.TotalNeuronDeviceMemoryInMiB) + } + d.Set("nitro_enclaves_support", v.NitroEnclavesSupport) + d.Set("nitro_tpm_support", v.NitroTpmSupport) + var nitroTpmSupportedVersions []string + if v.NitroTpmInfo != nil { + nitroTpmSupportedVersions = v.NitroTpmInfo.SupportedVersions + } else { + nitroTpmSupportedVersions = []string{} + } + d.Set("nitro_tpm_supported_versions", nitroTpmSupportedVersions) d.Set("network_performance", v.NetworkInfo.NetworkPerformance) + if info := v.NetworkInfo; info != nil { + cardList := make([]interface{}, len(info.NetworkCards)) + for i, c := range info.NetworkCards { + card := map[string]interface{}{ + "baseline_bandwidth": aws.ToFloat64(c.BaselineBandwidthInGbps), + "index": aws.ToInt32(c.NetworkCardIndex), + "maximum_interfaces": aws.ToInt32(c.MaximumNetworkInterfaces), + "peak_bandwidth": aws.ToFloat64(c.PeakBandwidthInGbps), + "performance": aws.ToString(c.NetworkPerformance), + } + cardList[i] = card + } + d.Set("network_cards", cardList) + } + d.Set("phc_support", v.PhcSupport) + d.Set("srd_supported", v.NetworkInfo.EnaSrdSupported) d.Set("supported_architectures", v.ProcessorInfo.SupportedArchitectures) + d.Set("supported_cpu_features", v.ProcessorInfo.SupportedFeatures) d.Set("supported_placement_strategies", v.PlacementGroupInfo.SupportedStrategies) d.Set("supported_root_device_types", v.SupportedRootDeviceTypes) d.Set("supported_usages_classes", v.SupportedUsageClasses) diff --git a/website/docs/d/ec2_instance_type.html.markdown b/website/docs/d/ec2_instance_type.html.markdown index ac277ac0fc88..38e73b58d212 100644 --- a/website/docs/d/ec2_instance_type.html.markdown +++ b/website/docs/d/ec2_instance_type.html.markdown @@ -33,11 +33,14 @@ This data source exports the following attributes in addition to the arguments a ~> **NOTE:** Not all attributes are set for every 
instance type. * `auto_recovery_supported` - `true` if auto recovery is supported. +* `bandwidth_weightings` - A set of strings of valid settings for [configurable bandwidth weighting](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-bandwidth-weighting.html), if supported. * `bare_metal` - `true` if it is a bare metal instance type. +* `boot_modes` - A set of strings of supported boot modes. * `burstable_performance_supported` - `true` if the instance type is a burstable performance instance type. * `current_generation` - `true` if the instance type is a current generation. * `dedicated_hosts_supported` - `true` if Dedicated Hosts are supported on the instance type. * `default_cores` - Default number of cores for the instance type. +* `default_network_card_index` - The index of the default network card, starting at `0`. * `default_threads_per_core` - The default number of threads per core for the instance type. * `default_vcpus` - Default number of vCPUs for the instance type. * `ebs_encryption_support` - Indicates whether Amazon EBS encryption is supported. @@ -49,25 +52,27 @@ This data source exports the following attributes in addition to the arguments a * `ebs_performance_maximum_bandwidth` - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. * `ebs_performance_maximum_iops` - The maximum input/output storage operations per second for an EBS-optimized instance type. * `ebs_performance_maximum_throughput` - The maximum throughput performance for an EBS-optimized instance type, in MBps. -* `efa_supported` - Whether Elastic Fabric Adapter (EFA) is supported. -* `ena_support` - Whether Elastic Network Adapter (ENA) is supported. -* `encryption_in_transit_supported` - Indicates whether encryption in-transit between instances is supported. +* `efa_maximum_interfaces` - The maximum number of Elastic Fabric Adapters for the instance type. +* `efa_supported` - `true` if Elastic Fabric Adapter (EFA) is supported. 
+* `ena_support` - Indicates whether Elastic Network Adapter (ENA) is `"supported"`, `"required"`, or `"unsupported"`. +* `encryption_in_transit_supported` - `true` if encryption in-transit between instances is supported. * `fpgas` - Describes the FPGA accelerator settings for the instance type. * `fpgas.#.count` - The count of FPGA accelerators for the instance type. * `fpgas.#.manufacturer` - The manufacturer of the FPGA accelerator. - * `fpgas.#.memory_size` - The size (in MiB) for the memory available to the FPGA accelerator. + * `fpgas.#.memory_size` - The size (in MiB) of the memory available to the FPGA accelerator. * `fpgas.#.name` - The name of the FPGA accelerator. * `free_tier_eligible` - `true` if the instance type is eligible for the free tier. * `gpus` - Describes the GPU accelerators for the instance type. * `gpus.#.count` - The number of GPUs for the instance type. * `gpus.#.manufacturer` - The manufacturer of the GPU accelerator. - * `gpus.#.memory_size` - The size (in MiB) for the memory available to the GPU accelerator. + * `gpus.#.memory_size` - The size (in MiB) of the memory available to the GPU accelerator. * `gpus.#.name` - The name of the GPU accelerator. * `hibernation_supported` - `true` if On-Demand hibernation is supported. * `hypervisor` - Hypervisor used for the instance type. * `inference_accelerators` Describes the Inference accelerators for the instance type. * `inference_accelerators.#.count` - The number of Inference accelerators for the instance type. * `inference_accelerators.#.manufacturer` - The manufacturer of the Inference accelerator. + * `inference_accelerators.#.memory_size` - The size (in MiB) of the memory available to the inference accelerator. * `inference_accelerators.#.name` - The name of the Inference accelerator. * `instance_disks` - Describes the disks for the instance type. * `instance_disks.#.count` - The number of disks with this configuration. 
@@ -79,17 +84,43 @@ This data source exports the following attributes in addition to the arguments a * `maximum_ipv6_addresses_per_interface` - The maximum number of IPv6 addresses per network interface. * `maximum_network_cards` - The maximum number of physical network cards that can be allocated to the instance. * `maximum_network_interfaces` - The maximum number of network interfaces for the instance type. +* `media_accelerators` - Describes the media accelerator settings for the instance type. + * `media_accelerators.#.count` - The number of media accelerators for the instance type. + * `media_accelerators.#.manufacturer` - The manufacturer of the media accelerator. + * `media_accelerators.#.memory_size` - The size (in MiB) of the memory available to each media accelerator. + * `media_accelerators.#.name` - The name of the media accelerator. * `memory_size` - Size of the instance memory, in MiB. +* `network_cards` - Describes the network cards for the instance type. + * `network_cards.#.baseline_bandwidth` - The baseline network performance (in Gbps) of the network card. + * `network_cards.#.index` - The index of the network card. + * `network_cards.#.maximum_interfaces` - The maximum number of network interfaces for the network card. + * `network_cards.#.performance` - Describes the network performance of the network card. + * `network_cards.#.peak_bandwidth` - The peak (burst) network performance (in Gbps) of the network card. * `network_performance` - Describes the network performance. -* `supported_architectures` - A list of architectures supported by the instance type. +* `neuron_devices` - Describes the Neuron accelerator settings for the instance type. + * `neuron_devices.#.core_count` - The number of cores available to the neuron accelerator. + * `neuron_devices.#.count` - The number of neuron accelerators for the instance type. + * `neuron_devices.#.memory_size` - The size (in MiB) of the memory available to the neuron accelerator. 
+ * `neuron_devices.#.name` - The name of the neuron accelerator. + * `neuron_devices.#.version` - A number representing the version of the neuron accelerator. +* `nitro_enclaves_support` - Indicates whether Nitro Enclaves is `"supported"` or `"unsupported"`. +* `nitro_tpm_support` - Indicates whether NitroTPM is `"supported"` or `"unsupported"`. +* `nitro_tpm_supported_versions` - A set of strings indicating the supported NitroTPM versions. +* `phc_support` - Indicates whether a local Precision Time Protocol (PTP) hardware clock (PHC) is `"supported"` or `"unsupported"`. +* `srd_supported` - `true` if the instance type supports [ENA Express](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ena-express.html). +* `supported_architectures` - A list of strings of architectures supported by the instance type. +* `supported_cpu_features` - A set of strings indicating supported CPU features. * `supported_placement_strategies` - A list of supported placement groups types. -* `supported_root_device_types` - Indicates the supported root device types. -* `supported_usages_classes` - Indicates whether the instance type is offered for spot or On-Demand. +* `supported_root_device_types` - A list of supported root device types. +* `supported_usages_classes` - A list of supported usage classes. Usage classes are `"spot"`, `"on-demand"`, or `"capacity-block"`. * `supported_virtualization_types` - The supported virtualization types. * `sustained_clock_speed` - The speed of the processor, in GHz. * `total_fpga_memory` - Total memory of all FPGA accelerators for the instance type (in MiB). * `total_gpu_memory` - Total size of the memory for the GPU accelerators for the instance type (in MiB). +* `total_inference_memory` - The total size of the memory for the inference accelerators for the instance type (in MiB). * `total_instance_storage` - The total size of the instance disks, in GB. +* `total_neuron_device_memory` - The total size of the memory for the neuron accelerators for the instance type (in MiB). 
+* `total_media_memory` - The total size of the memory for the media accelerators for the instance type (in MiB). * `valid_cores` - List of the valid number of cores that can be configured for the instance type. * `valid_threads_per_core` - List of the valid number of threads per core that can be configured for the instance type.