diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource.go b/azurerm/internal/services/compute/linux_virtual_machine_resource.go
index bc02361a819e..29ad6ed8a770 100644
--- a/azurerm/internal/services/compute/linux_virtual_machine_resource.go
+++ b/azurerm/internal/services/compute/linux_virtual_machine_resource.go
@@ -857,7 +857,7 @@ func resourceLinuxVirtualMachineUpdate(d *schema.ResourceData, meta interface{})
 
     if features.VMDataDiskBeta() && d.HasChange("data_disks") {
         shouldUpdate = true
-        oldRaw, newRaw := d.GetChange("data_disks.0.local")
+        oldRaw, newRaw := d.GetChange("data_disks.0.create")
         oldDisks := oldRaw.(*schema.Set).List()
         newDisks := newRaw.(*schema.Set).List()
         for _, o := range oldDisks {
@@ -867,7 +867,7 @@ func resourceLinuxVirtualMachineUpdate(d *schema.ResourceData, meta interface{})
                 newDisk := n.(map[string]interface{})
                 if newDiskName, ok := newDisk["name"]; ok && oldDiskName.(string) == newDiskName.(string) {
                     if newDisk["disk_size_gb"].(int) < oldDisk["disk_size_gb"].(int) {
-                        return fmt.Errorf("new disk size cannot be smaller than existing for %q, in Virtual Machine %q (resource group %q)", oldDisk["name"], id.Name, id.ResourceGroup)
+                        return fmt.Errorf("new disk size cannot be smaller than current size for %q, in Virtual Machine %q (resource group %q)", oldDisk["name"], id.Name, id.ResourceGroup)
                     } else if newDisk["disk_size_gb"].(int) > oldDisk["disk_size_gb"].(int) {
                         shouldShutDown = true
                         shouldDeallocate = true
@@ -881,13 +881,13 @@ func resourceLinuxVirtualMachineUpdate(d *schema.ResourceData, meta interface{})
             }
         }
         // EncryptionSet changes for "existing" disks
-        oldExistingRaw, newExistingRaw := d.GetChange("data_disks.0.existing")
-        oldExisting := oldExistingRaw.(*schema.Set).List()
-        newExisting := newExistingRaw.(*schema.Set).List()
-        for _, o := range oldExisting {
+        oldAttachedRaw, newAttachedRaw := d.GetChange("data_disks.0.attach")
+        oldAttached := oldAttachedRaw.(*schema.Set).List()
+        newAttached := newAttachedRaw.(*schema.Set).List()
+        for _, o := range oldAttached {
             oldDisk := o.(map[string]interface{})
             if oldDiskID, ok := oldDisk["managed_disk_id"]; ok {
-                for _, n := range newExisting {
+                for _, n := range newAttached {
                     newDisk := n.(map[string]interface{})
                     if newDiskID, ok := newDisk["managed_disk_id"]; ok && oldDiskID.(string) == newDiskID.(string) {
                         if newDisk["disk_encryption_set_id"].(string) != oldDisk["disk_encryption_set_id"].(string) {
@@ -1103,7 +1103,7 @@ func resourceLinuxVirtualMachineUpdate(d *schema.ResourceData, meta interface{})
         }
     }
 
-    if features.VMDataDiskBeta() && d.HasChanges("data_disks.0.local", "data_disks.0.existing") {
+    if features.VMDataDiskBeta() && d.HasChanges("data_disks.0.create", "data_disks.0.attach") {
         shouldUpdate = true
         dataDisks, err := expandVirtualMachineDataDisks(ctx, d, meta)
         if err != nil {
diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_data_disk_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_data_disk_test.go
index bfe53489dd10..3df7ba62db82 100644
--- a/azurerm/internal/services/compute/linux_virtual_machine_resource_data_disk_test.go
+++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_data_disk_test.go
@@ -266,7 +266,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "None"
@@ -383,7 +383,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "None"
@@ -391,7 +391,7 @@ resource "azurerm_linux_virtual_machine" "test" {
       disk_size_gb         = 1
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk2"
       lun                  = 2
       caching              = "ReadOnly"
@@ -399,7 +399,7 @@ resource "azurerm_linux_virtual_machine" "test" {
       disk_size_gb         = 2
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk3"
       lun                  = 3
       caching              = "ReadWrite"
@@ -407,14 +407,14 @@ resource "azurerm_linux_virtual_machine" "test" {
       disk_size_gb         = 3
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test1.id
       lun                  = 10
       caching              = "None"
       storage_account_type = "Standard_LRS"
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test2.id
       lun                  = 11
       caching              = "ReadOnly"
@@ -492,7 +492,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "None"
@@ -500,7 +500,7 @@ resource "azurerm_linux_virtual_machine" "test" {
       disk_size_gb         = 1
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk2"
       lun                  = 2
       caching              = "ReadOnly"
@@ -508,14 +508,14 @@ resource "azurerm_linux_virtual_machine" "test" {
       disk_size_gb         = 4
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test1.id
       lun                  = 10
       caching              = "None"
       storage_account_type = "Standard_LRS"
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test2.id
       lun                  = 11
       caching              = "ReadWrite"
@@ -564,7 +564,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -610,7 +610,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -683,7 +683,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -734,7 +734,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -742,7 +742,7 @@ resource "azurerm_linux_virtual_machine" "test" {
       disk_size_gb         = 2
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk2"
       lun                  = 2
       caching              = "ReadOnly"
@@ -806,7 +806,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test.id
       lun                  = 1
       caching              = "None"
@@ -869,7 +869,7 @@ resource "azurerm_linux_virtual_machine" "test" {
   }
 
   data_disks {
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test.id
       lun                  = 1
       caching              = "None"
diff --git a/azurerm/internal/services/compute/virtual_machine.go b/azurerm/internal/services/compute/virtual_machine.go
index 8d59c0302828..e3191b35df62 100644
--- a/azurerm/internal/services/compute/virtual_machine.go
+++ b/azurerm/internal/services/compute/virtual_machine.go
@@ -423,9 +423,9 @@ func virtualMachineDataDiskSchema() *schema.Schema {
         Optional: true,
         Elem: &schema.Resource{
             Schema: map[string]*schema.Schema{
-                "local": virtualMachineLocalDataDiskSchema(),
+                "create": virtualMachineLocalDataDiskSchema(),
 
-                "existing": virtualMachineExistingDataDiskSchema(),
+                "attach": virtualMachineExistingDataDiskSchema(),
             },
         },
     }
@@ -561,7 +561,7 @@ func expandVirtualMachineDataDisks(ctx context.Context, d *schema.ResourceData,
     result := make([]compute.DataDisk, 0)
     dataDisks := dataDisksRaw[0].(map[string]interface{})
 
-    if newDisksRaw, ok := dataDisks["local"]; ok {
+    if newDisksRaw, ok := dataDisks["create"]; ok {
         var newDisks []compute.DataDisk
         if d.IsNewResource() {
             newDisks = expandVirtualMachineNewDataDisksForCreate(newDisksRaw)
@@ -574,13 +574,13 @@ func expandVirtualMachineDataDisks(ctx context.Context, d *schema.ResourceData,
         result = append(result, newDisks...)
     }
 
-    if existingDisksRaw, ok := dataDisks["existing"]; ok {
-        existingDisks, err := expandVirtualMachineExistingDataDisksForCreate(ctx, existingDisksRaw, d, meta)
+    if attachDisksRaw, ok := dataDisks["attach"]; ok {
+        attachDisks, err := expandVirtualMachineExistingDataDisksForCreate(ctx, attachDisksRaw, d, meta)
         if err != nil {
             return nil, err
         }
 
-        result = append(result, existingDisks...)
+        result = append(result, attachDisks...)
     }
 
     return &result, nil
@@ -688,18 +688,18 @@ func expandVirtualMachineExistingDataDisksForCreate(ctx context.Context, input i
             }
             continue
         }
-        existing, err := disksClient.Get(ctx, diskID.ResourceGroup, diskID.DiskName)
+        attach, err := disksClient.Get(ctx, diskID.ResourceGroup, diskID.DiskName)
         if err != nil {
-            return nil, fmt.Errorf("failed retrieving details for existing Managed Disk %q (resource group %q: %+v", diskID.DiskName, diskID.ResourceGroup, err)
+            return nil, fmt.Errorf("failed retrieving details for attached Managed Disk %q (resource group %q: %+v", diskID.DiskName, diskID.ResourceGroup, err)
         }
 
         dataDisk := compute.DataDisk{}
         dataDisk.Name = &diskID.DiskName
         dataDisk.CreateOption = compute.DiskCreateOptionTypesAttach
 
-        if existing.DiskSizeGB == nil {
-            return nil, fmt.Errorf("failed reading `disk_size_gb` from existing Managed Disk %q (resource group %q)", diskID.DiskName, diskID.ResourceGroup)
+        if attach.DiskSizeGB == nil {
+            return nil, fmt.Errorf("failed reading `disk_size_gb` from attached Managed Disk %q (resource group %q)", diskID.DiskName, diskID.ResourceGroup)
         }
-        dataDisk.DiskSizeGB = existing.DiskSizeGB
+        dataDisk.DiskSizeGB = attach.DiskSizeGB
         dataDisk.Caching = compute.CachingTypes(disk["caching"].(string))
         dataDisk.Lun = utils.Int32(int32(disk["lun"].(int)))
@@ -724,8 +724,8 @@ func flattenVirtualMachineDataDisks(input *[]compute.DataDisk) ([]interface{}, e
     }
 
     var newDisks []interface{}
-    var existingDisks []interface{}
-    // we need to split into new and "existing", we can use `createOption` as indicator
+    var attachDisks []interface{}
+    // we need to split into new and "attach", we can use `createOption` as indicator
     for _, v := range *input {
         dataDisk := make(map[string]interface{})
 
@@ -784,7 +784,7 @@ func flattenVirtualMachineDataDisks(input *[]compute.DataDisk) ([]interface{}, e
             dataDisk["storage_account_type"] = storageAccountType
             dataDisk["managed_disk_id"] = managedDiskID
 
-            existingDisks = append(existingDisks, dataDisk)
+            attachDisks = append(attachDisks, dataDisk)
 
         default:
             return nil, fmt.Errorf("unsupported `createOption` type while flattening: %s", string(createOption))
@@ -792,8 +792,8 @@ func flattenVirtualMachineDataDisks(input *[]compute.DataDisk) ([]interface{}, e
     }
     return []interface{}{
         map[string]interface{}{
-            "local":    schema.NewSet(resourceArmVirtualMachineNewDataDiskHash, newDisks),
-            "existing": existingDisks,
+            "create": schema.NewSet(resourceArmVirtualMachineNewDataDiskHash, newDisks),
+            "attach": attachDisks,
         },
     }, nil
 }
diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_data_disk_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_data_disk_test.go
index 0216768c5de6..517d51203665 100644
--- a/azurerm/internal/services/compute/windows_virtual_machine_resource_data_disk_test.go
+++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_data_disk_test.go
@@ -263,7 +263,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "None"
@@ -371,7 +371,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "None"
@@ -379,7 +379,7 @@ resource "azurerm_windows_virtual_machine" "test" {
       disk_size_gb         = 1
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk2"
       lun                  = 2
       caching              = "ReadOnly"
@@ -387,7 +387,7 @@ resource "azurerm_windows_virtual_machine" "test" {
       disk_size_gb         = 2
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk3"
       lun                  = 3
       caching              = "ReadWrite"
@@ -395,14 +395,14 @@ resource "azurerm_windows_virtual_machine" "test" {
       disk_size_gb         = 3
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test1.id
       lun                  = 10
       caching              = "None"
       storage_account_type = "Standard_LRS"
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test2.id
       lun                  = 11
       caching              = "ReadOnly"
@@ -477,7 +477,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "None"
@@ -485,7 +485,7 @@ resource "azurerm_windows_virtual_machine" "test" {
       disk_size_gb         = 1
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk2"
       lun                  = 2
       caching              = "ReadOnly"
@@ -493,14 +493,14 @@ resource "azurerm_windows_virtual_machine" "test" {
       disk_size_gb         = 4
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test1.id
       lun                  = 10
       caching              = "None"
       storage_account_type = "Standard_LRS"
     }
 
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test2.id
       lun                  = 11
       caching              = "ReadWrite"
@@ -546,7 +546,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -589,7 +589,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -659,7 +659,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -707,7 +707,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    local {
+    create {
       name                 = "acctest-localdisk"
       lun                  = 1
       caching              = "ReadOnly"
@@ -715,7 +715,7 @@ resource "azurerm_windows_virtual_machine" "test" {
       disk_size_gb         = 2
     }
 
-    local {
+    create {
       name                 = "acctest-localdisk2"
       lun                  = 2
       caching              = "ReadOnly"
@@ -776,7 +776,7 @@ resource "azurerm_windows_virtual_machine" "test" {
   }
 
   data_disks {
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test.id
       lun                  = 1
       caching              = "None"
@@ -836,7 +836,7 @@ resource "azurerm_windows_virtual_machine" "test" {
  }
 
   data_disks {
-    existing {
+    attach {
       managed_disk_id      = azurerm_managed_disk.test.id
       lun                  = 1
       caching              = "None"
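Note: taken together, these changes rename the beta `data_disks` sub-blocks from `local`/`existing` to `create`/`attach`; the feature itself remains gated behind the `VMDataDiskBeta()` feature flag. As a rough sketch only, based on the acceptance-test configurations in this diff (the resource label, disk names, LUNs and sizes below are illustrative, not from the source), a configuration using the renamed blocks would look roughly like this:

resource "azurerm_linux_virtual_machine" "example" {
  # ... existing required arguments (size, admin credentials, network_interface_ids,
  # os_disk, source_image_reference, etc.) are unchanged by this PR ...

  data_disks {
    # "create" provisions a new data disk alongside the VM (previously the "local" block)
    create {
      name                 = "example-datadisk"
      lun                  = 1
      caching              = "ReadOnly"
      storage_account_type = "Standard_LRS"
      disk_size_gb         = 10
    }

    # "attach" attaches an already-provisioned managed disk (previously the "existing" block)
    attach {
      managed_disk_id      = azurerm_managed_disk.example.id
      lun                  = 10
      caching              = "None"
      storage_account_type = "Standard_LRS"
    }
  }
}

Per the update logic above, shrinking `disk_size_gb` on a `create` disk is rejected during the update, while growing it sets shouldShutDown/shouldDeallocate so the VM is stopped and deallocated before the resize is applied.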