diff --git a/deploy/example/logs/0.2.0/csi-azuredisk-controller.log b/deploy/example/logs/0.2.0/csi-azuredisk-controller.log
index cf674c535d..0496e64658 100644
--- a/deploy/example/logs/0.2.0/csi-azuredisk-controller.log
+++ b/deploy/example/logs/0.2.0/csi-azuredisk-controller.log
@@ -80,8 +80,8 @@ I0708 02:07:02.908790 1 utils.go:106] GRPC call: /csi.v1.Controller/Contro
I0708 02:07:02.908930 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" node_id:"aks-agentpool-41197296-0" volume_capability: access_mode: > volume_context: volume_context: volume_context: volume_context:
I0708 02:07:02.909162 1 controllerserver.go:267] ControllerPublishVolume: called with args {VolumeId:/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d NodeId:aks-agentpool-41197296-0 VolumeCapability:mount: access_mode: Readonly:false Secrets:map[] VolumeContext:map[cachingMode:ReadOnly kind:managed skuname:Standard_LRS storage.kubernetes.io/csiProvisionerIdentity:1562551478534-8081-disk.csi.azure.com] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0}
I0708 02:07:02.948540 1 controllerserver.go:304] GetDiskLun returned: . Initiating attaching volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" to node "aks-agentpool-41197296-0".
-I0708 02:07:02.948613 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
-I0708 02:07:02.948626 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
+I0708 02:07:02.948613 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
+I0708 02:07:02.948626 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
I0708 02:07:03.011845 1 controllerserver.go:335] Trying to attach volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" lun 0 to node "aks-agentpool-41197296-0"
I0708 02:07:03.011880 1 azure_controller_standard.go:77] azureDisk - update(MC_andy-virtualnode_andy-virtualnode_eastus2): vm(aks-agentpool-41197296-0) - attach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d)
I0708 02:07:19.142897 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe
@@ -90,8 +90,8 @@ I0708 02:07:19.142945 1 utils.go:112] GRPC response: ready:
I0708 02:07:38.506091 1 azure_controller_standard.go:94] azureDisk - attach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) succeeded
I0708 02:07:38.506121 1 controllerserver.go:338] Attach operation successful: volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" attached to node "aks-agentpool-41197296-0".
I0708 02:07:38.506138 1 controllerserver.go:343] attach volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" lun 0 to node "aks-agentpool-41197296-0" successfully
-I0708 02:07:38.506155 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
-I0708 02:07:38.506164 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
+I0708 02:07:38.506155 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
+I0708 02:07:38.506164 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
I0708 02:07:38.506174 1 utils.go:112] GRPC response: publish_context:
I0708 02:07:49.141956 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe
I0708 02:07:49.141971 1 utils.go:107] GRPC request:
@@ -109,8 +109,8 @@ I0708 02:09:39.452064 1 utils.go:106] GRPC call: /csi.v1.Controller/Contro
I0708 02:09:39.452163 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" node_id:"aks-agentpool-41197296-0"
I0708 02:09:39.452211 1 controllerserver.go:352] ControllerUnpublishVolume: called with args {VolumeId:/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d NodeId:aks-agentpool-41197296-0 Secrets:map[] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0}
I0708 02:09:39.452246 1 expiration_cache.go:98] Entry InstanceMetadata: &{key:InstanceMetadata data:0xc000673cd0 lock:{state:0 sema:0}} has expired
-I0708 02:09:39.457483 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
-I0708 02:09:39.457529 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
+I0708 02:09:39.457483 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
+I0708 02:09:39.457529 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
I0708 02:09:39.457618 1 controllerserver.go:381] Trying to detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0
I0708 02:09:39.551551 1 azure_controller_standard.go:122] azureDisk - detach disk: name "pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" uri "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d"
I0708 02:09:39.551585 1 azure_controller_standard.go:142] azureDisk - update(MC_andy-virtualnode_andy-virtualnode_eastus2): vm(aks-agentpool-41197296-0) - detach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d)
@@ -129,7 +129,7 @@ I0708 02:09:41.779760 1 utils.go:112] GRPC response: name:"disk.csi.azure.
I0708 02:09:41.787775 1 utils.go:106] GRPC call: /csi.v1.Controller/DeleteVolume
I0708 02:09:41.787786 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d"
I0708 02:09:41.787824 1 controllerserver.go:250] deleting azure disk(/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d)
-E0708 02:09:42.024138 1 utils.go:110] GRPC error: compute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
+E0708 02:09:42.024138 1 utils.go:110] GRPC error: armcompute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
I0708 02:09:49.141968 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe
I0708 02:09:49.142137 1 utils.go:107] GRPC request:
I0708 02:09:49.142212 1 utils.go:112] GRPC response: ready:
@@ -148,22 +148,22 @@ I0708 02:09:57.038401 1 utils.go:112] GRPC response: name:"disk.csi.azure.
I0708 02:09:57.046245 1 utils.go:106] GRPC call: /csi.v1.Controller/DeleteVolume
I0708 02:09:57.046463 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d"
I0708 02:09:57.046516 1 controllerserver.go:250] deleting azure disk(/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d)
-E0708 02:09:57.295441 1 utils.go:110] GRPC error: compute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
+E0708 02:09:57.295441 1 utils.go:110] GRPC error: armcompute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
I0708 02:10:04.901291 1 azure_controller_standard.go:161] azureDisk - detach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d, /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) succeeded
I0708 02:10:04.901326 1 controllerserver.go:389] detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0 successfully
-I0708 02:10:04.901334 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
-I0708 02:10:04.901346 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
+I0708 02:10:04.901334 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
+I0708 02:10:04.901346 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
I0708 02:10:04.901356 1 utils.go:112] GRPC response:
I0708 02:10:04.917031 1 utils.go:106] GRPC call: /csi.v1.Controller/ControllerUnpublishVolume
I0708 02:10:04.917044 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" node_id:"aks-agentpool-41197296-0"
I0708 02:10:04.917083 1 controllerserver.go:352] ControllerUnpublishVolume: called with args {VolumeId:/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d NodeId:aks-agentpool-41197296-0 Secrets:map[] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0}
-I0708 02:10:04.917163 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
-I0708 02:10:04.917176 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
+I0708 02:10:04.917163 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
+I0708 02:10:04.917176 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
I0708 02:10:04.917190 1 controllerserver.go:381] Trying to detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0
W0708 02:10:04.995319 1 controllerserver.go:384] volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d already detached from node aks-agentpool-41197296-0
I0708 02:10:04.995374 1 controllerserver.go:389] detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0 successfully
-I0708 02:10:04.995381 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
-I0708 02:10:04.995395 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
+I0708 02:10:04.995381 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0"
+I0708 02:10:04.995395 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed.
I0708 02:10:04.995404 1 utils.go:112] GRPC response:
I0708 02:10:19.141759 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe
I0708 02:10:19.141896 1 utils.go:107] GRPC request:
diff --git a/deploy/example/logs/0.3.0/csi-azuredisk-controller.log b/deploy/example/logs/0.3.0/csi-azuredisk-controller.log
index aa2cb48da2..cd73b0fe89 100644
--- a/deploy/example/logs/0.3.0/csi-azuredisk-controller.log
+++ b/deploy/example/logs/0.3.0/csi-azuredisk-controller.log
@@ -142,7 +142,7 @@ I0708 09:18:06.849897 1 utils.go:119] GRPC response: name:"disk.csi.azure.
I0708 09:18:06.859929 1 utils.go:112] GRPC call: /csi.v1.Controller/DeleteVolume
I0708 09:18:06.859949 1 utils.go:113] GRPC request: volume_id:"/subscriptions/.../resourceGroups/mc_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631"
I0708 09:18:06.859959 1 controllerserver.go:257] deleting azure disk(/subscriptions/.../resourceGroups/mc_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631)
-E0708 09:18:07.117134 1 utils.go:117] GRPC error: compute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631 is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
+E0708 09:18:07.117134 1 utils.go:117] GRPC error: armcompute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631 is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
I0708 09:18:13.587579 1 utils.go:112] GRPC call: /csi.v1.Identity/Probe
I0708 09:18:13.587699 1 utils.go:113] GRPC request:
I0708 09:18:13.587788 1 utils.go:119] GRPC response: ready:
diff --git a/go.mod b/go.mod
index 262ee3c970..43b1575ad8 100644
--- a/go.mod
+++ b/go.mod
@@ -40,9 +40,9 @@ require (
k8s.io/mount-utils v0.29.0
k8s.io/pod-security-admission v0.0.0
k8s.io/utils v0.0.0-20231127182322-b307cd553661
- sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240114181025-ca41d9562e6c
- sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240114181025-ca41d9562e6c
- sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240114181025-ca41d9562e6c
+ sigs.k8s.io/cloud-provider-azure v1.29.0
+ sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240117080718-1ef87a727047
+ sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240117080718-1ef87a727047
sigs.k8s.io/yaml v1.4.0
)
diff --git a/go.sum b/go.sum
index 8b38ad5428..5f7611fe45 100644
--- a/go.sum
+++ b/go.sum
@@ -2037,12 +2037,12 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
-sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240114181025-ca41d9562e6c h1:5Q300VvljRNIT2LaKZ+xo9Z1NvAOKV1v007YxItji1Y=
-sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240114181025-ca41d9562e6c/go.mod h1:0WCrYlWxqk3/AptztkqPk1r9Gr3IULSHat7LipAA1sI=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240114181025-ca41d9562e6c h1:6RWYYBDabcs2L3bs+t5IPSDietkKTqDUqJ+drIhx6xk=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240114181025-ca41d9562e6c/go.mod h1:dhkW9GQLM9XJMvXcXziwy7QqDOLwaXU11FsGJyIpy+s=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240114181025-ca41d9562e6c h1:/UCu9sYXGn3zhT0/B74nxc3EE5lRNuDVJC8gCYqk+K8=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240114181025-ca41d9562e6c/go.mod h1:slvzU7aF5CMzat64pn/LdpgU4mLYBDQGqm1fjXMAp1Q=
+sigs.k8s.io/cloud-provider-azure v1.29.0 h1:lHk6AB+3XfURM7bbR+uABKeRcMC1TYreWA6GM5wUT6g=
+sigs.k8s.io/cloud-provider-azure v1.29.0/go.mod h1:0WCrYlWxqk3/AptztkqPk1r9Gr3IULSHat7LipAA1sI=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240117080718-1ef87a727047 h1:1Nvke3MeBdnYSuyRyQY4iLwaT566ZKUxTkr/YOZLz1o=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240117080718-1ef87a727047/go.mod h1:dhkW9GQLM9XJMvXcXziwy7QqDOLwaXU11FsGJyIpy+s=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240117080718-1ef87a727047 h1:4dmpJRuwCxou60ULbhFASF7FHhxyqkGsoGLtI7kR32w=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240117080718-1ef87a727047/go.mod h1:slvzU7aF5CMzat64pn/LdpgU4mLYBDQGqm1fjXMAp1Q=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
diff --git a/pkg/azuredisk/azure_controller_common.go b/pkg/azuredisk/azure_controller_common.go
index 12f745f7f3..27a9cd84fa 100644
--- a/pkg/azuredisk/azure_controller_common.go
+++ b/pkg/azuredisk/azure_controller_common.go
@@ -27,6 +27,9 @@ import ( "sync" "time" + 
"github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" @@ -37,6 +40,7 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/pointer" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -95,9 +99,10 @@ var ( ) type controllerCommon struct { - diskStateMap sync.Map // - lockMap *lockMap - cloud *provider.Cloud + diskStateMap sync.Map // + lockMap *lockMap + cloud *provider.Cloud + clientFactory azclient.ClientFactory // disk queue that is waiting for attach or detach on specific node // > attachDiskMap sync.Map @@ -122,14 +127,14 @@ type ExtendedLocation struct { // occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments // return (lun, error) func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName, - cachingMode compute.CachingTypes, disk *compute.Disk, occupiedLuns []int) (int32, error) { + cachingMode armcompute.CachingTypes, disk *armcompute.Disk, occupiedLuns []int) (int32, error) { diskEncryptionSetID := "" writeAcceleratorEnabled := false // there is possibility that disk is nil when GetDisk is throttled // don't check disk state when GetDisk is throttled if disk != nil { - if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) { + if disk.ManagedBy != nil && (disk.Properties == nil || disk.Properties.MaxShares == nil || *disk.Properties.MaxShares <= 1) { vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) if err != nil { return -1, err @@ -155,22 +160,22 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI str return -1, volerr.NewDanglingError(attachErr, attachedNode, "") } - if disk.DiskProperties != nil { - if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone { + if disk.Properties != nil { + if disk.Properties.DiskSizeGB != nil && *disk.Properties.DiskSizeGB >= diskCachingLimit && cachingMode != armcompute.CachingTypesNone { // Disk Caching is not supported for disks 4 TiB and larger // https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching - cachingMode = compute.CachingTypesNone + cachingMode = armcompute.CachingTypesNone klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None", - diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit) + diskURI, *disk.Properties.DiskSizeGB, diskCachingLimit) } - if disk.DiskProperties.Encryption != nil && - disk.DiskProperties.Encryption.DiskEncryptionSetID != nil { - diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID + if disk.Properties.Encryption != nil && + disk.Properties.Encryption.DiskEncryptionSetID != nil { + diskEncryptionSetID = *disk.Properties.Encryption.DiskEncryptionSetID } - if disk.DiskProperties.DiskState != compute.Unattached && (disk.MaxShares == nil || *disk.MaxShares <= 1) { - return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, disk.DiskProperties.DiskState, compute.Unattached) + if disk.Properties.DiskState != nil && *disk.Properties.DiskState != 
armcompute.DiskStateUnattached && (disk.Properties.MaxShares == nil || *disk.Properties.MaxShares <= 1) { + return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, *disk.Properties.DiskState, armcompute.DiskStateUnattached) } } @@ -184,7 +189,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI str options := provider.AttachDiskOptions{ Lun: -1, DiskName: diskName, - CachingMode: cachingMode, + CachingMode: compute.CachingTypes(cachingMode), DiskEncryptionSetID: diskEncryptionSetID, WriteAcceleratorEnabled: writeAcceleratorEnabled, } @@ -469,7 +474,7 @@ func (c *controllerCommon) cleanDetachDiskRequests(nodeName string) (map[string] } // GetNodeDataDisks invokes vmSet interfaces to get data disks for the node. -func (c *controllerCommon) GetNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (c *controllerCommon) GetNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { vmset, err := c.cloud.GetNodeVMSet(nodeName, crt) if err != nil { return nil, nil, err @@ -611,8 +616,8 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N return attached, nil } -func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []compute.DataDisk) []compute.DataDisk { - filteredDisks := []compute.DataDisk{} +func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []*armcompute.DataDisk) []*armcompute.DataDisk { + filteredDisks := []*armcompute.DataDisk{} for _, disk := range unfilteredDisks { filter := false if disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil { @@ -643,13 +648,17 @@ func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) return false, err } - if _, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName); rerr != nil { - if rerr.HTTPStatusCode == http.StatusNotFound { + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return false, err + } + if _, err := diskClient.Get(ctx, resourceGroup, diskName); err != nil { + var respErr = &azcore.ResponseError{} + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { return false, nil } - return false, rerr.Error() + return false, err } - return true, nil } @@ -658,10 +667,10 @@ func vmUpdateRequired(future *azure.Future, err error) bool { return configAccepted(future) && errCode == consts.OperationPreemptedErrorCode } -func getValidCreationData(subscriptionID, resourceGroup string, options *ManagedDiskOptions) (compute.CreationData, error) { +func getValidCreationData(subscriptionID, resourceGroup string, options *ManagedDiskOptions) (armcompute.CreationData, error) { if options.SourceResourceID == "" { - return compute.CreationData{ - CreateOption: compute.Empty, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), PerformancePlus: options.PerformancePlus, }, nil } @@ -678,8 +687,8 @@ func getValidCreationData(subscriptionID, resourceGroup string, options *Managed sourceResourceID = fmt.Sprintf(managedDiskPath, subscriptionID, resourceGroup, sourceResourceID) } default: - return compute.CreationData{ - CreateOption: compute.Empty, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), PerformancePlus: options.PerformancePlus, }, nil } @@ -687,12 +696,12 @@ func getValidCreationData(subscriptionID, resourceGroup string, 
options *Managed splits := strings.Split(sourceResourceID, "/") if len(splits) > 9 { if options.SourceType == sourceSnapshot { - return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) + return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) } - return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE) + return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE) } - return compute.CreationData{ - CreateOption: compute.Copy, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceID, PerformancePlus: options.PerformancePlus, }, nil diff --git a/pkg/azuredisk/azure_controller_common_test.go b/pkg/azuredisk/azure_controller_common_test.go index 6566095a19..6515177411 100644 --- a/pkg/azuredisk/azure_controller_common_test.go +++ b/pkg/azuredisk/azure_controller_common_test.go @@ -17,10 +17,12 @@ limitations under the License. package azuredisk import ( + "bytes" "context" "encoding/json" "errors" "fmt" + "io/ioutil" "net/http" "reflect" "strconv" @@ -28,18 +30,21 @@ import ( "testing" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" autorestmocks "github.com/Azure/go-autorest/autorest/mocks" "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" cloudprovider "k8s.io/cloud-provider" "k8s.io/utils/pointer" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -114,7 +119,7 @@ func TestCommonAttachDisk(t *testing.T) { testCases := []struct { desc string diskName string - existedDisk *compute.Disk + existedDisk *armcompute.Disk nodeName types.NodeName vmList map[string]string isDataDisksFull bool @@ -143,7 +148,7 @@ func TestCommonAttachDisk(t *testing.T) { desc: "LUN -1 and error shall be returned if there's no such instance corresponding to given nodeName", nodeName: "vm1", diskName: "disk-name", - existedDisk: &compute.Disk{Name: pointer.String("disk-name")}, + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name")}, expectedLun: -1, expectErr: true, }, @@ -153,7 +158,7 @@ func TestCommonAttachDisk(t *testing.T) { nodeName: "vm1", isDataDisksFull: true, diskName: "disk-name", - existedDisk: &compute.Disk{Name: pointer.String("disk-name")}, + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name")}, expectedLun: -1, expectErr: true, }, @@ -162,11 +167,11 @@ func TestCommonAttachDisk(t *testing.T) { vmList: map[string]string{"vm1": "PowerState/Running"}, nodeName: "vm1", diskName: "disk-name", - existedDisk: &compute.Disk{Name: pointer.String("disk-name"), - DiskProperties: &compute.DiskProperties{ - Encryption: 
&compute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name"), + Properties: &armcompute.DiskProperties{ + Encryption: &armcompute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, DiskSizeGB: pointer.Int32(4096), - DiskState: compute.Unattached, + DiskState: to.Ptr(armcompute.DiskStateUnattached), }, Tags: testTags}, expectedLun: 3, @@ -178,11 +183,11 @@ func TestCommonAttachDisk(t *testing.T) { vmList: map[string]string{"vm1": "PowerState/Running"}, nodeName: "vm1", diskName: "disk-name", - existedDisk: &compute.Disk{Name: pointer.String("disk-name"), - DiskProperties: &compute.DiskProperties{ - Encryption: &compute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name"), + Properties: &armcompute.DiskProperties{ + Encryption: &armcompute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, DiskSizeGB: pointer.Int32(4096), - DiskState: compute.Attached, + DiskState: to.Ptr(armcompute.DiskStateAttached), }, Tags: testTags}, expectedLun: -1, @@ -193,7 +198,7 @@ func TestCommonAttachDisk(t *testing.T) { vmList: map[string]string{"vm1": "PowerState/Running"}, nodeName: "vm1", diskName: "disk-name", - existedDisk: &compute.Disk{Name: pointer.String("disk-name"), ManagedBy: pointer.String(goodInstanceID), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}}, + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name"), ManagedBy: pointer.String(goodInstanceID), Properties: &armcompute.DiskProperties{MaxShares: &maxShare}}, expectedLun: -1, expectErr: true, }, @@ -295,7 +300,7 @@ func TestCommonAttachDisk(t *testing.T) { lockMap: newLockMap(), DisableDiskLunCheck: true, } - lun, err := testdiskController.AttachDisk(ctx, test.diskName, diskURI, tt.nodeName, compute.CachingTypesReadOnly, tt.existedDisk, nil) + lun, err := testdiskController.AttachDisk(ctx, test.diskName, diskURI, tt.nodeName, armcompute.CachingTypesReadOnly, tt.existedDisk, nil) assert.Equal(t, tt.expectedLun, lun, "TestCase[%d]: %s", i, tt.desc) assert.Equal(t, tt.expectErr, err != nil, "TestCase[%d]: %s, return error: %v", i, tt.desc, err) @@ -616,7 +621,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup string sourceResourceID string sourceType string - expected1 compute.CreationData + expected1 armcompute.CreationData expected2 error }{ { @@ -624,8 +629,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "", sourceType: "", - expected1: compute.CreationData{ - CreateOption: compute.Empty, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, expected2: nil, }, @@ -634,8 +639,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", sourceType: sourceSnapshot, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceSnapshotID, }, expected2: nil, @@ -645,8 +650,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "xxx", sourceResourceID: "xxx", sourceType: 
sourceSnapshot, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceSnapshotID, }, expected2: nil, @@ -656,7 +661,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/23/providers/Microsoft.Compute/disks/name", sourceType: sourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/23/providers/Microsoft.Compute/disks/name", diskSnapshotPathRE), }, { @@ -664,7 +669,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "http://test.com/vhds/name", sourceType: sourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots/http://test.com/vhds/name", diskSnapshotPathRE), }, { @@ -672,7 +677,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/snapshots/xxx", sourceType: sourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/xxx/snapshots/xxx", diskSnapshotPathRE), }, { @@ -680,7 +685,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", sourceType: sourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", diskSnapshotPathRE), }, { @@ -688,8 +693,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "xxx", sourceType: "", - expected1: compute.CreationData{ - CreateOption: compute.Empty, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, expected2: nil, }, @@ -698,8 +703,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xxx", sourceType: sourceVolume, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceVolumeID, }, expected2: nil, @@ -709,8 +714,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "xxx", sourceResourceID: "xxx", sourceType: sourceVolume, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceVolumeID, }, expected2: nil, @@ -720,7 +725,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", sourceType: sourceVolume, - expected1: compute.CreationData{}, + expected1: 
armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/disks//subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", managedDiskPathRE), }, } @@ -745,18 +750,27 @@ func TestCheckDiskExists(t *testing.T) { defer cancel() testCloud := provider.GetTestCloud(ctrl) + mockFactory := mock_azclient.NewMockClientFactory(ctrl) common := &controllerCommon{ - cloud: testCloud, - lockMap: newLockMap(), + cloud: testCloud, + clientFactory: mockFactory, + lockMap: newLockMap(), } // create a new disk before running test newDiskName := "newdisk" newDiskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", testCloud.SubscriptionID, testCloud.ResourceGroup, newDiskName) - mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) - mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Any(), testCloud.ResourceGroup, newDiskName).Return(compute.Disk{}, nil).AnyTimes() - mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Not(testCloud.ResourceGroup), gomock.Any()).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes() + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + mockFactory.EXPECT().GetDiskClientForSub(gomock.Any()).Return(mockDisksClient, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, newDiskName).Return(&armcompute.Disk{}, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Not(testCloud.ResourceGroup), gomock.Any()).Return(&armcompute.Disk{}, &azcore.ResponseError{ + StatusCode: http.StatusNotFound, + RawResponse: &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + }, + }).AnyTimes() testCases := []struct { diskURI string @@ -795,9 +809,11 @@ func TestFilterNonExistingDisksWithSpecialHTTPStatusCode(t *testing.T) { defer cancel() testCloud := provider.GetTestCloud(ctrl) + mockFactory := mock_azclient.NewMockClientFactory(ctrl) common := &controllerCommon{ - cloud: testCloud, - lockMap: newLockMap(), + cloud: testCloud, + clientFactory: mockFactory, + lockMap: newLockMap(), } // create a new disk before running test diskURIPrefix := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/", @@ -805,13 +821,20 @@ func TestFilterNonExistingDisksWithSpecialHTTPStatusCode(t *testing.T) { newDiskName := "specialdisk" newDiskURI := diskURIPrefix + newDiskName - mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) - mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, gomock.Eq(newDiskName)).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusBadRequest, RawError: cloudprovider.InstanceNotFound}).AnyTimes() + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + mockFactory.EXPECT().GetDiskClientForSub(gomock.Any()).Return(mockDisksClient, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Eq(newDiskName)).Return(&armcompute.Disk{}, &azcore.ResponseError{ + StatusCode: http.StatusBadRequest, + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + }, + }).AnyTimes() - disks := []compute.DataDisk{ + disks := []*armcompute.DataDisk{ { Name: &newDiskName, - ManagedDisk: &compute.ManagedDiskParameters{ + ManagedDisk: 
&armcompute.ManagedDiskParameters{ ID: &newDiskURI, }, }, diff --git a/pkg/azuredisk/azure_managedDiskController.go b/pkg/azuredisk/azure_managedDiskController.go index 5f75084384..630e96f07c 100644 --- a/pkg/azuredisk/azure_managedDiskController.go +++ b/pkg/azuredisk/azure_managedDiskController.go @@ -18,13 +18,16 @@ package azuredisk import ( "context" + "errors" "fmt" "net/http" "path" "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "k8s.io/apimachinery/pkg/api/resource" kwait "k8s.io/apimachinery/pkg/util/wait" @@ -46,6 +49,7 @@ func NewManagedDiskController(provider *provider.Cloud) *ManagedDiskController { cloud: provider, lockMap: newLockMap(), AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + clientFactory: provider.ComputeClientFactory, } return &ManagedDiskController{common} @@ -54,7 +58,7 @@ func NewManagedDiskController(provider *provider.Cloud) *ManagedDiskController { // ManagedDiskOptions specifies the options of managed disks. type ManagedDiskOptions struct { // The SKU of storage account. - StorageAccountType compute.DiskStorageAccountTypes + StorageAccountType armcompute.DiskStorageAccountTypes // The name of the disk. DiskName string // The name of PVC. @@ -86,9 +90,9 @@ type ManagedDiskOptions struct { // SkipGetDiskOperation indicates whether skip GetDisk operation(mainly due to throttling) SkipGetDiskOperation bool // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled' - PublicNetworkAccess compute.PublicNetworkAccess + PublicNetworkAccess armcompute.PublicNetworkAccess // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll' - NetworkAccessPolicy compute.NetworkAccessPolicy + NetworkAccessPolicy armcompute.NetworkAccessPolicy // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. DiskAccessID *string // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. 
@@ -146,19 +150,19 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * if err != nil { return "", err } - diskProperties := compute.DiskProperties{ + diskProperties := armcompute.DiskProperties{ DiskSizeGB: &diskSizeGB, CreationData: &creationData, BurstingEnabled: options.BurstingEnabled, } if options.PublicNetworkAccess != "" { - diskProperties.PublicNetworkAccess = options.PublicNetworkAccess + diskProperties.PublicNetworkAccess = to.Ptr(options.PublicNetworkAccess) } if options.NetworkAccessPolicy != "" { - diskProperties.NetworkAccessPolicy = options.NetworkAccessPolicy - if options.NetworkAccessPolicy == compute.AllowPrivate { + diskProperties.NetworkAccessPolicy = to.Ptr(options.NetworkAccessPolicy) + if options.NetworkAccessPolicy == armcompute.NetworkAccessPolicyAllowPrivate { if options.DiskAccessID == nil { return "", fmt.Errorf("DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate") } @@ -170,9 +174,9 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * } } - if diskSku == compute.UltraSSDLRS || diskSku == compute.PremiumV2LRS { + if diskSku == armcompute.DiskStorageAccountTypesUltraSSDLRS || diskSku == armcompute.DiskStorageAccountTypesPremiumV2LRS { if options.DiskIOPSReadWrite == "" { - if diskSku == compute.UltraSSDLRS { + if diskSku == armcompute.DiskStorageAccountTypesUltraSSDLRS { diskIOPSReadWrite := int64(consts.DefaultDiskIOPSReadWrite) diskProperties.DiskIOPSReadWrite = pointer.Int64(diskIOPSReadWrite) } @@ -186,7 +190,7 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * } if options.DiskMBpsReadWrite == "" { - if diskSku == compute.UltraSSDLRS { + if diskSku == armcompute.DiskStorageAccountTypesUltraSSDLRS { diskMBpsReadWrite := int64(consts.DefaultDiskMBpsReadWrite) diskProperties.DiskMBpsReadWrite = pointer.Int64(diskMBpsReadWrite) } @@ -219,14 +223,14 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * if strings.Index(strings.ToLower(options.DiskEncryptionSetID), "/subscriptions/") != 0 { return "", fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", options.DiskEncryptionSetID, consts.DiskEncryptionSetIDFormat) } - encryptionType := compute.EncryptionTypeEncryptionAtRestWithCustomerKey + encryptionType := armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey if options.DiskEncryptionType != "" { - encryptionType = compute.EncryptionType(options.DiskEncryptionType) + encryptionType = armcompute.EncryptionType(options.DiskEncryptionType) klog.V(4).Infof("azureDisk - DiskEncryptionType: %s, DiskEncryptionSetID: %s", options.DiskEncryptionType, options.DiskEncryptionSetID) } - diskProperties.Encryption = &compute.Encryption{ + diskProperties.Encryption = &armcompute.Encryption{ DiskEncryptionSetID: &options.DiskEncryptionSetID, - Type: encryptionType, + Type: to.Ptr(encryptionType), } } else { if options.DiskEncryptionType != "" { @@ -242,28 +246,31 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * if options.Location != "" { location = options.Location } - model := compute.Disk{ + model := armcompute.Disk{ Location: &location, Tags: newTags, - Sku: &compute.DiskSku{ - Name: diskSku, + SKU: &armcompute.DiskSKU{ + Name: to.Ptr(diskSku), }, - DiskProperties: &diskProperties, + Properties: &diskProperties, } if c.cloud.HasExtendedLocation() { - model.ExtendedLocation = &compute.ExtendedLocation{ + model.ExtendedLocation = 
&armcompute.ExtendedLocation{ Name: pointer.String(c.cloud.ExtendedLocationName), - Type: compute.ExtendedLocationTypes(c.cloud.ExtendedLocationType), + Type: to.Ptr(armcompute.ExtendedLocationTypes(c.cloud.ExtendedLocationType)), } } if len(createZones) > 0 { - model.Zones = &createZones + model.Zones = to.SliceOfPtrs(createZones...) } - - if rerr := c.cloud.DisksClient.CreateOrUpdate(ctx, subsID, rg, options.DiskName, model); rerr != nil { - return "", rerr.Error() + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return "", err + } + if _, err := diskClient.CreateOrUpdate(ctx, rg, options.DiskName, model); err != nil { + return "", err } diskID := fmt.Sprintf(managedDiskPath, subsID, rg, options.DiskName) @@ -310,23 +317,26 @@ func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI s } diskName := path.Base(diskURI) - disk, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - if rerr.HTTPStatusCode == http.StatusNotFound { + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return err + } + + disk, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + var respErr = &azcore.ResponseError{} + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI) return nil } - // ignore GetDisk throttling - if !rerr.IsThrottled() && !strings.Contains(rerr.RawError.Error(), consts.RateLimited) { - return rerr.Error() - } + return err } if disk.ManagedBy != nil { return fmt.Errorf("disk(%s) already attached to node(%s), could not be deleted", diskURI, *disk.ManagedBy) } - if rerr := c.cloud.DisksClient.Delete(ctx, subsID, resourceGroup, diskName); rerr != nil { - return rerr.Error() + if err = diskClient.Delete(ctx, resourceGroup, diskName); err != nil { + return err } // We don't need poll here, k8s will immediately stop referencing the disk // the disk will be eventually deleted - cleanly - by ARM @@ -338,13 +348,18 @@ func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI s // GetDisk return: disk provisionState, diskID, error func (c *ManagedDiskController) GetDisk(ctx context.Context, subsID, resourceGroup, diskName string) (string, string, error) { - result, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return "", "", rerr.Error() + diskclient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return "", "", err + } + + result, err := diskclient.Get(ctx, resourceGroup, diskName) + if err != nil { + return "", "", err } - if result.DiskProperties != nil && (*result.DiskProperties).ProvisioningState != nil { - return *(*result.DiskProperties).ProvisioningState, *result.ID, nil + if result.Properties != nil && (*result.Properties).ProvisioningState != nil { + return *(*result.Properties).ProvisioningState, *result.ID, nil } return "", "", nil } @@ -356,13 +371,16 @@ func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string, if err != nil { return oldSize, err } - - result, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return oldSize, rerr.Error() + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return oldSize, err + } + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return oldSize, err } - if result.DiskProperties == nil || 
result.DiskProperties.DiskSizeGB == nil { + if result.Properties == nil || result.Properties.DiskSizeGB == nil { return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName) } @@ -376,22 +394,22 @@ func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string, klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize) // If disk already of greater or equal size than requested we return - if *result.DiskProperties.DiskSizeGB >= requestGiB { + if *result.Properties.DiskSizeGB >= requestGiB { return newSizeQuant, nil } - if !supportOnlineResize && result.DiskProperties.DiskState != compute.Unattached { - return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", result.DiskProperties.DiskState, pointer.StringDeref(result.ManagedBy, "")) + if !supportOnlineResize && *result.Properties.DiskState != armcompute.DiskStateUnattached { + return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", *result.Properties.DiskState, pointer.StringDeref(result.ManagedBy, "")) } - diskParameter := compute.DiskUpdate{ - DiskUpdateProperties: &compute.DiskUpdateProperties{ + diskParameter := armcompute.DiskUpdate{ + Properties: &armcompute.DiskUpdateProperties{ DiskSizeGB: &requestGiB, }, } - if rerr := c.cloud.DisksClient.Update(ctx, subsID, resourceGroup, diskName, diskParameter); rerr != nil { - return oldSize, rerr.Error() + if _, err := diskClient.Patch(ctx, resourceGroup, diskName, diskParameter); err != nil { + return oldSize, err } klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB) diff --git a/pkg/azuredisk/azure_managedDiskController_test.go b/pkg/azuredisk/azure_managedDiskController_test.go index 1556221a41..6d4e722786 100644 --- a/pkg/azuredisk/azure_managedDiskController_test.go +++ b/pkg/azuredisk/azure_managedDiskController_test.go @@ -22,17 +22,18 @@ import ( "strings" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/utils/pointer" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/provider" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) const ( @@ -57,105 +58,105 @@ func TestCreateManagedDisk(t *testing.T) { desc string diskID string diskName string - storageAccountType compute.DiskStorageAccountTypes + storageAccountType armcompute.DiskStorageAccountTypes diskIOPSReadWrite string diskMBPSReadWrite string diskEncryptionSetID string diskEncryptionType string subscriptionID string resouceGroup string - publicNetworkAccess compute.PublicNetworkAccess - networkAccessPolicy compute.NetworkAccessPolicy + publicNetworkAccess armcompute.PublicNetworkAccess + networkAccessPolicy armcompute.NetworkAccessPolicy diskAccessID *string expectedDiskID string - existedDisk compute.Disk + existedDisk *armcompute.Disk expectedErr bool expectedErrMsg error }{ { - desc: "disk Id and no error 
shall be returned if everything is good with UltraSSDLRS storage account", + desc: "disk Id and no error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.UltraSSDLRS, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, diskIOPSReadWrite: "100", diskMBPSReadWrite: "100", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: disk1ID, - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: false, }, { desc: "disk Id and no error shall be returned if everything is good with PremiumV2LRS storage account", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.PremiumV2LRS, + storageAccountType: armcompute.DiskStorageAccountTypesPremiumV2LRS, diskIOPSReadWrite: "100", diskMBPSReadWrite: "100", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: disk1ID, - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: false, }, { desc: "disk Id and no error shall be returned if everything is good with PremiumV2LRS storage account", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.PremiumV2LRS, + storageAccountType: armcompute.DiskStorageAccountTypesPremiumV2LRS, diskIOPSReadWrite: "", diskMBPSReadWrite: "", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: disk1ID, - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: false, }, { - desc: "disk Id and no error shall be returned if everything is good with StandardLRS storage account", + desc: "disk Id and no error shall be returned if everything is good 
with DiskStorageAccountTypesStandardLRS storage account", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskIOPSReadWrite: "", diskMBPSReadWrite: "", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: disk1ID, - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: false, }, { - desc: "empty diskid and an error shall be returned if everything is good with UltraSSDLRS storage account but DiskIOPSReadWrite is invalid", + desc: "empty diskid and an error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account but DiskIOPSReadWrite is invalid", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.UltraSSDLRS, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, diskIOPSReadWrite: "invalid", diskMBPSReadWrite: "100", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: strconv.Atoi: parsing \"invalid\": invalid syntax"), }, { - desc: "empty diskid and an error shall be returned if everything is good with UltraSSDLRS storage account but DiskMBPSReadWrite is invalid", + desc: "empty diskid and an error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account but DiskMBPSReadWrite is invalid", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.UltraSSDLRS, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, diskIOPSReadWrite: "100", diskMBPSReadWrite: "invalid", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: 
&armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: strconv.Atoi: parsing \"invalid\": invalid syntax"), }, { - desc: "empty diskid and an error shall be returned if everything is good with UltraSSDLRS storage account with bad Disk EncryptionSetID", + desc: "empty diskid and an error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account with bad Disk EncryptionSetID", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.UltraSSDLRS, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, diskIOPSReadWrite: "100", diskMBPSReadWrite: "100", diskEncryptionSetID: badDiskEncryptionSetID, expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", badDiskEncryptionSetID, consts.DiskEncryptionSetIDFormat), }, @@ -163,37 +164,37 @@ func TestCreateManagedDisk(t *testing.T) { desc: "DiskEncryptionType should be empty when DiskEncryptionSetID is not set", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskEncryptionSetID: "", diskEncryptionType: "EncryptionAtRestWithCustomerKey", expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("AzureDisk - DiskEncryptionType(EncryptionAtRestWithCustomerKey) should be empty when DiskEncryptionSetID is not set"), }, { - desc: "disk Id and no error shall be returned if everything is good with StandardLRS storage account with not empty diskIOPSReadWrite", + desc: "disk Id and no error shall be returned if everything is good with DiskStorageAccountTypesStandardLRS storage account with not empty diskIOPSReadWrite", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskIOPSReadWrite: "100", 
diskMBPSReadWrite: "", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type"), }, { - desc: "disk Id and no error shall be returned if everything is good with StandardLRS storage account with not empty diskMBPSReadWrite", + desc: "disk Id and no error shall be returned if everything is good with DiskStorageAccountTypesStandardLRS storage account with not empty diskMBPSReadWrite", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskIOPSReadWrite: "", diskMBPSReadWrite: "100", diskEncryptionSetID: goodDiskEncryptionSetID, expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type"), }, @@ -201,36 +202,36 @@ func TestCreateManagedDisk(t *testing.T) { desc: "correct NetworkAccessPolicy(DenyAll) setting", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskEncryptionSetID: goodDiskEncryptionSetID, - networkAccessPolicy: compute.DenyAll, - publicNetworkAccess: compute.Disabled, + networkAccessPolicy: armcompute.NetworkAccessPolicyDenyAll, + publicNetworkAccess: armcompute.PublicNetworkAccessDisabled, expectedDiskID: disk1ID, - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, 
expectedErr: false, }, { desc: "correct NetworkAccessPolicy(AllowAll) setting", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskEncryptionSetID: goodDiskEncryptionSetID, diskEncryptionType: "EncryptionAtRestWithCustomerKey", - networkAccessPolicy: compute.AllowAll, - publicNetworkAccess: compute.Enabled, + networkAccessPolicy: armcompute.NetworkAccessPolicyAllowAll, + publicNetworkAccess: armcompute.PublicNetworkAccessEnabled, expectedDiskID: disk1ID, - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: false, }, { desc: "DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskEncryptionSetID: goodDiskEncryptionSetID, - networkAccessPolicy: compute.AllowPrivate, + networkAccessPolicy: armcompute.NetworkAccessPolicyAllowPrivate, expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate"), }, @@ -238,12 +239,12 @@ func TestCreateManagedDisk(t *testing.T) { desc: "DiskAccessID(%s) must be empty when NetworkAccessPolicy(%s) is not AllowPrivate", diskID: disk1ID, diskName: disk1Name, - storageAccountType: compute.StandardLRS, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, diskEncryptionSetID: goodDiskEncryptionSetID, - networkAccessPolicy: compute.AllowAll, + networkAccessPolicy: armcompute.NetworkAccessPolicyAllowAll, diskAccessID: pointer.String("diskAccessID"), expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: 
&goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("DiskAccessID(diskAccessID) must be empty when NetworkAccessPolicy(AllowAll) is not AllowPrivate"), }, @@ -254,7 +255,7 @@ func TestCreateManagedDisk(t *testing.T) { subscriptionID: "abc", resouceGroup: "", expectedDiskID: "", - existedDisk: compute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, expectedErr: true, expectedErrMsg: fmt.Errorf("resourceGroup must be specified when subscriptionID(abc) is not empty"), }, @@ -267,6 +268,7 @@ func TestCreateManagedDisk(t *testing.T) { cloud: testCloud, lockMap: newLockMap(), AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + clientFactory: testCloud.ComputeClientFactory, } managedDiskController := &ManagedDiskController{common} @@ -288,10 +290,11 @@ func TestCreateManagedDisk(t *testing.T) { SubscriptionID: test.subscriptionID, } - mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + common.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() //disk := getTestDisk(test.diskName) - mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), test.diskName, gomock.Any()).Return(nil).AnyTimes() - mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), test.diskName).Return(test.existedDisk, nil).AnyTimes() + mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), test.diskName, gomock.Any()).Return(test.existedDisk, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Any(), test.diskName).Return(test.existedDisk, nil).AnyTimes() actualDiskID, err := managedDiskController.CreateManagedDisk(ctx, volumeOptions) assert.Equal(t, test.expectedDiskID, actualDiskID, "TestCase[%d]: %s", i, test.desc) @@ -312,16 +315,16 @@ func TestCreateManagedDiskWithExtendedLocation(t *testing.T) { testCloud := provider.GetTestCloudWithExtendedLocation(ctrl) diskName := disk1Name expectedDiskID := disk1ID - el := &compute.ExtendedLocation{ + el := &armcompute.ExtendedLocation{ Name: pointer.String("microsoftlosangeles1"), - Type: compute.ExtendedLocationTypesEdgeZone, + Type: to.Ptr(armcompute.ExtendedLocationTypesEdgeZone), } - diskreturned := compute.Disk{ + diskreturned := armcompute.Disk{ ID: pointer.String(expectedDiskID), Name: pointer.String(diskName), ExtendedLocation: el, - DiskProperties: &compute.DiskProperties{ + Properties: &armcompute.DiskProperties{ ProvisioningState: pointer.String("Succeeded"), }, } @@ -330,25 +333,27 @@ func TestCreateManagedDiskWithExtendedLocation(t *testing.T) { cloud: testCloud, lockMap: newLockMap(), AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + 
clientFactory: testCloud.ComputeClientFactory, } managedDiskController := &ManagedDiskController{common} volumeOptions := &ManagedDiskOptions{ DiskName: diskName, - StorageAccountType: compute.PremiumLRS, + StorageAccountType: armcompute.DiskStorageAccountTypes(armcompute.EdgeZoneStorageAccountTypePremiumLRS), ResourceGroup: "", SizeGB: 1, AvailabilityZone: "westus-testzone", } - mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) - mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, diskName, gomock.Any()). - Do(func(ctx interface{}, subsID, rg, dn string, disk compute.Disk) { + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + common.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() + mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, diskName, gomock.Any()). + Do(func(ctx interface{}, rg, dn string, disk armcompute.Disk) { assert.Equal(t, el.Name, disk.ExtendedLocation.Name, "The extended location name should match.") assert.Equal(t, el.Type, disk.ExtendedLocation.Type, "The extended location type should match.") - }).Return(nil) + }).Return(to.Ptr(diskreturned), nil) - mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, diskName).Return(diskreturned, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, diskName).Return(&diskreturned, nil).AnyTimes() actualDiskID, err := managedDiskController.CreateManagedDisk(ctx, volumeOptions) assert.Equal(t, expectedDiskID, actualDiskID, "Disk ID does not match.") @@ -363,7 +368,7 @@ func TestDeleteManagedDisk(t *testing.T) { desc string diskName string diskState string - existedDisk compute.Disk + existedDisk *armcompute.Disk expectedErr bool expectedErrMsg error }{ @@ -371,22 +376,22 @@ func TestDeleteManagedDisk(t *testing.T) { desc: "an error shall be returned if delete an attaching disk", diskName: disk1Name, diskState: "attaching", - existedDisk: compute.Disk{Name: pointer.String(disk1Name)}, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, expectedErr: true, expectedErrMsg: fmt.Errorf("failed to delete disk(/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1) since it's in attaching state"), }, { desc: "no error shall be returned if everything is good", diskName: disk1Name, - existedDisk: compute.Disk{Name: pointer.String(disk1Name)}, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, expectedErr: false, }, { desc: "an error shall be returned if get disk failed", diskName: fakeGetDiskFailed, - existedDisk: compute.Disk{Name: pointer.String(fakeGetDiskFailed)}, + existedDisk: &armcompute.Disk{Name: pointer.String(fakeGetDiskFailed)}, expectedErr: true, - expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: Get Disk failed"), + expectedErrMsg: fmt.Errorf("Get Disk failed"), }, } ctx, cancel := context.WithCancel(context.Background()) @@ -399,6 +404,7 @@ func TestDeleteManagedDisk(t *testing.T) { cloud: testCloud, lockMap: newLockMap(), AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + clientFactory: testCloud.ComputeClientFactory, } managedDiskController := &ManagedDiskController{common} @@ -408,13 +414,14 @@ func TestDeleteManagedDisk(t *testing.T) { managedDiskController.diskStateMap.Store(strings.ToLower(diskURI), test.diskState) } - 
mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + common.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() if test.diskName == fakeGetDiskFailed { - mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, fmt.Errorf("Get Disk failed")).AnyTimes() } else { - mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() } - mockDisksClient.EXPECT().Delete(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName).Return(nil).AnyTimes() + mockDisksClient.EXPECT().Delete(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(nil).AnyTimes() err := managedDiskController.DeleteManagedDisk(ctx, diskURI) assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err) @@ -434,7 +441,7 @@ func TestGetDisk(t *testing.T) { testCases := []struct { desc string diskName string - existedDisk compute.Disk + existedDisk *armcompute.Disk expectedErr bool expectedErrMsg error expectedProvisioningState string @@ -443,7 +450,7 @@ func TestGetDisk(t *testing.T) { { desc: "no error shall be returned if get a normal disk without DiskProperties", diskName: disk1Name, - existedDisk: compute.Disk{Name: pointer.String(disk1Name)}, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, expectedErr: false, expectedProvisioningState: "", expectedDiskID: "", @@ -451,9 +458,9 @@ func TestGetDisk(t *testing.T) { { desc: "an error shall be returned if get disk failed", diskName: fakeGetDiskFailed, - existedDisk: compute.Disk{Name: pointer.String(fakeGetDiskFailed)}, + existedDisk: &armcompute.Disk{Name: pointer.String(fakeGetDiskFailed)}, expectedErr: true, - expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: Get Disk failed"), + expectedErrMsg: fmt.Errorf("Get Disk failed"), expectedProvisioningState: "", expectedDiskID: "", }, @@ -466,13 +473,16 @@ func TestGetDisk(t *testing.T) { cloud: testCloud, lockMap: newLockMap(), DisableDiskLunCheck: true, + clientFactory: testCloud.ComputeClientFactory, }, } - mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + managedDiskController.controllerCommon.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub("").Return(mockDisksClient, nil).AnyTimes() if test.diskName == fakeGetDiskFailed { - mockDisksClient.EXPECT().Get(gomock.Any(), "", testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, fmt.Errorf("Get Disk failed")).AnyTimes() } else { - mockDisksClient.EXPECT().Get(gomock.Any(), "", testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, 
test.diskName).Return(test.existedDisk, nil).AnyTimes() } provisioningState, diskid, err := managedDiskController.GetDisk(ctx, "", testCloud.ResourceGroup, test.diskName) @@ -500,7 +510,7 @@ func TestResizeDisk(t *testing.T) { diskName string oldSize resource.Quantity newSize resource.Quantity - existedDisk compute.Disk + existedDisk *armcompute.Disk expectedQuantity resource.Quantity expectedErr bool expectedErrMsg error @@ -510,7 +520,7 @@ func TestResizeDisk(t *testing.T) { diskName: diskName, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), - existedDisk: compute.Disk{Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, expectedQuantity: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), expectedErr: false, }, @@ -519,7 +529,7 @@ func TestResizeDisk(t *testing.T) { diskName: diskName, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), - existedDisk: compute.Disk{Name: pointer.String(disk1Name)}, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, expectedErrMsg: fmt.Errorf("DiskProperties of disk(%s) is nil", diskName), @@ -529,7 +539,7 @@ func TestResizeDisk(t *testing.T) { diskName: diskName, oldSize: *resource.NewQuantity(1*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), - existedDisk: compute.Disk{Name: pointer.String(disk1Name), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: false, }, @@ -538,27 +548,27 @@ func TestResizeDisk(t *testing.T) { diskName: fakeGetDiskFailed, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), - existedDisk: compute.Disk{Name: pointer.String(fakeGetDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, + existedDisk: &armcompute.Disk{Name: pointer.String(fakeGetDiskFailed), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, - expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: Get Disk failed"), + expectedErrMsg: fmt.Errorf("Get Disk failed"), }, { desc: "an error shall be returned if everything is good but create disk failed", diskName: fakeCreateDiskFailed, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), - existedDisk: compute.Disk{Name: pointer.String(fakeCreateDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, + 
existedDisk: &armcompute.Disk{Name: pointer.String(fakeCreateDiskFailed), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, - expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: Create Disk failed"), + expectedErrMsg: fmt.Errorf("Create Disk failed"), }, { desc: "an error shall be returned if disk is not in Unattached state", diskName: fakeCreateDiskFailed, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), - existedDisk: compute.Disk{Name: pointer.String(fakeCreateDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Attached}}, + existedDisk: &armcompute.Disk{Name: pointer.String(fakeCreateDiskFailed), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateAttached)}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, expectedErrMsg: fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: Attached, already attached to "), @@ -572,21 +582,23 @@ func TestResizeDisk(t *testing.T) { cloud: testCloud, lockMap: newLockMap(), DisableDiskLunCheck: true, + clientFactory: testCloud.ComputeClientFactory, }, } diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name) - mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + managedDiskController.controllerCommon.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() if test.diskName == fakeGetDiskFailed { - mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, fmt.Errorf("Get Disk failed")).AnyTimes() } else { - mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() } if test.diskName == fakeCreateDiskFailed { - mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(&retry.Error{RawError: fmt.Errorf("Create Disk failed")}).AnyTimes() + mockDisksClient.EXPECT().Patch(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(test.existedDisk, fmt.Errorf("Create Disk failed")).AnyTimes() } else { - mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes() + mockDisksClient.EXPECT().Patch(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(test.existedDisk, nil).AnyTimes() } result, err := managedDiskController.ResizeDisk(ctx, diskURI, test.oldSize, test.newSize, false) diff --git a/pkg/azuredisk/azuredisk.go b/pkg/azuredisk/azuredisk.go index 
d667496a09..c41132e24a 100644 --- a/pkg/azuredisk/azuredisk.go +++ b/pkg/azuredisk/azuredisk.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -48,6 +48,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/mounter" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" azurecloudconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -75,6 +76,7 @@ type DriverCore struct { customUserAgent string userAgentSuffix string cloud *azure.Cloud + clientFactory azclient.ClientFactory diskController *ManagedDiskController mounter *mount.SafeFormatAndMount deviceHelper optimization.Interface @@ -183,6 +185,7 @@ func newDriverV1(options *DriverOptions) *Driver { driver.diskController = NewManagedDiskController(driver.cloud) driver.diskController.DisableUpdateCache = driver.disableUpdateCache driver.diskController.AttachDetachInitialDelayInMs = int(driver.attachDetachInitialDelayInMs) + driver.clientFactory = driver.cloud.ComputeClientFactory if driver.vmType != "" { klog.V(2).Infof("override VMType(%s) in cloud config as %s", driver.cloud.VMType, driver.vmType) driver.cloud.VMType = driver.vmType @@ -326,7 +329,7 @@ func (d *Driver) isCheckDiskLunThrottled() bool { return cache != nil } -func (d *Driver) checkDiskExists(ctx context.Context, diskURI string) (*compute.Disk, error) { +func (d *Driver) checkDiskExists(ctx context.Context, diskURI string) (*armcompute.Disk, error) { diskName, err := azureutils.GetDiskName(diskURI) if err != nil { return nil, err @@ -342,17 +345,15 @@ func (d *Driver) checkDiskExists(ctx context.Context, diskURI string) (*compute. return nil, nil } subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - disk, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - if rerr.IsThrottled() || strings.Contains(rerr.RawError.Error(), consts.RateLimited) { - klog.Warningf("checkDiskExists(%s) is throttled with error: %v", diskURI, rerr.Error()) - d.throttlingCache.Set(consts.GetDiskThrottlingKey, "") - return nil, nil - } - return nil, rerr.Error() + diskClient, err := d.diskController.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, err } - - return &disk, nil + disk, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, err + } + return disk, nil } func (d *Driver) checkDiskCapacity(ctx context.Context, subsID, resourceGroup, diskName string, requestGiB int) (bool, error) { @@ -360,18 +361,16 @@ func (d *Driver) checkDiskCapacity(ctx context.Context, subsID, resourceGroup, d klog.Warningf("skip checkDiskCapacity(%s, %s) since it's still in throttling", resourceGroup, diskName) return true, nil } - - disk, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return false, err + } + disk, err := diskClient.Get(ctx, resourceGroup, diskName) // Because we can not judge the reason of the error. Maybe the disk does not exist. // So here we do not handle the error. 
- if rerr == nil { - if !reflect.DeepEqual(disk, compute.Disk{}) && disk.DiskSizeGB != nil && int(*disk.DiskSizeGB) != requestGiB { - return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.DiskProperties.DiskSizeGB, requestGiB) - } - } else { - if rerr.IsThrottled() || strings.Contains(rerr.RawError.Error(), consts.RateLimited) { - klog.Warningf("checkDiskCapacity(%s, %s) is throttled with error: %v", resourceGroup, diskName, rerr.Error()) - d.throttlingCache.Set(consts.GetDiskThrottlingKey, "") + if err == nil { + if !reflect.DeepEqual(disk, armcompute.Disk{}) && disk.Properties.DiskSizeGB != nil && int(*disk.Properties.DiskSizeGB) != requestGiB { + return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.Properties.DiskSizeGB, requestGiB) } } return true, nil diff --git a/pkg/azuredisk/azuredisk_test.go b/pkg/azuredisk/azuredisk_test.go index 719620b8db..a7ab924b64 100644 --- a/pkg/azuredisk/azuredisk_test.go +++ b/pkg/azuredisk/azuredisk_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/date" "github.com/stretchr/testify/assert" @@ -35,7 +36,8 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/utils/pointer" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -61,12 +63,14 @@ func TestCheckDiskCapacity(t *testing.T) { size := int32(10) diskName := "unit-test" resourceGroup := "unit-test" - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{ + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub("").Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() flag, err := d.checkDiskCapacity(context.TODO(), "", resourceGroup, diskName, 10) assert.Equal(t, flag, true) assert.Nil(t, err) diff --git a/pkg/azuredisk/azuredisk_v1_test.go b/pkg/azuredisk/azuredisk_v1_test.go index f85004ef60..c0f1c5365f 100644 --- a/pkg/azuredisk/azuredisk_v1_test.go +++ b/pkg/azuredisk/azuredisk_v1_test.go @@ -23,11 +23,12 @@ import ( "context" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + 
"sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" ) func TestCheckDiskCapacity_V1(t *testing.T) { @@ -37,12 +38,14 @@ func TestCheckDiskCapacity_V1(t *testing.T) { size := int32(10) diskName := "unit-test" resourceGroup := "unit-test" - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{ + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub("").Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() d.setThrottlingCache(consts.GetDiskThrottlingKey, "") flag, _ := d.checkDiskCapacity(context.TODO(), "", resourceGroup, diskName, 11) diff --git a/pkg/azuredisk/azuredisk_v2.go b/pkg/azuredisk/azuredisk_v2.go index 88b2ac3f4e..fa951ab4a5 100644 --- a/pkg/azuredisk/azuredisk_v2.go +++ b/pkg/azuredisk/azuredisk_v2.go @@ -27,7 +27,7 @@ import ( "os" "reflect" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" @@ -108,6 +108,7 @@ func newDriverV2(options *DriverOptions) *DriverV2 { driver.diskController = NewManagedDiskController(driver.cloud) driver.diskController.DisableUpdateCache = driver.disableUpdateCache driver.diskController.AttachDetachInitialDelayInMs = int(driver.attachDetachInitialDelayInMs) + driver.clientFactory = driver.cloud.ComputeClientFactory if driver.vmType != "" { klog.V(2).Infof("override VMType(%s) in cloud config as %s", driver.cloud.VMType, driver.vmType) driver.cloud.VMType = driver.vmType @@ -212,7 +213,7 @@ func (d *DriverV2) Run(ctx context.Context) error { return err } -func (d *DriverV2) checkDiskExists(ctx context.Context, diskURI string) (*compute.Disk, error) { +func (d *DriverV2) checkDiskExists(ctx context.Context, diskURI string) (*armcompute.Disk, error) { diskName, err := azureutils.GetDiskName(diskURI) if err != nil { return nil, err @@ -224,21 +225,30 @@ func (d *DriverV2) checkDiskExists(ctx context.Context, diskURI string) (*comput } subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - disk, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, rerr.Error() + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, err + } + + disk, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, err } - return &disk, nil + return disk, nil } func (d *DriverV2) checkDiskCapacity(ctx context.Context, subsID, resourceGroup, diskName string, requestGiB int) (bool, error) { - disk, err := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return false, err + } + disk, err := diskClient.Get(ctx, resourceGroup, diskName) // Because we can not judge the reason of the error. Maybe the disk does not exist. // So here we do not handle the error. 
if err == nil { - if !reflect.DeepEqual(disk, compute.Disk{}) && disk.DiskSizeGB != nil && int(*disk.DiskSizeGB) != requestGiB { - return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.DiskProperties.DiskSizeGB, requestGiB) + if !reflect.DeepEqual(disk, &armcompute.Disk{}) && disk.Properties != nil && disk.Properties.DiskSizeGB != nil && int(*disk.Properties.DiskSizeGB) != requestGiB { + return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.Properties.DiskSizeGB, requestGiB) } } return true, nil diff --git a/pkg/azuredisk/controllerserver.go b/pkg/azuredisk/controllerserver.go index 648c0a6c4b..d72292590f 100644 --- a/pkg/azuredisk/controllerserver.go +++ b/pkg/azuredisk/controllerserver.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/container-storage-interface/spec/lib/go/csi" @@ -133,6 +134,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) cloud: localCloud, lockMap: newLockMap(), DisableDiskLunCheck: true, + clientFactory: localCloud.ComputeClientFactory, }, } localDiskController.DisableUpdateCache = d.disableUpdateCache @@ -163,7 +165,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) if _, err := azureutils.NormalizeCachingMode(diskParams.CachingMode); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - if skuName == compute.PremiumV2LRS { + if skuName == armcompute.DiskStorageAccountTypesPremiumV2LRS { // PremiumV2LRS only supports None caching mode azureutils.SetKeyValueInMap(diskParams.VolumeContext, consts.CachingModeField, string(v1.AzureDataDiskCachingNone)) } @@ -185,7 +187,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) requirement := req.GetAccessibilityRequirements() diskZone := azureutils.PickAvailabilityZone(requirement, diskParams.Location, topologyKey) accessibleTopology := []*csi.Topology{} - if skuName == compute.StandardSSDZRS || skuName == compute.PremiumZRS { + if skuName == armcompute.DiskStorageAccountTypesStandardSSDZRS || skuName == armcompute.DiskStorageAccountTypesPremiumZRS { klog.V(2).Infof("diskZone(%s) is reset as empty since disk(%s) is ZRS(%s)", diskZone, diskParams.DiskName, skuName) diskZone = "" // make volume scheduled on all 3 availability zones @@ -255,7 +257,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } } - if skuName == compute.UltraSSDLRS { + if skuName == armcompute.DiskStorageAccountTypesUltraSSDLRS { if diskParams.DiskIOPSReadWrite == "" && diskParams.DiskMBPSReadWrite == "" { // set default DiskIOPSReadWrite, DiskMBPSReadWrite per request size diskParams.DiskIOPSReadWrite = strconv.Itoa(getDefaultDiskIOPSReadWrite(requestGiB)) @@ -445,7 +447,7 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle strings.Contains(strings.ToLower(err.Error()), consts.ClientThrottled) { return nil, status.Errorf(codes.Internal, err.Error()) } - var cachingMode compute.CachingTypes + var cachingMode armcompute.CachingTypes if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil { return nil, status.Errorf(codes.Internal, err.Error()) } @@ -749,7 +751,8 @@ func (d *Driver) listVolumesInNodeResourceGroup(ctx 
context.Context, start, maxE // listVolumesByResourceGroup is a helper function that updates the ListVolumeResponse_Entry slice and returns number of total visited volumes, number of volumes that needs to be visited and an error if found func (d *Driver) listVolumesByResourceGroup(ctx context.Context, resourceGroup string, entries []*csi.ListVolumesResponse_Entry, start, maxEntries int, volSet map[string]bool) listVolumeStatus { - disks, derr := d.cloud.DisksClient.ListByResourceGroup(ctx, "", resourceGroup) + diskClient := d.clientFactory.GetDiskClient() + disks, derr := diskClient.List(ctx, resourceGroup) if derr != nil { return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %v", resourceGroup, derr.Error())} } @@ -785,7 +788,7 @@ func (d *Driver) listVolumesByResourceGroup(ctx context.Context, resourceGroup s continue } // HyperVGeneration property is only setup for os disks. Only the non os disks should be included in the list - if disk.DiskProperties == nil || disk.DiskProperties.HyperVGeneration == "" { + if disk.Properties == nil || disk.Properties.HyperVGeneration == nil || *disk.Properties.HyperVGeneration == "" { nodeList := []string{} if disk.ManagedBy != nil { @@ -843,14 +846,18 @@ func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.Controller } subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get disk client for subscription(%s) with error(%v)", subsID, err) + } + result, rerr := diskClient.Get(ctx, resourceGroup, diskName) if rerr != nil { return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, rerr.Error()) } - if result.DiskProperties.DiskSizeGB == nil { + if result.Properties == nil || result.Properties.DiskSizeGB == nil { return nil, status.Errorf(codes.Internal, "could not get size of the disk(%s)", diskName) } - oldSize := *resource.NewQuantity(int64(*result.DiskProperties.DiskSizeGB), resource.BinarySI) + oldSize := *resource.NewQuantity(int64(*result.Properties.DiskSizeGB), resource.BinarySI) mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_expand_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) isOperationSucceeded := false @@ -1153,26 +1160,30 @@ func (d *Driver) GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, d if curDepth > maxDepth { return nil, status.Error(codes.Internal, fmt.Sprintf("current depth (%d) surpassed the max depth (%d) while searching for the source disk size", curDepth, maxDepth)) } - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, rerr.Error() + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, err } - if result.DiskProperties == nil { + if result.Properties == nil { return nil, status.Error(codes.Internal, fmt.Sprintf("DiskProperty not found for disk (%s) in resource group (%s)", diskName, resourceGroup)) } - if result.DiskProperties.CreationData != nil && (*result.DiskProperties.CreationData).CreateOption == "Copy" { + if result.Properties.CreationData != nil && 
result.Properties.CreationData.CreateOption != nil && *result.Properties.CreationData.CreateOption == armcompute.DiskCreateOptionCopy { klog.V(2).Infof("Clone source disk has a parent source") - sourceResourceID := *result.DiskProperties.CreationData.SourceResourceID + sourceResourceID := *result.Properties.CreationData.SourceResourceID parentResourceGroup, _ := azureutils.GetResourceGroupFromURI(sourceResourceID) parentDiskName := path.Base(sourceResourceID) return d.GetSourceDiskSize(ctx, subsID, parentResourceGroup, parentDiskName, curDepth+1, maxDepth) } - if (*result.DiskProperties).DiskSizeGB == nil { + if (*result.Properties).DiskSizeGB == nil { return nil, status.Error(codes.Internal, fmt.Sprintf("DiskSizeGB for disk (%s) in resourcegroup (%s) is nil", diskName, resourceGroup)) } - return (*result.DiskProperties).DiskSizeGB, nil + return (*result.Properties).DiskSizeGB, nil } // The format of snapshot id is /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/snapshot-xxx-xxx. diff --git a/pkg/azuredisk/controllerserver_test.go b/pkg/azuredisk/controllerserver_test.go index 7aa27804fb..b18720c4a1 100644 --- a/pkg/azuredisk/controllerserver_test.go +++ b/pkg/azuredisk/controllerserver_test.go @@ -22,6 +22,8 @@ import ( "reflect" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/date" "github.com/container-storage-interface/spec/lib/go/csi" @@ -37,7 +39,8 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk/mockkubeclient" "sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk/mockpersistentvolume" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -294,10 +297,12 @@ func TestCreateVolume(t *testing.T) { VolumeCapabilities: createVolumeCapabilities(csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER), Parameters: mp, } - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() _, err := d.CreateVolume(context.Background(), req) expectedErr := status.Error(codes.InvalidArgument, "Failed parsing disk parameters: Tags 'unit-test' are invalid, the format should like: 'key1=value1,key2=value2'") if !reflect.DeepEqual(err, expectedErr) { @@ -328,16 +333,15 @@ func TestCreateVolume(t *testing.T) { Parameters: mp, VolumeContentSource: &volumecontensource, } - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - 
d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - rerr := &retry.Error{ - RawError: fmt.Errorf("test"), - } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(rerr).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("test")).AnyTimes() _, err := d.CreateVolume(context.Background(), req) - expectedErr := status.Errorf(codes.Internal, "Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: test") + expectedErr := status.Errorf(codes.Internal, "test") if err.Error() != expectedErr.Error() { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -360,16 +364,15 @@ func TestCreateVolume(t *testing.T) { Parameters: mp, VolumeContentSource: &volumecontensource, } - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, - } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - rerr := &retry.Error{ - RawError: fmt.Errorf(consts.NotFound), + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(rerr).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(consts.NotFound)).AnyTimes() _, err := d.CreateVolume(context.Background(), req) - expectedErr := status.Error(codes.NotFound, "Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: NotFound") + expectedErr := status.Error(codes.NotFound, "NotFound") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -396,16 +399,18 @@ func TestCreateVolume(t *testing.T) { size := int32(volumehelper.BytesToGiB(req.CapacityRange.RequiredBytes)) id := fmt.Sprintf(consts.ManagedDiskPath, "subs", "rg", testVolumeName) state := "Succeeded" - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, Name: &testVolumeName, - DiskProperties: &compute.DiskProperties{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, ProvisioningState: &state, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, 
nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() _, err := d.CreateVolume(context.Background(), req) expectedErr := error(nil) if !reflect.DeepEqual(err, expectedErr) { @@ -431,16 +436,18 @@ func TestCreateVolume(t *testing.T) { size := int32(volumehelper.BytesToGiB(req.CapacityRange.RequiredBytes)) id := fmt.Sprintf(consts.ManagedDiskPath, "subs", "rg", testVolumeName) state := "Succeeded" - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, Name: &testVolumeName, - DiskProperties: &compute.DiskProperties{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, ProvisioningState: &state, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() _, err := d.CreateVolume(context.Background(), req) expectedErr := error(nil) if !reflect.DeepEqual(err, expectedErr) { @@ -514,16 +521,18 @@ func TestCreateVolume(t *testing.T) { size := int32(volumehelper.BytesToGiB(req.CapacityRange.RequiredBytes)) id := fmt.Sprintf(consts.ManagedDiskPath, "subs", "rg", testVolumeName) state := "Succeeded" - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, Name: &testVolumeName, - DiskProperties: &compute.DiskProperties{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, ProvisioningState: &state, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() res, err := d.CreateVolume(context.Background(), req) assert.Equal(t, res.Volume.CapacityBytes, volumehelper.GiBToBytes(consts.PerformancePlusMinimumDiskSizeGiB)) expectedErr := error(nil) @@ -580,12 +589,13 @@ func TestDeleteVolume(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) defer cancel() id := test.req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } - - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Eq(ctx), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Delete(gomock.Eq(ctx), gomock.Any(), gomock.Any(), 
gomock.Any()).Return(nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().Delete(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() result, err := d.DeleteVolume(ctx, test.req) if err != nil { @@ -751,14 +761,14 @@ func TestControllerPublishVolume(t *testing.T) { VolumeCapability: volumeCap, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := status.Error(codes.InvalidArgument, "Node ID not provided") _, err := d.ControllerPublishVolume(context.Background(), req) @@ -776,14 +786,14 @@ func TestControllerPublishVolume(t *testing.T) { NodeId: nodeName, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) vm := compute.VirtualMachine{ Name: &nodeName, @@ -839,14 +849,14 @@ func TestControllerPublishVolume(t *testing.T) { NodeId: nodeName, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) vm := compute.VirtualMachine{ Name: &nodeName, @@ -902,14 +912,14 @@ func TestControllerPublishVolume(t *testing.T) { VolumeContext: volumeContext, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - 
mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) vm := compute.VirtualMachine{ Name: &nodeName, @@ -1128,10 +1138,10 @@ func TestControllerExpandVolume(t *testing.T) { CapacityRange: stdCapRange, } id := req.VolumeId - diskProperties := compute.DiskProperties{} - disk := compute.Disk{ - ID: &id, - DiskProperties: &diskProperties, + diskProperties := armcompute.DiskProperties{} + disk := &armcompute.Disk{ + ID: &id, + Properties: &diskProperties, } ctx := context.Background() cntl := gomock.NewController(t) @@ -1139,10 +1149,9 @@ func TestControllerExpandVolume(t *testing.T) { d, _ := NewFakeDriver(cntl) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.setCloud(&azure.Cloud{}) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := status.Errorf(codes.Internal, "could not get size of the disk(unit-test-volume)") _, err := d.ControllerExpandVolume(ctx, req) if !reflect.DeepEqual(err, expectedErr) { @@ -1731,10 +1740,12 @@ func TestListVolumes(t *testing.T) { defer cntl.Finish() d, _ := NewFakeDriver(cntl) fakeVolumeID := "test" - disk := compute.Disk{ID: &fakeVolumeID} - disks := []compute.Disk{} + disk := &armcompute.Disk{ID: &fakeVolumeID} + disks := []*armcompute.Disk{} disks = append(disks, disk) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1755,10 +1766,12 @@ func TestListVolumes(t *testing.T) { defer cntl.Finish() d, _ := NewFakeDriver(cntl) fakeVolumeID := "test" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID}, compute.Disk{ID: &fakeVolumeID} - disks := []compute.Disk{} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID}, &armcompute.Disk{ID: &fakeVolumeID} + disks := []*armcompute.Disk{} disks = append(disks, disk1, disk2) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := 
error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1783,10 +1796,12 @@ func TestListVolumes(t *testing.T) { defer cntl.Finish() d, _ := NewFakeDriver(cntl) fakeVolumeID1, fakeVolumeID12 := "test1", "test2" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID1}, compute.Disk{ID: &fakeVolumeID12} - disks := []compute.Disk{} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID1}, &armcompute.Disk{ID: &fakeVolumeID12} + disks := []*armcompute.Disk{} disks = append(disks, disk1, disk2) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1812,8 +1827,10 @@ func TestListVolumes(t *testing.T) { cntl := gomock.NewController(t) defer cntl.Finish() d, _ := NewFakeDriver(cntl) - disks := []compute.Disk{} - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + disks := []*armcompute.Disk{} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := status.Error(codes.FailedPrecondition, "ListVolumes starting token(1) on rg(rg) is greater than total number of volumes") _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1830,12 +1847,11 @@ func TestListVolumes(t *testing.T) { cntl := gomock.NewController(t) defer cntl.Finish() d, _ := NewFakeDriver(cntl) - disks := []compute.Disk{} - rerr := &retry.Error{ - RawError: fmt.Errorf("test"), - } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, rerr).AnyTimes() - expectedErr := status.Error(codes.Internal, "ListVolumes on rg(rg) failed with error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: test") + disks := []*armcompute.Disk{} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, fmt.Errorf("test")).AnyTimes() + expectedErr := status.Error(codes.Internal, "ListVolumes on rg(rg) failed with error: test") _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1846,12 +1862,16 @@ func TestListVolumes(t *testing.T) { name: "When KubeClient exists, Empty list without start token should not return error", testFunc: func(t *testing.T) { req := csi.ListVolumesRequest{} - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{}, } 
d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{}, nil) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{}, nil).AnyTimes() expectedErr := error(nil) _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1864,13 +1884,17 @@ func TestListVolumes(t *testing.T) { testFunc: func(t *testing.T) { req := csi.ListVolumesRequest{} fakeVolumeID := "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-1/providers/Microsoft.Compute/disks/test-pv-1" - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{volume1}, } d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - disk1 := compute.Disk{ID: &fakeVolumeID} - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk1}, nil) + disk1 := &armcompute.Disk{ID: &fakeVolumeID} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk1}, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1887,16 +1911,20 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ MaxEntries: 1, } - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) d.getCloud().SubscriptionID = "test-subscription" fakeVolumeID := "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-1/providers/Microsoft.Compute/disks/test-pv-1" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID}, compute.Disk{ID: &fakeVolumeID} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID}, &armcompute.Disk{ID: &fakeVolumeID} pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{volume1, volume2}, } d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk1}, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk2}, nil) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk1}, nil).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk2}, nil).AnyTimes() expectedErr := error(nil) 
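// A minimal sketch of the mock wiring these TestListVolumes hunks repeat (illustrative only;
// it reuses the FakeDriver, mock_azclient and mock_diskclient helpers referenced elsewhere in
// this diff, and is not itself one of the patch hunks):
//
//	cntl := gomock.NewController(t)
//	defer cntl.Finish()
//	d, _ := NewFakeDriver(cntl)
//	diskClient := mock_diskclient.NewMockInterface(cntl)
//	d.getClientFactory().(*mock_azclient.MockClientFactory).
//		EXPECT().GetDiskClient().Return(diskClient).AnyTimes()
//	diskClient.EXPECT().List(gomock.Any(), gomock.Any()).
//		Return([]*armcompute.Disk{}, nil).AnyTimes()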
listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1917,16 +1945,20 @@ func TestListVolumes(t *testing.T) { StartingToken: "1", MaxEntries: 1, } - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) d.getCloud().SubscriptionID = "test-subscription" pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{volume1, volume2}, } fakeVolumeID11, fakeVolumeID12 := "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-1/providers/Microsoft.Compute/disks/test-pv-1", "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-2/providers/Microsoft.Compute/disks/test-pv-2" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID11}, compute.Disk{ID: &fakeVolumeID12} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID11}, &armcompute.Disk{ID: &fakeVolumeID12} d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk1}, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk2}, nil) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk1}, nil).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk2}, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1938,8 +1970,8 @@ func TestListVolumes(t *testing.T) { if listVolumesResponse.NextToken != "" { t.Errorf("actualNextToken: (%v), expectedNextToken: (%v)", listVolumesResponse.NextToken, "") } - if listVolumesResponse.Entries[0].Volume.VolumeId != fakeVolumeID12 { - t.Errorf("actualVolumeId: (%v), expectedVolumeId: (%v)", listVolumesResponse.Entries[0].Volume.VolumeId, fakeVolumeID12) + if listVolumesResponse.Entries[0].Volume.VolumeId != fakeVolumeID11 { + t.Errorf("actualVolumeId: (%v), expectedVolumeId: (%v)", listVolumesResponse.Entries[0].Volume.VolumeId, fakeVolumeID11) } }, }, @@ -1949,12 +1981,16 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ StartingToken: "1", } - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{}, } d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) expectedErr := status.Error(codes.FailedPrecondition, "ListVolumes starting token(1) is greater than total number of disks") + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1967,10 +2003,14 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ StartingToken: "1", } - d := 
getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) rerr := fmt.Errorf("test") d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(nil, rerr) expectedErr := status.Error(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: test") + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -2046,10 +2086,12 @@ func TestValidateVolumeCapabilities(t *testing.T) { cntl := gomock.NewController(t) defer cntl.Finish() d, _ := NewFakeDriver(cntl) - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := error(nil) _, err := d.ValidateVolumeCapabilities(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -2075,10 +2117,12 @@ func TestValidateVolumeCapabilities(t *testing.T) { cntl := gomock.NewController(t) defer cntl.Finish() d, _ := NewFakeDriver(cntl) - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := error(nil) _, err := d.ValidateVolumeCapabilities(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -2116,8 +2160,11 @@ func TestGetSourceDiskSize(t *testing.T) { cntl := gomock.NewController(t) defer cntl.Finish() d, _ := NewFakeDriver(cntl) - disk := compute.Disk{} - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + disk := &armcompute.Disk{} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) expectedErr := status.Error(codes.Internal, "DiskProperty not found for disk (test-disk) in resource group (test-rg)") if !reflect.DeepEqual(err, expectedErr) { @@ -2131,11 +2178,13 @@ func TestGetSourceDiskSize(t *testing.T) { cntl := gomock.NewController(t) defer cntl.Finish() d, _ := NewFakeDriver(cntl) - diskProperties := 
compute.DiskProperties{} - disk := compute.Disk{ - DiskProperties: &diskProperties, + diskProperties := armcompute.DiskProperties{} + disk := &armcompute.Disk{ + Properties: &diskProperties, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) expectedErr := status.Error(codes.Internal, "DiskSizeGB for disk (test-disk) in resourcegroup (test-rg) is nil") if !reflect.DeepEqual(err, expectedErr) { @@ -2150,13 +2199,15 @@ func TestGetSourceDiskSize(t *testing.T) { defer cntl.Finish() d, _ := NewFakeDriver(cntl) diskSizeGB := int32(8) - diskProperties := compute.DiskProperties{ + diskProperties := armcompute.DiskProperties{ DiskSizeGB: &diskSizeGB, } - disk := compute.Disk{ - DiskProperties: &diskProperties, + disk := &armcompute.Disk{ + Properties: &diskProperties, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() size, _ := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) expectedOutput := diskSizeGB if *size != expectedOutput { @@ -2173,24 +2224,26 @@ func TestGetSourceDiskSize(t *testing.T) { diskSizeGB1 := int32(16) diskSizeGB2 := int32(8) sourceURI := "/subscriptions/xxxxxxxx/resourcegroups/test-rg/providers/microsoft.compute/disks/test-disk-1" - creationData := compute.CreationData{ - CreateOption: "Copy", + creationData := armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceURI: &sourceURI, } - diskProperties1 := compute.DiskProperties{ + diskProperties1 := armcompute.DiskProperties{ CreationData: &creationData, DiskSizeGB: &diskSizeGB1, } - diskProperties2 := compute.DiskProperties{ + diskProperties2 := armcompute.DiskProperties{ DiskSizeGB: &diskSizeGB2, } - disk1 := compute.Disk{ - DiskProperties: &diskProperties1, + disk1 := &armcompute.Disk{ + Properties: &diskProperties1, } - disk2 := compute.Disk{ - DiskProperties: &diskProperties2, + disk2 := &armcompute.Disk{ + Properties: &diskProperties2, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk1, nil).Return(disk2, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk1, nil).Return(disk2, nil).AnyTimes() size, _ := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk-1", 0, 2) expectedOutput := diskSizeGB2 if *size != expectedOutput { @@ -2204,12 +2257,10 @@ func TestGetSourceDiskSize(t *testing.T) { } } -func getFakeDriverWithKubeClient(t *testing.T) FakeDriver { - cntl := 
gomock.NewController(t) - defer cntl.Finish() - d, _ := NewFakeDriver(cntl) - ctrl := gomock.NewController(t) - defer ctrl.Finish() +func getFakeDriverWithKubeClient(ctrl *gomock.Controller) FakeDriver { + + d, _ := NewFakeDriver(ctrl) + corev1 := mockcorev1.NewMockInterface(ctrl) persistentvolume := mockpersistentvolume.NewMockInterface(ctrl) d.getCloud().KubeClient = mockkubeclient.NewMockInterface(ctrl) diff --git a/pkg/azuredisk/controllerserver_v2.go b/pkg/azuredisk/controllerserver_v2.go index a7457b4d93..94aa016bce 100644 --- a/pkg/azuredisk/controllerserver_v2.go +++ b/pkg/azuredisk/controllerserver_v2.go @@ -27,6 +27,7 @@ import ( "strconv" "strings" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/container-storage-interface/spec/lib/go/csi" @@ -128,7 +129,7 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques if _, err := azureutils.NormalizeCachingMode(diskParams.CachingMode); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - if skuName == compute.PremiumV2LRS { + if skuName == armcompute.DiskStorageAccountTypesPremiumV2LRS { // PremiumV2LRS only supports None caching mode azureutils.SetKeyValueInMap(diskParams.VolumeContext, consts.CachingModeField, string(v1.AzureDataDiskCachingNone)) } @@ -372,7 +373,7 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control strings.Contains(strings.ToLower(err.Error()), consts.ClientThrottled) { return nil, status.Errorf(codes.Internal, err.Error()) } - var cachingMode compute.CachingTypes + var cachingMode armcompute.CachingTypes if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil { return nil, status.Errorf(codes.Internal, err.Error()) } @@ -622,9 +623,10 @@ func (d *DriverV2) listVolumesInNodeResourceGroup(ctx context.Context, start, ma // listVolumesByResourceGroup is a helper function that updates the ListVolumeResponse_Entry slice and returns number of total visited volumes, number of volumes that needs to be visited and an error if found func (d *DriverV2) listVolumesByResourceGroup(ctx context.Context, resourceGroup string, entries []*csi.ListVolumesResponse_Entry, start, maxEntries int, volSet map[string]bool) listVolumeStatus { - disks, derr := d.cloud.DisksClient.ListByResourceGroup(ctx, "", resourceGroup) + diskClient := d.clientFactory.GetDiskClient() + disks, derr := diskClient.List(ctx, resourceGroup) if derr != nil { - return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %v", resourceGroup, derr.Error())} + return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %s", resourceGroup, derr.Error())} } // if volSet is initialized but is empty, return if volSet != nil && len(volSet) == 0 { @@ -658,7 +660,7 @@ func (d *DriverV2) listVolumesByResourceGroup(ctx context.Context, resourceGroup continue } // HyperVGeneration property is only setup for os disks. 
Only the non os disks should be included in the list - if disk.DiskProperties == nil || disk.DiskProperties.HyperVGeneration == "" { + if disk.Properties == nil || disk.Properties.HyperVGeneration == nil || *disk.Properties.HyperVGeneration == "" { nodeList := []string{} if disk.ManagedBy != nil { @@ -722,14 +724,18 @@ func (d *DriverV2) ControllerExpandVolume(ctx context.Context, req *csi.Controll }() subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, rerr.Error()) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get disk client for subscription(%s) with error(%v)", subsID, err) } - if result.DiskProperties.DiskSizeGB == nil { + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, err) + } + if result.Properties.DiskSizeGB == nil { return nil, status.Errorf(codes.Internal, "could not get size of the disk(%s)", diskName) } - oldSize := *resource.NewQuantity(int64(*result.DiskProperties.DiskSizeGB), resource.BinarySI) + oldSize := *resource.NewQuantity(int64(*result.Properties.DiskSizeGB), resource.BinarySI) klog.V(2).Infof("begin to expand azure disk(%s) with new size(%d)", diskURI, requestSize.Value()) newSize, err := d.diskController.ResizeDisk(ctx, diskURI, oldSize, requestSize, d.enableDiskOnlineResize) @@ -952,26 +958,30 @@ func (d *DriverV2) GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, if curDepth > maxDepth { return nil, status.Error(codes.Internal, fmt.Sprintf("current depth (%d) surpassed the max depth (%d) while searching for the source disk size", curDepth, maxDepth)) } - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, rerr.Error() + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, err } - if result.DiskProperties == nil { + if result.Properties == nil { return nil, status.Error(codes.Internal, fmt.Sprintf("DiskProperty not found for disk (%s) in resource group (%s)", diskName, resourceGroup)) } - if result.DiskProperties.CreationData != nil && (*result.DiskProperties.CreationData).CreateOption == "Copy" { + if result.Properties.CreationData != nil && result.Properties.CreationData.CreateOption != nil && *result.Properties.CreationData.CreateOption == armcompute.DiskCreateOptionCopy { klog.V(2).Infof("Clone source disk has a parent source") - sourceResourceID := *result.DiskProperties.CreationData.SourceResourceID + sourceResourceID := *result.Properties.CreationData.SourceResourceID parentResourceGroup, _ := azureutils.GetResourceGroupFromURI(sourceResourceID) parentDiskName := path.Base(sourceResourceID) return d.GetSourceDiskSize(ctx, subsID, parentResourceGroup, parentDiskName, curDepth+1, maxDepth) } - if (*result.DiskProperties).DiskSizeGB == nil { + if (*result.Properties).DiskSizeGB == nil { return nil, status.Error(codes.Internal, fmt.Sprintf("DiskSizeGB for disk (%s) in resourcegroup (%s) is nil", diskName, resourceGroup)) } - return 
(*result.DiskProperties).DiskSizeGB, nil + return (*result.Properties).DiskSizeGB, nil } // The format of snapshot id is /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/snapshot-xxx-xxx. diff --git a/pkg/azuredisk/fake_azuredisk.go b/pkg/azuredisk/fake_azuredisk.go index b1599292c7..8d17a334a3 100644 --- a/pkg/azuredisk/fake_azuredisk.go +++ b/pkg/azuredisk/fake_azuredisk.go @@ -20,7 +20,7 @@ import ( "context" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "go.uber.org/mock/gomock" "k8s.io/apimachinery/pkg/types" @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization/mockoptimization" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) @@ -78,6 +79,7 @@ type FakeDriver interface { setVersion(version string) getCloud() *azure.Cloud setCloud(*azure.Cloud) + getClientFactory() azclient.ClientFactory getMounter() *mount.SafeFormatAndMount setMounter(*mount.SafeFormatAndMount) setPerfOptimizationEnabled(bool) @@ -85,7 +87,7 @@ type FakeDriver interface { getHostUtil() hostUtil checkDiskCapacity(context.Context, string, string, string, int) (bool, error) - checkDiskExists(ctx context.Context, diskURI string) (*compute.Disk, error) + checkDiskExists(ctx context.Context, diskURI string) (*armcompute.Disk, error) getSnapshotInfo(string) (string, string, string, error) waitForSnapshotReady(context.Context, string, string, string, time.Duration, time.Duration) error getSnapshotByID(context.Context, string, string, string, string) (*csi.Snapshot, error) @@ -121,6 +123,7 @@ func newFakeDriverV1(ctrl *gomock.Controller) (*fakeDriverV1, error) { driver.cloud = azure.GetTestCloud(ctrl) driver.diskController = NewManagedDiskController(driver.cloud) + driver.clientFactory = driver.cloud.ComputeClientFactory mounter, err := mounter.NewSafeMounter(driver.enableWindowsHostProcess, driver.useCSIProxyGAInterface) if err != nil { @@ -165,6 +168,9 @@ func (d *fakeDriverV1) setNextCommandOutputScripts(scripts ...testingexec.FakeAc func (d *fakeDriverV1) setThrottlingCache(key string, value string) { d.throttlingCache.Set(key, value) } +func (d *fakeDriverV1) getClientFactory() azclient.ClientFactory { + return d.clientFactory +} func createVolumeCapabilities(accessMode csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability { return []*csi.VolumeCapability{ diff --git a/pkg/azuredisk/fake_azuredisk_v2.go b/pkg/azuredisk/fake_azuredisk_v2.go index 2bcfc4ff56..4facb5a421 100644 --- a/pkg/azuredisk/fake_azuredisk_v2.go +++ b/pkg/azuredisk/fake_azuredisk_v2.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/mounter" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization/mockoptimization" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) @@ -71,6 +72,7 @@ func newFakeDriverV2(ctrl *gomock.Controller) (*fakeDriverV2, error) { driver.cloud = azure.GetTestCloud(ctrl) driver.diskController = NewManagedDiskController(driver.cloud) + driver.clientFactory = driver.cloud.ComputeClientFactory mounter, err := mounter.NewSafeMounter(driver.enableWindowsHostProcess, 
driver.useCSIProxyGAInterface) if err != nil { @@ -104,6 +106,9 @@ func newFakeDriverV2(ctrl *gomock.Controller) (*fakeDriverV2, error) { func (d *fakeDriverV2) setNextCommandOutputScripts(scripts ...testingexec.FakeAction) { d.mounter.Exec.(*mounter.FakeSafeMounter).SetNextCommandOutputScripts(scripts...) } +func (d *fakeDriverV2) getClientFactory() azclient.ClientFactory { + return d.clientFactory +} func (d *DriverV2) setThrottlingCache(key string, value string) { } diff --git a/pkg/azureutils/azure_disk_utils.go b/pkg/azureutils/azure_disk_utils.go index 13c7e608bd..908f4a035c 100644 --- a/pkg/azureutils/azure_disk_utils.go +++ b/pkg/azureutils/azure_disk_utils.go @@ -29,7 +29,8 @@ import ( "time" "unicode" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -52,8 +53,8 @@ import ( const ( azurePublicCloud = "AZUREPUBLICCLOUD" azureStackCloud = "AZURESTACKCLOUD" - azurePublicCloudDefaultStorageAccountType = compute.StandardSSDLRS - azureStackCloudDefaultStorageAccountType = compute.StandardLRS + azurePublicCloudDefaultStorageAccountType = armcompute.DiskStorageAccountTypesStandardSSDLRS + azureStackCloudDefaultStorageAccountType = armcompute.DiskStorageAccountTypesStandardLRS defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingReadOnly // default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd // see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#uri-parameters @@ -132,7 +133,7 @@ type ManagedDiskParameters struct { Zoned string } -func GetCachingMode(attributes map[string]string) (compute.CachingTypes, error) { +func GetCachingMode(attributes map[string]string) (armcompute.CachingTypes, error) { var ( cachingMode v1.AzureDataDiskCachingMode err error @@ -146,7 +147,7 @@ func GetCachingMode(attributes map[string]string) (compute.CachingTypes, error) } cachingMode, err = NormalizeCachingMode(cachingMode) - return compute.CachingTypes(cachingMode), err + return armcompute.CachingTypes(cachingMode), err } // GetAttachDiskInitialDelay gttachDiskInitialDelay from attributes @@ -353,10 +354,10 @@ func GetSubscriptionIDFromURI(diskURI string) string { return "" } -func GetValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (compute.CreationData, error) { +func GetValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (armcompute.CreationData, error) { if sourceResourceID == "" { - return compute.CreationData{ - CreateOption: compute.Empty, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, nil } @@ -371,21 +372,21 @@ func GetValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc sourceResourceID = fmt.Sprintf(consts.ManagedDiskPath, subscriptionID, resourceGroup, sourceResourceID) } default: - return compute.CreationData{ - CreateOption: compute.Empty, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, nil } splits := strings.Split(sourceResourceID, "/") if len(splits) > 9 { if sourceType == consts.SourceSnapshot { - return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) + 
return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) } - return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, consts.ManagedDiskPathRE) + return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, consts.ManagedDiskPathRE) } - return compute.CreationData{ - CreateOption: compute.Copy, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceID, }, nil } @@ -484,33 +485,33 @@ func NormalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureData return cachingMode, nil } -func NormalizeNetworkAccessPolicy(networkAccessPolicy string) (compute.NetworkAccessPolicy, error) { +func NormalizeNetworkAccessPolicy(networkAccessPolicy string) (armcompute.NetworkAccessPolicy, error) { if networkAccessPolicy == "" { - return compute.NetworkAccessPolicy(networkAccessPolicy), nil + return armcompute.NetworkAccessPolicy(networkAccessPolicy), nil } - policy := compute.NetworkAccessPolicy(networkAccessPolicy) - for _, s := range compute.PossibleNetworkAccessPolicyValues() { + policy := armcompute.NetworkAccessPolicy(networkAccessPolicy) + for _, s := range armcompute.PossibleNetworkAccessPolicyValues() { if policy == s { return policy, nil } } - return "", fmt.Errorf("azureDisk - %s is not supported NetworkAccessPolicy. Supported values are %s", networkAccessPolicy, compute.PossibleNetworkAccessPolicyValues()) + return "", fmt.Errorf("azureDisk - %s is not supported NetworkAccessPolicy. Supported values are %s", networkAccessPolicy, armcompute.PossibleNetworkAccessPolicyValues()) } -func NormalizePublicNetworkAccess(publicNetworkAccess string) (compute.PublicNetworkAccess, error) { +func NormalizePublicNetworkAccess(publicNetworkAccess string) (armcompute.PublicNetworkAccess, error) { if publicNetworkAccess == "" { - return compute.PublicNetworkAccess(publicNetworkAccess), nil + return armcompute.PublicNetworkAccess(publicNetworkAccess), nil } - access := compute.PublicNetworkAccess(publicNetworkAccess) - for _, s := range compute.PossiblePublicNetworkAccessValues() { + access := armcompute.PublicNetworkAccess(publicNetworkAccess) + for _, s := range armcompute.PossiblePublicNetworkAccessValues() { if access == s { return access, nil } } - return "", fmt.Errorf("azureDisk - %s is not supported PublicNetworkAccess. Supported values are %s", publicNetworkAccess, compute.PossiblePublicNetworkAccessValues()) + return "", fmt.Errorf("azureDisk - %s is not supported PublicNetworkAccess. 
Supported values are %s", publicNetworkAccess, armcompute.PossiblePublicNetworkAccessValues()) } -func NormalizeStorageAccountType(storageAccountType, cloud string, disableAzureStackCloud bool) (compute.DiskStorageAccountTypes, error) { +func NormalizeStorageAccountType(storageAccountType, cloud string, disableAzureStackCloud bool) (armcompute.DiskStorageAccountTypes, error) { if storageAccountType == "" { if IsAzureStackCloud(cloud, disableAzureStackCloud) { return azureStackCloudDefaultStorageAccountType, nil @@ -518,10 +519,10 @@ func NormalizeStorageAccountType(storageAccountType, cloud string, disableAzureS return azurePublicCloudDefaultStorageAccountType, nil } - sku := compute.DiskStorageAccountTypes(storageAccountType) - supportedSkuNames := compute.PossibleDiskStorageAccountTypesValues() + sku := armcompute.DiskStorageAccountTypes(storageAccountType) + supportedSkuNames := armcompute.PossibleDiskStorageAccountTypesValues() if IsAzureStackCloud(cloud, disableAzureStackCloud) { - supportedSkuNames = []compute.DiskStorageAccountTypes{compute.StandardLRS, compute.PremiumLRS} + supportedSkuNames = []armcompute.DiskStorageAccountTypes{armcompute.DiskStorageAccountTypesStandardLRS, armcompute.DiskStorageAccountTypesPremiumLRS} } for _, s := range supportedSkuNames { if sku == s { @@ -536,7 +537,7 @@ func ValidateDiskEncryptionType(encryptionType string) error { if encryptionType == "" { return nil } - supportedTypes := compute.PossibleEncryptionTypeValues() + supportedTypes := armcompute.PossibleEncryptionTypeValues() for _, s := range supportedTypes { if encryptionType == string(s) { return nil @@ -549,7 +550,7 @@ func ValidateDataAccessAuthMode(dataAccessAuthMode string) error { if dataAccessAuthMode == "" { return nil } - supportedModes := compute.PossibleDataAccessAuthModeValues() + supportedModes := armcompute.PossibleDataAccessAuthModeValues() for _, s := range supportedModes { if dataAccessAuthMode == string(s) { return nil @@ -669,9 +670,9 @@ func ParseDiskParameters(parameters map[string]string) (ManagedDiskParameters, e } } - if strings.EqualFold(diskParams.AccountType, string(compute.PremiumV2LRS)) { + if strings.EqualFold(diskParams.AccountType, string(armcompute.DiskStorageAccountTypesPremiumV2LRS)) { if diskParams.CachingMode != "" && !strings.EqualFold(string(diskParams.CachingMode), string(v1.AzureDataDiskCachingNone)) { - return diskParams, fmt.Errorf("cachingMode %s is not supported for %s", diskParams.CachingMode, compute.PremiumV2LRS) + return diskParams, fmt.Errorf("cachingMode %s is not supported for %s", diskParams.CachingMode, armcompute.DiskStorageAccountTypesPremiumV2LRS) } } @@ -725,18 +726,18 @@ func checkDiskName(diskName string) bool { return true } -// InsertDiskProperties: insert disk properties to map -func InsertDiskProperties(disk *compute.Disk, publishConext map[string]string) { +// InsertProperties: insert disk properties to map +func InsertDiskProperties(disk *armcompute.Disk, publishConext map[string]string) { if disk == nil || publishConext == nil { return } - if disk.Sku != nil { - publishConext[consts.SkuNameField] = string(disk.Sku.Name) + if disk.SKU != nil { + publishConext[consts.SkuNameField] = string(*disk.SKU.Name) } - prop := disk.DiskProperties + prop := disk.Properties if prop != nil { - publishConext[consts.NetworkAccessPolicyField] = string(prop.NetworkAccessPolicy) + publishConext[consts.NetworkAccessPolicyField] = string(*prop.NetworkAccessPolicy) if prop.DiskIOPSReadWrite != nil { publishConext[consts.DiskIOPSReadWriteField] = 
strconv.Itoa(int(*prop.DiskIOPSReadWrite)) } diff --git a/pkg/azureutils/azure_disk_utils_test.go b/pkg/azureutils/azure_disk_utils_test.go index ca184b2e33..5716160a56 100644 --- a/pkg/azureutils/azure_disk_utils_test.go +++ b/pkg/azureutils/azure_disk_utils_test.go @@ -27,6 +27,8 @@ import ( "testing" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" @@ -131,42 +133,42 @@ func TestCheckDiskName(t *testing.T) { func TestGetCachingMode(t *testing.T) { tests := []struct { options map[string]string - expectedCachingMode compute.CachingTypes + expectedCachingMode armcompute.CachingTypes expectedError bool }{ { nil, - compute.CachingTypes(defaultAzureDataDiskCachingMode), + armcompute.CachingTypes(defaultAzureDataDiskCachingMode), false, }, { map[string]string{}, - compute.CachingTypes(defaultAzureDataDiskCachingMode), + armcompute.CachingTypes(defaultAzureDataDiskCachingMode), false, }, { map[string]string{consts.CachingModeField: ""}, - compute.CachingTypes(defaultAzureDataDiskCachingMode), + armcompute.CachingTypes(defaultAzureDataDiskCachingMode), false, }, { map[string]string{consts.CachingModeField: "None"}, - compute.CachingTypes("None"), + armcompute.CachingTypes("None"), false, }, { map[string]string{consts.CachingModeField: "ReadOnly"}, - compute.CachingTypes("ReadOnly"), + armcompute.CachingTypes("ReadOnly"), false, }, { map[string]string{consts.CachingModeField: "ReadWrite"}, - compute.CachingTypes("ReadWrite"), + armcompute.CachingTypes("ReadWrite"), false, }, { map[string]string{consts.CachingModeField: "WriteOnly"}, - compute.CachingTypes(""), + armcompute.CachingTypes(""), true, }, } @@ -622,7 +624,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup string sourceResourceID string sourceType string - expected1 compute.CreationData + expected1 armcompute.CreationData expected2 error }{ { @@ -630,8 +632,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "", sourceType: "", - expected1: compute.CreationData{ - CreateOption: compute.Empty, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, expected2: nil, }, @@ -640,8 +642,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceSnapshotID, }, expected2: nil, @@ -651,8 +653,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "xxx", sourceResourceID: "xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceSnapshotID, }, expected2: nil, @@ -662,7 +664,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/23/providers/Microsoft.Compute/disks/name", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is 
invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/23/providers/Microsoft.Compute/disks/name", diskSnapshotPathRE), }, { @@ -670,7 +672,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "http://test.com/vhds/name", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots/http://test.com/vhds/name", diskSnapshotPathRE), }, { @@ -678,7 +680,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/snapshots/xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/xxx/snapshots/xxx", diskSnapshotPathRE), }, { @@ -686,7 +688,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", diskSnapshotPathRE), }, { @@ -694,8 +696,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "xxx", sourceType: "", - expected1: compute.CreationData{ - CreateOption: compute.Empty, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, expected2: nil, }, @@ -704,8 +706,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xxx", sourceType: consts.SourceVolume, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceVolumeID, }, expected2: nil, @@ -715,8 +717,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "xxx", sourceResourceID: "xxx", sourceType: consts.SourceVolume, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceVolumeID, }, expected2: nil, @@ -726,7 +728,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", sourceType: consts.SourceVolume, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/disks//subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", consts.ManagedDiskPathRE), }, } @@ -1194,37 +1196,37 @@ func TestValidateDataAccessAuthMode(t *testing.T) { func TestNormalizeNetworkAccessPolicy(t *testing.T) { tests := []struct { networkAccessPolicy string - expectedNetworkAccessPolicy compute.NetworkAccessPolicy + 
expectedNetworkAccessPolicy armcompute.NetworkAccessPolicy expectError bool }{ { networkAccessPolicy: "", - expectedNetworkAccessPolicy: compute.NetworkAccessPolicy(""), + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicy(""), expectError: false, }, { networkAccessPolicy: "AllowAll", - expectedNetworkAccessPolicy: compute.AllowAll, + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicyAllowAll, expectError: false, }, { networkAccessPolicy: "DenyAll", - expectedNetworkAccessPolicy: compute.DenyAll, + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicyDenyAll, expectError: false, }, { networkAccessPolicy: "AllowPrivate", - expectedNetworkAccessPolicy: compute.AllowPrivate, + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicyAllowPrivate, expectError: false, }, { networkAccessPolicy: "allowAll", - expectedNetworkAccessPolicy: compute.NetworkAccessPolicy(""), + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicy(""), expectError: true, }, { networkAccessPolicy: "invalid", - expectedNetworkAccessPolicy: compute.NetworkAccessPolicy(""), + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicy(""), expectError: true, }, } @@ -1239,32 +1241,32 @@ func TestNormalizeNetworkAccessPolicy(t *testing.T) { func TestNormalizePublicNetworkAccess(t *testing.T) { tests := []struct { publicNetworkAccess string - expectedPublicNetworkAccess compute.PublicNetworkAccess + expectedPublicNetworkAccess armcompute.PublicNetworkAccess expectError bool }{ { publicNetworkAccess: "", - expectedPublicNetworkAccess: compute.PublicNetworkAccess(""), + expectedPublicNetworkAccess: armcompute.PublicNetworkAccess(""), expectError: false, }, { publicNetworkAccess: "Enabled", - expectedPublicNetworkAccess: compute.Enabled, + expectedPublicNetworkAccess: armcompute.PublicNetworkAccessEnabled, expectError: false, }, { publicNetworkAccess: "Disabled", - expectedPublicNetworkAccess: compute.Disabled, + expectedPublicNetworkAccess: armcompute.PublicNetworkAccessDisabled, expectError: false, }, { publicNetworkAccess: "enabled", - expectedPublicNetworkAccess: compute.PublicNetworkAccess(""), + expectedPublicNetworkAccess: armcompute.PublicNetworkAccess(""), expectError: true, }, { publicNetworkAccess: "disabled", - expectedPublicNetworkAccess: compute.PublicNetworkAccess(""), + expectedPublicNetworkAccess: armcompute.PublicNetworkAccess(""), expectError: true, }, } @@ -1281,21 +1283,21 @@ func TestNormalizeStorageAccountType(t *testing.T) { cloud string storageAccountType string disableAzureStackCloud bool - expectedAccountType compute.DiskStorageAccountTypes + expectedAccountType armcompute.DiskStorageAccountTypes expectError bool }{ { cloud: azurePublicCloud, storageAccountType: "", disableAzureStackCloud: false, - expectedAccountType: compute.StandardSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardSSDLRS, expectError: false, }, { cloud: azureStackCloud, storageAccountType: "", disableAzureStackCloud: false, - expectedAccountType: compute.StandardLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardLRS, expectError: false, }, { @@ -1309,28 +1311,28 @@ func TestNormalizeStorageAccountType(t *testing.T) { cloud: azurePublicCloud, storageAccountType: "Standard_LRS", disableAzureStackCloud: false, - expectedAccountType: compute.StandardLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardLRS, expectError: false, }, { cloud: azurePublicCloud, storageAccountType: "Premium_LRS", disableAzureStackCloud: false, - expectedAccountType: 
compute.PremiumLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesPremiumLRS, expectError: false, }, { cloud: azurePublicCloud, storageAccountType: "StandardSSD_LRS", disableAzureStackCloud: false, - expectedAccountType: compute.StandardSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardSSDLRS, expectError: false, }, { cloud: azurePublicCloud, storageAccountType: "UltraSSD_LRS", disableAzureStackCloud: false, - expectedAccountType: compute.UltraSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, expectError: false, }, { @@ -1344,7 +1346,7 @@ func TestNormalizeStorageAccountType(t *testing.T) { cloud: azureStackCloud, storageAccountType: "UltraSSD_LRS", disableAzureStackCloud: true, - expectedAccountType: compute.UltraSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, expectError: false, }, } @@ -1677,7 +1679,7 @@ func createTestFile(path string) error { func TestInsertDiskProperties(t *testing.T) { tests := []struct { desc string - disk *compute.Disk + disk *armcompute.Disk inputMap map[string]string expectedMap map[string]string }{ @@ -1686,30 +1688,30 @@ func TestInsertDiskProperties(t *testing.T) { }, { desc: "empty", - disk: &compute.Disk{}, + disk: &armcompute.Disk{}, inputMap: map[string]string{}, expectedMap: map[string]string{}, }, { desc: "skuName", - disk: &compute.Disk{ - Sku: &compute.DiskSku{Name: compute.PremiumLRS}, + disk: &armcompute.Disk{ + SKU: &armcompute.DiskSKU{Name: to.Ptr(armcompute.DiskStorageAccountTypesPremiumLRS)}, }, inputMap: map[string]string{}, expectedMap: map[string]string{"skuname": string(compute.PremiumLRS)}, }, { desc: "DiskProperties", - disk: &compute.Disk{ - Sku: &compute.DiskSku{Name: compute.StandardSSDLRS}, - DiskProperties: &compute.DiskProperties{ - NetworkAccessPolicy: compute.AllowPrivate, + disk: &armcompute.Disk{ + SKU: &armcompute.DiskSKU{Name: to.Ptr(armcompute.DiskStorageAccountTypesStandardSSDLRS)}, + Properties: &armcompute.DiskProperties{ + NetworkAccessPolicy: to.Ptr(armcompute.NetworkAccessPolicyAllowPrivate), DiskIOPSReadWrite: pointer.Int64(6400), DiskMBpsReadWrite: pointer.Int64(100), - CreationData: &compute.CreationData{ + CreationData: &armcompute.CreationData{ LogicalSectorSize: pointer.Int32(512), }, - Encryption: &compute.Encryption{DiskEncryptionSetID: pointer.String("/subs/DiskEncryptionSetID")}, + Encryption: &armcompute.Encryption{DiskEncryptionSetID: pointer.String("/subs/DiskEncryptionSetID")}, MaxShares: pointer.Int32(3), }, }, diff --git a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go index a80fb599c5..47543cb2f8 100644 --- a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go @@ -21,6 +21,7 @@ import ( "fmt" "time" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -96,7 +97,7 @@ func (t *DynamicallyProvisionedAzureDiskDetach) Run(ctx context.Context, client if err != nil { return false, fmt.Errorf("Error getting disk for azuredisk %v", err) } - if *disktest.Properties.DiskState == compute.DiskStateUnattached { + if *disktest.Properties.DiskState == armcompute.DiskStateUnattached { return true, nil } ginkgo.By(fmt.Sprintf("current disk state(%v) is not in unattached state, 
wait and recheck", *disktest.Properties.DiskState)) diff --git a/test/utils/azure/azure_helpers.go b/test/utils/azure/azure_helpers.go index f4137a860f..a105a00d40 100644 --- a/test/utils/azure/azure_helpers.go +++ b/test/utils/azure/azure_helpers.go @@ -23,6 +23,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" network "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" @@ -80,7 +81,7 @@ func (az *Client) GetAzureDisksClient() (diskclient.Interface, error) { } func (az *Client) EnsureSSHPublicKey(ctx context.Context, resourceGroupName, location, keyName string) (publicKey string, err error) { - _, err = az.sshPublicKeysClient.Create(ctx, resourceGroupName, keyName, compute.SSHPublicKeyResource{Location: &location}) + _, err = az.sshPublicKeysClient.Create(ctx, resourceGroupName, keyName, armcompute.SSHPublicKeyResource{Location: &location}) if err != nil { return "", err } @@ -134,12 +135,12 @@ func (az *Client) DeleteResourceGroup(ctx context.Context, groupName string) err func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location, vmName string) (compute.VirtualMachine, error) { nic, err := az.EnsureNIC(ctx, groupName, location, vmName+"-nic", vmName+"-vnet", vmName+"-subnet") if err != nil { - return compute.VirtualMachine{}, err + return armcompute.VirtualMachine{}, err } publicKey, err := az.EnsureSSHPublicKey(ctx, groupName, location, "test-key") if err != nil { - return compute.VirtualMachine{}, err + return armcompute.VirtualMachine{}, err } resp, err := az.vmClient.CreateOrUpdate( @@ -148,25 +149,25 @@ func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location, vmName, compute.VirtualMachine{ Location: to.Ptr(location), - Properties: &compute.VirtualMachineProperties{ - HardwareProfile: &compute.HardwareProfile{ + Properties: &armcompute.VirtualMachineProperties{ + HardwareProfile: &armcompute.HardwareProfile{ VMSize: to.Ptr(compute.VirtualMachineSizeTypesStandardDS2V2), }, - StorageProfile: &compute.StorageProfile{ - ImageReference: &compute.ImageReference{ + StorageProfile: &armcompute.StorageProfile{ + ImageReference: &armcompute.ImageReference{ Publisher: to.Ptr("Canonical"), Offer: to.Ptr("UbuntuServer"), SKU: to.Ptr("16.04.0-LTS"), Version: to.Ptr("latest"), }, }, - OSProfile: &compute.OSProfile{ + OSProfile: &armcompute.OSProfile{ ComputerName: to.Ptr(vmName), AdminUsername: to.Ptr("azureuser"), AdminPassword: to.Ptr("Azureuser1234"), - LinuxConfiguration: &compute.LinuxConfiguration{ + LinuxConfiguration: &armcompute.LinuxConfiguration{ DisablePasswordAuthentication: to.Ptr(true), - SSH: &compute.SSHConfiguration{ + SSH: &armcompute.SSHConfiguration{ PublicKeys: []*compute.SSHPublicKey{ { Path: to.Ptr("/home/azureuser/.ssh/authorized_keys"), @@ -176,11 +177,11 @@ func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location, }, }, }, - NetworkProfile: &compute.NetworkProfile{ + NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*compute.NetworkInterfaceReference{ { ID: nic.ID, - Properties: &compute.NetworkInterfaceReferenceProperties{ + Properties: &armcompute.NetworkInterfaceReferenceProperties{ Primary: to.Ptr(true), }, }, @@ -190,7 +191,7 @@ func (az *Client) EnsureVirtualMachine(ctx 
context.Context, groupName, location, }, ) if err != nil { - return compute.VirtualMachine{}, fmt.Errorf("cannot create vm: %v", err) + return armcompute.VirtualMachine{}, fmt.Errorf("cannot create vm: %v", err) } return *resp, nil diff --git a/vendor/modules.txt b/vendor/modules.txt index 8b95cd5585..ecc2eb1ce0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1571,7 +1571,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240114181025-ca41d9562e6c +# sigs.k8s.io/cloud-provider-azure v1.29.0 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient @@ -1625,7 +1625,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/retry sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240114181025-ca41d9562e6c +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240117080718-1ef87a727047 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient @@ -1634,6 +1634,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient +sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient @@ -1665,7 +1666,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240114181025-ca41d9562e6c +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240117080718-1ef87a727047 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go new file mode 100644 index 0000000000..e8d8af8340 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go @@ -0,0 +1,45 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package diskclient + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" +) + +const PatchOperationName = "DisksClient.Patch" + +func (client *Client) Patch(ctx context.Context, resourceGroupName string, resourceName string, parameters armcompute.DiskUpdate) (result *armcompute.Disk, err error) { + ctx = utils.ContextWithClientName(ctx, "DisksClient") + ctx = utils.ContextWithRequestMethod(ctx, "Patch") + ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) + ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) + resp, err := utils.NewPollerWrapper(client.DisksClient.BeginUpdate(ctx, resourceGroupName, resourceName, parameters, nil)).WaitforPollerResp(ctx) + if err != nil { + return nil, err + } + if resp != nil { + return &resp.Disk, nil + } + return nil, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go index af3d0a9032..9b9f85e10f 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go @@ -18,6 +18,8 @@ limitations under the License. package diskclient import ( + "context" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -29,4 +31,5 @@ type Interface interface { utils.CreateOrUpdateFunc[armcompute.Disk] utils.DeleteFunc[armcompute.Disk] utils.ListFunc[armcompute.Disk] + Patch(ctx context.Context, resourceGroupName string, resourceName string, parameters armcompute.DiskUpdate) (result *armcompute.Disk, err error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient/interface.go new file mode 100644 index 0000000000..c0050b159b --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient/interface.go @@ -0,0 +1,131 @@ +// /* +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ + +// Code generated by MockGen. DO NOT EDIT. +// Source: diskclient/interface.go +// +// Generated by this command: +// +// mockgen -package mock_diskclient -source diskclient/interface.go +// + +// Package mock_diskclient is a generated GoMock package. +package mock_diskclient + +import ( + context "context" + reflect "reflect" + + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + gomock "go.uber.org/mock/gomock" +) + +// MockInterface is a mock of Interface interface. 
+type MockInterface struct { + ctrl *gomock.Controller + recorder *MockInterfaceMockRecorder +} + +// MockInterfaceMockRecorder is the mock recorder for MockInterface. +type MockInterfaceMockRecorder struct { + mock *MockInterface +} + +// NewMockInterface creates a new mock instance. +func NewMockInterface(ctrl *gomock.Controller) *MockInterface { + mock := &MockInterface{ctrl: ctrl} + mock.recorder = &MockInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { + return m.recorder +} + +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, resourceName string, resourceParam armcompute.Disk) (*armcompute.Disk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, resourceName, resourceParam) + ret0, _ := ret[0].(*armcompute.Disk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, resourceName, resourceParam any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, resourceName, resourceParam) +} + +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, resourceName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, resourceName) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, resourceName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, resourceName) +} + +// Get mocks base method. +func (m *MockInterface) Get(ctx context.Context, resourceGroupName, resourceName string) (*armcompute.Disk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, resourceName) + ret0, _ := ret[0].(*armcompute.Disk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, resourceName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, resourceName) +} + +// List mocks base method. +func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]*armcompute.Disk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName) + ret0, _ := ret[0].([]*armcompute.Disk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List. +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) +} + +// Patch mocks base method. 
+func (m *MockInterface) Patch(ctx context.Context, resourceGroupName, resourceName string, parameters armcompute.DiskUpdate) (*armcompute.Disk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Patch", ctx, resourceGroupName, resourceName, parameters) + ret0, _ := ret[0].(*armcompute.Disk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Patch indicates an expected call of Patch. +func (mr *MockInterfaceMockRecorder) Patch(ctx, resourceGroupName, resourceName, parameters any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockInterface)(nil).Patch), ctx, resourceGroupName, resourceName, parameters) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go index d2b159e579..f51b85dc8c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go @@ -79,6 +79,7 @@ type ClientFactory interface { GetSecretClient() secretclient.Interface GetSecurityGroupClient() securitygroupclient.Interface GetSnapshotClient() snapshotclient.Interface + GetSnapshotClientForSub(subscriptionID string) (snapshotclient.Interface, error) GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface GetSubnetClient() subnetclient.Interface GetVaultClient() vaultclient.Interface diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go index c1aed81d6e..1c2f36ee19 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go @@ -86,7 +86,7 @@ type ClientFactoryImpl struct { routetableclientInterface routetableclient.Interface secretclientInterface secretclient.Interface securitygroupclientInterface securitygroupclient.Interface - snapshotclientInterface snapshotclient.Interface + snapshotclientInterface sync.Map sshpublickeyresourceclientInterface sshpublickeyresourceclient.Interface subnetclientInterface subnetclient.Interface vaultclientInterface vaultclient.Interface @@ -247,7 +247,7 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c } //initialize snapshotclient - factory.snapshotclientInterface, err = factory.createSnapshotClient(config.SubscriptionID) + _, err = factory.GetSnapshotClientForSub(config.SubscriptionID) if err != nil { return nil, err } @@ -888,7 +888,24 @@ func (factory *ClientFactoryImpl) createSnapshotClient(subscription string) (sna } func (factory *ClientFactoryImpl) GetSnapshotClient() snapshotclient.Interface { - return factory.snapshotclientInterface + clientImp, _ := factory.snapshotclientInterface.Load(strings.ToLower(factory.facotryConfig.SubscriptionID)) + return clientImp.(snapshotclient.Interface) +} +func (factory *ClientFactoryImpl) GetSnapshotClientForSub(subscriptionID string) (snapshotclient.Interface, error) { + if subscriptionID == "" { + subscriptionID = factory.facotryConfig.SubscriptionID + } + clientImp, loaded := factory.snapshotclientInterface.Load(strings.ToLower(subscriptionID)) + if loaded { + return clientImp.(snapshotclient.Interface), nil + } + //It's not thread safe, but it's ok for now. because it will be called once. 
+ clientImp, err := factory.createSnapshotClient(subscriptionID) + if err != nil { + return nil, err + } + factory.snapshotclientInterface.Store(strings.ToLower(subscriptionID), clientImp) + return clientImp.(snapshotclient.Interface), nil } func (factory *ClientFactoryImpl) createSSHPublicKeyResourceClient(subscription string) (sshpublickeyresourceclient.Interface, error) { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go index 763b5b3389..8e0445440f 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go @@ -482,6 +482,21 @@ func (mr *MockClientFactoryMockRecorder) GetSnapshotClient() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotClient", reflect.TypeOf((*MockClientFactory)(nil).GetSnapshotClient)) } +// GetSnapshotClientForSub mocks base method. +func (m *MockClientFactory) GetSnapshotClientForSub(arg0 string) (snapshotclient.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnapshotClientForSub", arg0) + ret0, _ := ret[0].(snapshotclient.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSnapshotClientForSub indicates an expected call of GetSnapshotClientForSub. +func (mr *MockClientFactoryMockRecorder) GetSnapshotClientForSub(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotClientForSub", reflect.TypeOf((*MockClientFactory)(nil).GetSnapshotClientForSub), arg0) +} + // GetSubnetClient mocks base method. func (m *MockClientFactory) GetSubnetClient() subnetclient.Interface { m.ctrl.T.Helper() diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go index 1cae1fb807..b6452e6c31 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// +azure:client:verbs=get;createorupdate;delete,resource=Snapshot,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SnapshotsClient,expand=false,rateLimitKey=snapshotRateLimit +// +azure:client:verbs=get;createorupdate;delete,resource=Snapshot,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SnapshotsClient,expand=false,rateLimitKey=snapshotRateLimit,crossSubFactory=true type Interface interface { utils.GetFunc[armcompute.Snapshot] utils.CreateOrUpdateFunc[armcompute.Snapshot] diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go index f983ea0774..28fc17a9e7 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go @@ -1471,7 +1471,7 @@ func (az *Cloud) getActiveNodesByLoadBalancerName(lbName string) sets.Set[string defer az.multipleStandardLoadBalancersActiveNodesLock.Unlock() for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { - if strings.EqualFold(strings.TrimSuffix(lbName, 
consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { return multiSLBConfig.ActiveNodes } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go index 2e5bdbc752..48b0989d95 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go @@ -22,6 +22,7 @@ import ( "net/http" "strings" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" @@ -269,7 +270,7 @@ func (as *availabilitySet) updateCache(nodeName string, vm *compute.VirtualMachi } // GetDataDisks gets a list of data disks attached to the node. -func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { vm, err := as.getVirtualMachine(nodeName, crt) if err != nil { return nil, nil, err @@ -279,5 +280,9 @@ func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.Azu return nil, nil, nil } - return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil + result, err := ToArmcomputeDisk(*vm.StorageProfile.DataDisks) + if err != nil { + return nil, nil, err + } + return result, vm.ProvisioningState, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index 2ea827a61a..064961a473 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -22,6 +22,7 @@ import ( "net/http" "strings" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" @@ -286,7 +287,7 @@ func (ss *ScaleSet) UpdateVMAsync(ctx context.Context, nodeName types.NodeName) } // GetDataDisks gets a list of data disks attached to the node. 
-func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { vm, err := ss.getVmssVM(string(nodeName), crt) if err != nil { return nil, nil, err @@ -298,8 +299,11 @@ func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCache if storageProfile == nil || storageProfile.DataDisks == nil { return nil, nil, nil } - - return *storageProfile.DataDisks, vm.AsVirtualMachineScaleSetVM().ProvisioningState, nil + result, err := ToArmcomputeDisk(*storageProfile.DataDisks) + if err != nil { + return nil, nil, err + } + return result, vm.AsVirtualMachineScaleSetVM().ProvisioningState, nil } return nil, nil, nil diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go index b55c0ca31f..6352928875 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go @@ -23,6 +23,7 @@ import ( "strings" "sync" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" "k8s.io/apimachinery/pkg/types" @@ -297,7 +298,7 @@ func (fs *FlexScaleSet) updateCache(nodeName string, vm *compute.VirtualMachine) } // GetDataDisks gets a list of data disks attached to the node. -func (fs *FlexScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (fs *FlexScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { vm, err := fs.getVmssFlexVM(string(nodeName), crt) if err != nil { return nil, nil, err @@ -306,6 +307,9 @@ func (fs *FlexScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureC if vm.StorageProfile.DataDisks == nil { return nil, nil, nil } - - return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil + result, err := ToArmcomputeDisk(*vm.StorageProfile.DataDisks) + if err != nil { + return nil, nil, err + } + return result, vm.ProvisioningState, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index 7f2fb523be..1649ee008d 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -394,10 +394,10 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust return false } - lbName := strings.TrimSuffix(currLBName, consts.InternalLoadBalancerNameSuffix) + lbName := trimSuffixIgnoreCase(currLBName, consts.InternalLoadBalancerNameSuffix) // change the LB from vmSet dedicated to primary if the vmSet becomes the primary one if strings.EqualFold(lbName, vmSetName) { - if lbName != clusterName && + if !strings.EqualFold(lbName, clusterName) && strings.EqualFold(az.VMSet.GetPrimaryVMSetName(), vmSetName) { klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one", service.Name, currLBName, clusterName) return true @@ -593,7 +593,7 @@ func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, 
clusterName, vm // Remove corresponding nodes in ActiveNodes and nodesWithCorrectLoadBalancerByPrimaryVMSet. for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold( - strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), + trimSuffixIgnoreCase(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name, ) { if az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes != nil { @@ -1597,7 +1597,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( lbName, ruleName, svcName, ) for i := range az.MultipleStandardLoadBalancerConfigurations { - if strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name) { + if strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() if az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices == nil { az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = sets.New[string]() @@ -1966,7 +1966,7 @@ func (az *Cloud) accommodateNodesByPrimaryVMSet( multiSLBConfig := az.MultipleStandardLoadBalancerConfigurations[i] if strings.EqualFold(multiSLBConfig.PrimaryVMSet, vmSetName) { foundPrimaryLB := isLBInList(lbs, multiSLBConfig.Name) - if !foundPrimaryLB && !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if !foundPrimaryLB && !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { klog.V(4).Infof("accommodateNodesByPrimaryVMSet: node(%s) should be on lb(%s) because of primary vmSet (%s), but the lb is not found and will not be created this time, will ignore the primaryVMSet", node.Name, multiSLBConfig.Name, vmSetName) continue } @@ -2018,7 +2018,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( if nodeSelector.Matches(labels.Set(node.Labels)) { klog.V(4).Infof("accommodateNodesByNodeSelector: lb(%s) matches node(%s) labels", multiSLBConfig.Name, node.Name) found := isLBInList(lbs, multiSLBConfig.Name) - if !found && !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if !found && !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { klog.V(4).Infof("accommodateNodesByNodeSelector: but the lb is not found and will not be created this time, will ignore this load balancer") continue } @@ -2039,7 +2039,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( for i := len(eligibleLBsIDX) - 1; i >= 0; i-- { multiSLBConfig := az.MultipleStandardLoadBalancerConfigurations[eligibleLBsIDX[i]] found := isLBInList(lbs, multiSLBConfig.Name) - if !found && !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if !found && !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { klog.V(4).Infof("accommodateNodesByNodeSelector: the load balancer %s is a valid placement target for node %s, but the lb is not found and will not be created this time, ignore this load balancer", multiSLBConfig.Name, node.Name) eligibleLBsIDX = append(eligibleLBsIDX[:i], eligibleLBsIDX[i+1:]...) 
} @@ -2088,7 +2088,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( func isLBInList(lbs *[]network.LoadBalancer, lbConfigName string) bool { if lbs != nil { for _, lb := range *lbs { - if strings.EqualFold(strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), lbConfigName) { + if strings.EqualFold(trimSuffixIgnoreCase(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), lbConfigName) { return true } } @@ -2135,7 +2135,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerBackendNodes( } func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb bool, svcName, lbName string) { - lbName = strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix) + lbName = trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix) for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold(lbName, az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() @@ -3793,7 +3793,7 @@ func getMostEligibleLBForService( var found bool if existingLBs != nil { for _, existingLB := range *existingLBs { - if strings.EqualFold(pointer.StringDeref(existingLB.Name, ""), eligibleLB) { + if strings.EqualFold(trimSuffixIgnoreCase(pointer.StringDeref(existingLB.Name, ""), consts.InternalLoadBalancerNameSuffix), eligibleLB) { found = true break } @@ -3810,7 +3810,7 @@ func getMostEligibleLBForService( ruleCount := 301 if existingLBs != nil { for _, existingLB := range *existingLBs { - if StringInSlice(pointer.StringDeref(existingLB.Name, ""), eligibleLBs) { + if StringInSlice(trimSuffixIgnoreCase(pointer.StringDeref(existingLB.Name, ""), consts.InternalLoadBalancerNameSuffix), eligibleLBs) { if existingLB.LoadBalancerPropertiesFormat != nil && existingLB.LoadBalancingRules != nil { if len(*existingLB.LoadBalancingRules) < ruleCount { @@ -3826,7 +3826,7 @@ func getMostEligibleLBForService( klog.V(4).Infof("getMostEligibleLBForService: choose %s with fewest %d rules", expectedLBName, ruleCount) } - return expectedLBName + return trimSuffixIgnoreCase(expectedLBName, consts.InternalLoadBalancerNameSuffix) } func (az *Cloud) getServiceCurrentLoadBalancerName(service *v1.Service) string { @@ -3844,9 +3844,8 @@ func (az *Cloud) getServiceCurrentLoadBalancerName(service *v1.Service) string { // 2. AllowServicePlacement flag. Default to true, if set to false, the new services will not be put onto the LB. // But the existing services that is using the LB will not be affected. // 3. ServiceLabelSelector. The service will be put onto the LB only if the service has the labels specified in the selector. -// If there is no ServiceLabel selector on the LB, all services can be valid. // 4. ServiceNamespaceSelector. The service will be put onto the LB only if the service is in the namespaces specified in the selector. -// If there is no ServiceNamespace selector on the LB, all services can be valid. +// 5. If there is no label/namespace selector on the LB, it can be a valid placement target if and only if the service has no other choice. func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]string, error) { var ( eligibleLBs []MultipleStandardLoadBalancerConfiguration @@ -3857,6 +3856,10 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri lbFailedPlacementFlag []string ) + logger := klog.Background(). + WithName("getEligibleLoadBalancersForService"). + WithValues("service", service.Name) + // 1. 
Service selects LBs defined in the annotation. // If there is no annotation given, it selects all LBs. lbsFromAnnotation := consts.GetLoadBalancerConfigurationsNames(service) @@ -3864,7 +3867,8 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri lbNamesSet := sets.New[string](lbsFromAnnotation...) for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { if lbNamesSet.Has(strings.ToLower(multiSLBConfig.Name)) { - klog.V(4).Infof("getEligibleLoadBalancersForService: service %q selects load balancer %q by annotation", service.Name, multiSLBConfig.Name) + logger.V(4).Info("selects the load balancer by annotation", + "load balancer configuration name", multiSLBConfig.Name) eligibleLBs = append(eligibleLBs, multiSLBConfig) lbSelectedByAnnotation = append(lbSelectedByAnnotation, multiSLBConfig.Name) } @@ -3873,13 +3877,14 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri return nil, fmt.Errorf("service %q selects %d load balancers by annotation, but none of them is defined in cloud provider configuration", service.Name, len(lbsFromAnnotation)) } } else { - klog.V(4).Infof("getEligibleLoadBalancersForService: service %q does not select any load balancer by annotation, all load balancers are eligible", service.Name) + logger.V(4).Info("the service does not select any load balancer by annotation, all load balancers are eligible") eligibleLBs = append(eligibleLBs, az.MultipleStandardLoadBalancerConfigurations...) for _, eligibleLB := range eligibleLBs { lbSelectedByAnnotation = append(lbSelectedByAnnotation, eligibleLB.Name) } } + var selectorMatched bool for i := len(eligibleLBs) - 1; i >= 0; i-- { eligibleLB := eligibleLBs[i] @@ -3887,9 +3892,11 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri // unless the service is already using the LB. if !pointer.BoolDeref(eligibleLB.AllowServicePlacement, true) { if az.isLoadBalancerInUseByService(service, eligibleLB) { - klog.V(4).Infof("getEligibleLoadBalancersForService: although load balancer %q has AllowServicePlacement=false, service %q is allowed to be placed on load balancer %q because it is using the load balancer", eligibleLB.Name, service.Name, eligibleLB.Name) + logger.V(4).Info("although the load balancer has AllowServicePlacement=false, service is allowed to be placed on load balancer because it is using the load balancer", + "load balancer configuration name", eligibleLB.Name) } else { - klog.V(4).Infof("getEligibleLoadBalancersForService: service %q is not allowed to be placed on load balancer %q", service.Name, eligibleLB.Name) + logger.V(4).Info("the load balancer has AllowServicePlacement=false, service is not allowed to be placed on load balancer", + "load balancer configuration name", eligibleLB.Name) eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) 
lbFailedPlacementFlag = append(lbFailedPlacementFlag, eligibleLB.Name) continue @@ -3901,15 +3908,23 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri if eligibleLB.ServiceLabelSelector != nil { serviceLabelSelector, err := metav1.LabelSelectorAsSelector(eligibleLB.ServiceLabelSelector) if err != nil { - klog.Errorf("Failed to parse label selector %q for load balancer %q: %s", eligibleLB.ServiceLabelSelector.String(), eligibleLB.Name, err.Error()) + logger.Error(err, "failed to parse label selector", + "label selector", eligibleLB.ServiceLabelSelector.String(), + "load balancer configuration name", eligibleLB.Name) return []string{}, err } if !serviceLabelSelector.Matches(labels.Set(service.Labels)) { - klog.V(2).Infof("getEligibleLoadBalancersForService: service %q does not match label selector %q for load balancer %q", service.Name, eligibleLB.ServiceLabelSelector.String(), eligibleLB.Name) + logger.V(2).Info("service does not match the label selector", + "label selector", eligibleLB.ServiceLabelSelector.String(), + "load balancer configuration name", eligibleLB.Name) eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) lbFailedLabelSelector = append(lbFailedLabelSelector, eligibleLB.Name) continue } + logger.V(4).Info("service matches the label selector", + "label selector", eligibleLB.ServiceLabelSelector.String(), + "load balancer configuration name", eligibleLB.Name) + selectorMatched = true } // 4. Check the service namespace selector. The service can be migrated from one LB to another LB @@ -3917,20 +3932,32 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri if eligibleLB.ServiceNamespaceSelector != nil { serviceNamespaceSelector, err := metav1.LabelSelectorAsSelector(eligibleLB.ServiceNamespaceSelector) if err != nil { - klog.Errorf("Failed to parse namespace selector %q for load balancer %q: %s", eligibleLB.ServiceNamespaceSelector.String(), eligibleLB.Name, err.Error()) + logger.Error(err, "failed to parse namespace selector", + "namespace selector", eligibleLB.ServiceNamespaceSelector.String(), + "load balancer configuration name", eligibleLB.Name) return []string{}, err } ns, err := az.KubeClient.CoreV1().Namespaces().Get(context.Background(), service.Namespace, metav1.GetOptions{}) if err != nil { - klog.Errorf("Failed to get namespace %q for load balancer %q: %s", service.Namespace, eligibleLB.Name, err.Error()) + logger.Error(err, "failed to get namespace", + "namespace", service.Namespace, + "load balancer configuration name", eligibleLB.Name) return []string{}, err } if !serviceNamespaceSelector.Matches(labels.Set(ns.Labels)) { - klog.V(2).Infof("getEligibleLoadBalancersForService: namespace %q does not match namespace selector %q for load balancer %q", service.Namespace, eligibleLB.ServiceNamespaceSelector.String(), eligibleLB.Name) + logger.V(2).Info("namespace does not match the namespace selector", + "namespace", service.Namespace, + "namespace selector", eligibleLB.ServiceNamespaceSelector.String(), + "load balancer configuration name", eligibleLB.Name) eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) 
lbFailedNamespaceSelector = append(lbFailedNamespaceSelector, eligibleLB.Name) continue } + logger.V(4).Info("namespace matches the namespace selector", + "namespace", service.Namespace, + "namespace selector", eligibleLB.ServiceNamespaceSelector.String(), + "load balancer configuration name", eligibleLB.Name) + selectorMatched = true } } @@ -3950,6 +3977,19 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri ) } + if selectorMatched { + for i := len(eligibleLBs) - 1; i >= 0; i-- { + eligibleLB := eligibleLBs[i] + if eligibleLB.ServiceLabelSelector == nil && eligibleLB.ServiceNamespaceSelector == nil { + logger.V(6).Info("service matches at least one label/namespace selector of the load balancer, so it should not be placed on the load balancer that does not have any label/namespace selector", + "load balancer configuration name", eligibleLB.Name) + eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) + } + } + } else { + logger.V(4).Info("no load balancer that has label/namespace selector matches the service, so the service can be placed on the load balancers that do not have label/namespace selector") + } + for _, eligibleLB := range eligibleLBs { eligibleLBNames = append(eligibleLBNames, eligibleLB.Name) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go index 83ac584872..c34b4738da 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go @@ -110,18 +110,18 @@ func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterNa } for agentPoolVMSetName := range agentPoolVMSetNamesMap { - managedLBNames.Insert(az.mapVMSetNameToLoadBalancerName(agentPoolVMSetName, clusterName)) + managedLBNames.Insert(strings.ToLower(az.mapVMSetNameToLoadBalancerName(agentPoolVMSetName, clusterName))) } } if az.useMultipleStandardLoadBalancers() { for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { - managedLBNames.Insert(multiSLBConfig.Name, fmt.Sprintf("%s%s", multiSLBConfig.Name, consts.InternalLoadBalancerNameSuffix)) + managedLBNames.Insert(strings.ToLower(multiSLBConfig.Name), fmt.Sprintf("%s%s", strings.ToLower(multiSLBConfig.Name), consts.InternalLoadBalancerNameSuffix)) } } for _, lb := range allLBs { - if managedLBNames.Has(strings.ToLower(strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix))) { + if managedLBNames.Has(trimSuffixIgnoreCase(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix)) { managedLBs = append(managedLBs, lb) klog.V(4).Infof("ListManagedLBs: found managed LB %s", pointer.StringDeref(lb.Name, "")) } @@ -372,7 +372,7 @@ func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []strin } newLBName := matches[1] - newLBNameTrimmed := strings.TrimSuffix(newLBName, consts.InternalLoadBalancerNameSuffix) + newLBNameTrimmed := trimSuffixIgnoreCase(newLBName, consts.InternalLoadBalancerNameSuffix) for _, backendPool := range existingBackendPools { matches := backendPoolIDRE.FindStringSubmatch(backendPool) if len(matches) != 2 { @@ -380,7 +380,7 @@ func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []strin } lbName := matches[1] - if !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), newLBNameTrimmed) { + if 
!strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), newLBNameTrimmed) { return false, lbName, nil } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go index 833fb38bc9..40daae0d74 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go @@ -1,30 +1,40 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - +// /* +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: azure_vmsets.go +// +// Generated by this command: +// +// mockgen -package provider -source azure_vmsets.go -self_package sigs.k8s.io/cloud-provider-azure/pkg/provider -copyright_file ../../hack/boilerplate/boilerplate.generatego.txt +// + +// Package provider is a generated GoMock package. package provider import ( context "context" reflect "reflect" + v5 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" azure "github.com/Azure/go-autorest/autorest/azure" gomock "go.uber.org/mock/gomock" - v1 "k8s.io/api/core/v1" types "k8s.io/apimachinery/pkg/types" cloud_provider "k8s.io/cloud-provider" @@ -65,7 +75,7 @@ func (m *MockVMSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis } // AttachDisk indicates an expected call of AttachDisk. -func (mr *MockVMSetMockRecorder) AttachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) AttachDisk(ctx, nodeName, diskMap any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachDisk", reflect.TypeOf((*MockVMSet)(nil).AttachDisk), ctx, nodeName, diskMap) } @@ -79,7 +89,7 @@ func (m *MockVMSet) DeleteCacheForNode(nodeName string) error { } // DeleteCacheForNode indicates an expected call of DeleteCacheForNode. 
-func (mr *MockVMSetMockRecorder) DeleteCacheForNode(nodeName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) DeleteCacheForNode(nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCacheForNode", reflect.TypeOf((*MockVMSet)(nil).DeleteCacheForNode), nodeName) } @@ -93,7 +103,7 @@ func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis } // DetachDisk indicates an expected call of DetachDisk. -func (mr *MockVMSetMockRecorder) DetachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) DetachDisk(ctx, nodeName, diskMap any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachDisk", reflect.TypeOf((*MockVMSet)(nil).DetachDisk), ctx, nodeName, diskMap) } @@ -108,7 +118,7 @@ func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolIDs } // EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted. -func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolIDs, vmSetName, backendAddressPools, deleteFromVMSet interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolIDs, vmSetName, backendAddressPools, deleteFromVMSet any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolIDs, vmSetName, backendAddressPools, deleteFromVMSet) } @@ -122,7 +132,7 @@ func (m *MockVMSet) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string] } // EnsureBackendPoolDeletedFromVMSets indicates an expected call of EnsureBackendPoolDeletedFromVMSets. -func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolIDs interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeletedFromVMSets", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeletedFromVMSets), vmSetNamesMap, backendPoolIDs) } @@ -140,7 +150,7 @@ func (m *MockVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam } // EnsureHostInPool indicates an expected call of EnsureHostInPool. -func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostInPool), service, nodeName, backendPoolID, vmSetName) } @@ -154,7 +164,7 @@ func (m *MockVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac } // EnsureHostsInPool indicates an expected call of EnsureHostsInPool. 
-func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostsInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostsInPool), service, nodes, backendPoolID, vmSetName) } @@ -169,23 +179,23 @@ func (m *MockVMSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) } // GetAgentPoolVMSetNames indicates an expected call of GetAgentPoolVMSetNames. -func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAgentPoolVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetAgentPoolVMSetNames), nodes) } // GetDataDisks mocks base method. -func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, crt cache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, crt cache.AzureCacheReadType) ([]*v5.DataDisk, *string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDataDisks", nodeName, crt) - ret0, _ := ret[0].([]compute.DataDisk) + ret0, _ := ret[0].([]*v5.DataDisk) ret1, _ := ret[1].(*string) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // GetDataDisks indicates an expected call of GetDataDisks. -func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, crt interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, crt any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataDisks", reflect.TypeOf((*MockVMSet)(nil).GetDataDisks), nodeName, crt) } @@ -201,7 +211,7 @@ func (m *MockVMSet) GetIPByNodeName(name string) (string, string, error) { } // GetIPByNodeName indicates an expected call of GetIPByNodeName. -func (mr *MockVMSetMockRecorder) GetIPByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetIPByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetIPByNodeName), name) } @@ -216,7 +226,7 @@ func (m *MockVMSet) GetInstanceIDByNodeName(name string) (string, error) { } // GetInstanceIDByNodeName indicates an expected call of GetInstanceIDByNodeName. -func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceIDByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceIDByNodeName), name) } @@ -231,7 +241,7 @@ func (m *MockVMSet) GetInstanceTypeByNodeName(name string) (string, error) { } // GetInstanceTypeByNodeName indicates an expected call of GetInstanceTypeByNodeName. 
-func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceTypeByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceTypeByNodeName), name) } @@ -247,7 +257,7 @@ func (m *MockVMSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, e } // GetNodeCIDRMasksByProviderID indicates an expected call of GetNodeCIDRMasksByProviderID. -func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeCIDRMasksByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeCIDRMasksByProviderID), providerID) } @@ -263,7 +273,7 @@ func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (st } // GetNodeNameByIPConfigurationID indicates an expected call of GetNodeNameByIPConfigurationID. -func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByIPConfigurationID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByIPConfigurationID), ipConfigurationID) } @@ -278,7 +288,7 @@ func (m *MockVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, } // GetNodeNameByProviderID indicates an expected call of GetNodeNameByProviderID. -func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByProviderID), providerID) } @@ -293,7 +303,7 @@ func (m *MockVMSet) GetNodeVMSetName(node *v1.Node) (string, error) { } // GetNodeVMSetName indicates an expected call of GetNodeVMSetName. -func (mr *MockVMSetMockRecorder) GetNodeVMSetName(node interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeVMSetName(node any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetNodeVMSetName), node) } @@ -308,7 +318,7 @@ func (m *MockVMSet) GetPowerStatusByNodeName(name string) (string, error) { } // GetPowerStatusByNodeName indicates an expected call of GetPowerStatusByNodeName. -func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerStatusByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPowerStatusByNodeName), name) } @@ -323,7 +333,7 @@ func (m *MockVMSet) GetPrimaryInterface(nodeName string) (network.Interface, err } // GetPrimaryInterface indicates an expected call of GetPrimaryInterface. 
-func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryInterface", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryInterface), nodeName) } @@ -352,7 +362,7 @@ func (m *MockVMSet) GetPrivateIPsByNodeName(name string) ([]string, error) { } // GetPrivateIPsByNodeName indicates an expected call of GetPrivateIPsByNodeName. -func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateIPsByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPrivateIPsByNodeName), name) } @@ -367,7 +377,7 @@ func (m *MockVMSet) GetProvisioningStateByNodeName(name string) (string, error) } // GetProvisioningStateByNodeName indicates an expected call of GetProvisioningStateByNodeName. -func (mr *MockVMSetMockRecorder) GetProvisioningStateByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetProvisioningStateByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvisioningStateByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetProvisioningStateByNodeName), name) } @@ -382,7 +392,7 @@ func (m *MockVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]str } // GetVMSetNames indicates an expected call of GetVMSetNames. -func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetVMSetNames), service, nodes) } @@ -397,7 +407,7 @@ func (m *MockVMSet) GetZoneByNodeName(name string) (cloud_provider.Zone, error) } // GetZoneByNodeName indicates an expected call of GetZoneByNodeName. -func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetZoneByNodeName), name) } @@ -411,7 +421,7 @@ func (m *MockVMSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error } // UpdateVM indicates an expected call of UpdateVM. -func (mr *MockVMSetMockRecorder) UpdateVM(ctx, nodeName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) UpdateVM(ctx, nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVM", reflect.TypeOf((*MockVMSet)(nil).UpdateVM), ctx, nodeName) } @@ -426,7 +436,7 @@ func (m *MockVMSet) UpdateVMAsync(ctx context.Context, nodeName types.NodeName) } // UpdateVMAsync indicates an expected call of UpdateVMAsync. 
-func (mr *MockVMSetMockRecorder) UpdateVMAsync(ctx, nodeName interface{}) *gomock.Call {
+func (mr *MockVMSetMockRecorder) UpdateVMAsync(ctx, nodeName any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVMAsync", reflect.TypeOf((*MockVMSet)(nil).UpdateVMAsync), ctx, nodeName)
 }
@@ -440,7 +450,7 @@ func (m *MockVMSet) WaitForUpdateResult(ctx context.Context, future *azure.Futur
 }
 
 // WaitForUpdateResult indicates an expected call of WaitForUpdateResult.
-func (mr *MockVMSetMockRecorder) WaitForUpdateResult(ctx, future, nodeName, source interface{}) *gomock.Call {
+func (mr *MockVMSetMockRecorder) WaitForUpdateResult(ctx, future, nodeName, source any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockVMSet)(nil).WaitForUpdateResult), ctx, future, nodeName, source)
 }
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
index ca2a1bdaea..7024356521 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
@@ -121,7 +121,7 @@ func (az *Cloud) getNetworkResourceSubscriptionID() string {
 }
 
 func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
-	vmSetName = strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix)
+	vmSetName = trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix)
 	if strings.EqualFold(clusterName, vmSetName) {
 		vmSetName = az.VMSet.GetPrimaryVMSetName()
 	}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
index 912f2d88ce..89b74d42d7 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
@@ -18,11 +18,14 @@ package provider
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"net"
 	"strings"
 	"sync"
 
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
 	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network"
 
 	v1 "k8s.io/api/core/v1"
@@ -566,3 +569,36 @@ func getResourceGroupAndNameFromNICID(ipConfigurationID string) (string, string,
 func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
 	return strings.HasSuffix(strings.ToLower(*lb.Name), consts.InternalLoadBalancerNameSuffix)
 }
+
+// trimSuffixIgnoreCase trims the suffix from the string, case-insensitive.
+// It returns the original string if the suffix is not found.
+// The returning string is in lower case.
+func trimSuffixIgnoreCase(str, suf string) string {
+	str = strings.ToLower(str)
+	suf = strings.ToLower(suf)
+	if strings.HasSuffix(str, suf) {
+		return strings.TrimSuffix(str, suf)
+	}
+	return str
+}
+
+// ToArmcomputeDisk converts compute.DataDisk to armcompute.DataDisk
+// This is a workaround during track2 migration.
+// TODO: remove this function after compute api is migrated to track2
+func ToArmcomputeDisk(disks []compute.DataDisk) ([]*armcompute.DataDisk, error) {
+	var result []*armcompute.DataDisk
+	for _, disk := range disks {
+		content, err := json.Marshal(disk)
+		if err != nil {
+			return nil, err
+		}
+		var dataDisk armcompute.DataDisk
+		err = json.Unmarshal(content, &dataDisk)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, &dataDisk)
+	}
+
+	return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
index c40a96f300..0ac8ae1919 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
@@ -19,6 +19,7 @@ package provider
 import (
 	"context"
 
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
 	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network"
 	"github.com/Azure/go-autorest/autorest/azure"
@@ -83,7 +84,7 @@ type VMSet interface {
 	WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error
 
 	// GetDataDisks gets a list of data disks attached to the node.
-	GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error)
+	GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error)
 
 	// UpdateVM updates a vm
 	UpdateVM(ctx context.Context, nodeName types.NodeName) error
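The trimSuffixIgnoreCase helper added to azure_utils.go above replaces the case-sensitive strings.TrimSuffix call in mapLoadBalancerNameToVMSet, so an internal load balancer whose "-internal" suffix differs in case is still mapped back to its VMSet name. A minimal standalone sketch of the behavior follows; the helper body is copied from the hunk, while the sample load-balancer names and the "-internal" literal standing in for consts.InternalLoadBalancerNameSuffix are made-up examples.

package main

import (
	"fmt"
	"strings"
)

// Copy of the helper added in azure_utils.go: trims suf from str
// case-insensitively and always returns the lower-cased result.
func trimSuffixIgnoreCase(str, suf string) string {
	str = strings.ToLower(str)
	suf = strings.ToLower(suf)
	if strings.HasSuffix(str, suf) {
		return strings.TrimSuffix(str, suf)
	}
	return str
}

func main() {
	// Stand-in for consts.InternalLoadBalancerNameSuffix.
	const internalSuffix = "-internal"

	// Old behavior: a mixed-case suffix is not trimmed.
	fmt.Println(strings.TrimSuffix("MyCluster-Internal", internalSuffix)) // MyCluster-Internal

	// New behavior: the suffix is trimmed regardless of case and the
	// result is lower-cased, so the later EqualFold comparison still matches.
	fmt.Println(trimSuffixIgnoreCase("MyCluster-Internal", internalSuffix)) // mycluster
	fmt.Println(trimSuffixIgnoreCase("mycluster", internalSuffix))          // mycluster (no suffix; lower-cased original)
}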
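ToArmcomputeDisk bridges the track-1 compute.DataDisk type and the track-2 armcompute.DataDisk type by marshalling each disk to JSON and unmarshalling it into the new type, which works as long as the two generated types agree on their JSON field names. A hedged usage sketch, assuming the function is reachable through the exported name of the vendored provider package; the disk name and LUN below are invented values, not taken from the patch.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
	provider "sigs.k8s.io/cloud-provider-azure/pkg/provider"
)

func main() {
	// Track-1 disks, shaped like the old GetDataDisks return value
	// (hypothetical name and LUN for illustration only).
	name := "pvc-disk-example"
	lun := int32(0)
	track1 := []compute.DataDisk{{Name: &name, Lun: &lun}}

	// Round-trip through JSON into the track-2 representation now
	// used by the GetDataDisks signature in the VMSet interface.
	track2, err := provider.ToArmcomputeDisk(track1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s lun %d\n", *track2[0].Name, *track2[0].Lun)
}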