Merge pull request #2155 from MartinForReal/shafan/track2
Refactor: migrate disk client to track2 sdk
k8s-ci-robot committed Jan 19, 2024
2 parents 6eb97e6 + b00c447 commit ba7f97a
Showing 39 changed files with 1,122 additions and 643 deletions.
28 changes: 14 additions & 14 deletions deploy/example/logs/0.2.0/csi-azuredisk-controller.log

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion deploy/example/logs/0.3.0/csi-azuredisk-controller.log
@@ -142,7 +142,7 @@ I0708 09:18:06.849897 1 utils.go:119] GRPC response: name:"disk.csi.azure.
I0708 09:18:06.859929 1 utils.go:112] GRPC call: /csi.v1.Controller/DeleteVolume
I0708 09:18:06.859949 1 utils.go:113] GRPC request: volume_id:"/subscriptions/.../resourceGroups/mc_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631"
I0708 09:18:06.859959 1 controllerserver.go:257] deleting azure disk(/subscriptions/.../resourceGroups/mc_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631)
-E0708 09:18:07.117134 1 utils.go:117] GRPC error: compute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status=<nil> Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631 is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
+E0708 09:18:07.117134 1 utils.go:117] GRPC error: armcompute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status=<nil> Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631 is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0."
I0708 09:18:13.587579 1 utils.go:112] GRPC call: /csi.v1.Identity/Probe
I0708 09:18:13.587699 1 utils.go:113] GRPC request:
I0708 09:18:13.587788 1 utils.go:119] GRPC response: ready:<value:true >
6 changes: 3 additions & 3 deletions go.mod
@@ -40,9 +40,9 @@ require (
k8s.io/mount-utils v0.29.0
k8s.io/pod-security-admission v0.0.0
k8s.io/utils v0.0.0-20231127182322-b307cd553661
-sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240114181025-ca41d9562e6c
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240114181025-ca41d9562e6c
-sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240114181025-ca41d9562e6c
+sigs.k8s.io/cloud-provider-azure v1.29.0
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240117080718-1ef87a727047
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240117080718-1ef87a727047
sigs.k8s.io/yaml v1.4.0
)

12 changes: 6 additions & 6 deletions go.sum
@@ -2037,12 +2037,12 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
-sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240114181025-ca41d9562e6c h1:5Q300VvljRNIT2LaKZ+xo9Z1NvAOKV1v007YxItji1Y=
-sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240114181025-ca41d9562e6c/go.mod h1:0WCrYlWxqk3/AptztkqPk1r9Gr3IULSHat7LipAA1sI=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240114181025-ca41d9562e6c h1:6RWYYBDabcs2L3bs+t5IPSDietkKTqDUqJ+drIhx6xk=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240114181025-ca41d9562e6c/go.mod h1:dhkW9GQLM9XJMvXcXziwy7QqDOLwaXU11FsGJyIpy+s=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240114181025-ca41d9562e6c h1:/UCu9sYXGn3zhT0/B74nxc3EE5lRNuDVJC8gCYqk+K8=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240114181025-ca41d9562e6c/go.mod h1:slvzU7aF5CMzat64pn/LdpgU4mLYBDQGqm1fjXMAp1Q=
+sigs.k8s.io/cloud-provider-azure v1.29.0 h1:lHk6AB+3XfURM7bbR+uABKeRcMC1TYreWA6GM5wUT6g=
+sigs.k8s.io/cloud-provider-azure v1.29.0/go.mod h1:0WCrYlWxqk3/AptztkqPk1r9Gr3IULSHat7LipAA1sI=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240117080718-1ef87a727047 h1:1Nvke3MeBdnYSuyRyQY4iLwaT566ZKUxTkr/YOZLz1o=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240117080718-1ef87a727047/go.mod h1:dhkW9GQLM9XJMvXcXziwy7QqDOLwaXU11FsGJyIpy+s=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240117080718-1ef87a727047 h1:4dmpJRuwCxou60ULbhFASF7FHhxyqkGsoGLtI7kR32w=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240117080718-1ef87a727047/go.mod h1:slvzU7aF5CMzat64pn/LdpgU4mLYBDQGqm1fjXMAp1Q=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
71 changes: 40 additions & 31 deletions pkg/azuredisk/azure_controller_common.go
@@ -27,6 +27,9 @@ import (
"sync"
"time"

"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
"github.com/Azure/go-autorest/autorest/azure"

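The hunk above swaps in the track2 packages: azcore (core types such as ResponseError), azcore/to (pointer helpers), and armcompute/v5 (the ARM compute models and clients). For orientation, a minimal, hedged sketch of how a track2 disks client is built and used directly; the subscription ID, resource group, and disk name are placeholders, and this PR obtains its clients through cloud-provider-azure's azclient.ClientFactory rather than constructing them like this:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	// DefaultAzureCredential resolves environment, managed-identity, or CLI credentials.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder subscription ID; the driver derives it from the disk URI at runtime.
	client, err := armcompute.NewDisksClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get(context.Background(), "<resource-group>", "<disk-name>", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Track2 models nest disk properties under a pointer-typed Properties field.
	if resp.Properties != nil && resp.Properties.DiskState != nil {
		log.Printf("disk state: %s", *resp.Properties.DiskState)
	}
}
```
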
@@ -37,6 +40,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/utils/pointer"

"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
"sigs.k8s.io/cloud-provider-azure/pkg/provider"
@@ -95,9 +99,10 @@
)

type controllerCommon struct {
-diskStateMap sync.Map // <diskURI, attaching/detaching state>
-lockMap *lockMap
-cloud *provider.Cloud
+diskStateMap sync.Map // <diskURI, attaching/detaching state>
+lockMap *lockMap
+cloud *provider.Cloud
+clientFactory azclient.ClientFactory
// disk queue that is waiting for attach or detach on specific node
// <nodeName, map<diskURI, *provider.AttachDiskOptions/DetachDiskOptions>>
attachDiskMap sync.Map
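
The new clientFactory field is what the rest of this change uses to reach per-subscription track2 clients. A short illustrative sketch (not part of the PR) of the consumption pattern that appears later in the checkDiskExists hunk, assuming the factory's disk client returns *armcompute.Disk as the azclient disk client interface does:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

// getDisk is an illustrative helper: ask the factory for a disks client scoped to the
// disk's subscription, then issue a track2 Get against the resource group and name.
func getDisk(ctx context.Context, factory azclient.ClientFactory, subsID, resourceGroup, diskName string) (*armcompute.Disk, error) {
	diskClient, err := factory.GetDiskClientForSub(subsID)
	if err != nil {
		return nil, err
	}
	return diskClient.Get(ctx, resourceGroup, diskName)
}
```
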
@@ -122,14 +127,14 @@ type ExtendedLocation struct {
// occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments
// return (lun, error)
func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName,
-cachingMode compute.CachingTypes, disk *compute.Disk, occupiedLuns []int) (int32, error) {
+cachingMode armcompute.CachingTypes, disk *armcompute.Disk, occupiedLuns []int) (int32, error) {
diskEncryptionSetID := ""
writeAcceleratorEnabled := false

// there is possibility that disk is nil when GetDisk is throttled
// don't check disk state when GetDisk is throttled
if disk != nil {
-if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
+if disk.ManagedBy != nil && (disk.Properties == nil || disk.Properties.MaxShares == nil || *disk.Properties.MaxShares <= 1) {
vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
if err != nil {
return -1, err
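
The signature now takes the track2 armcompute types, and because track2 no longer flattens DiskProperties onto the Disk struct, the shared-disk check above gains an extra nil guard on the nested Properties field. A minimal illustration of that access pattern (the helper name is hypothetical):

```go
package example

import "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"

// isSharedDisk reports whether a disk allows more than one attachment; in the track2
// model both Properties and MaxShares are pointers and must be checked before use.
func isSharedDisk(disk *armcompute.Disk) bool {
	if disk == nil || disk.Properties == nil || disk.Properties.MaxShares == nil {
		return false
	}
	return *disk.Properties.MaxShares > 1
}
```
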
@@ -155,22 +160,22 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI str
return -1, volerr.NewDanglingError(attachErr, attachedNode, "")
}

-if disk.DiskProperties != nil {
-if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone {
+if disk.Properties != nil {
+if disk.Properties.DiskSizeGB != nil && *disk.Properties.DiskSizeGB >= diskCachingLimit && cachingMode != armcompute.CachingTypesNone {
// Disk Caching is not supported for disks 4 TiB and larger
// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
-cachingMode = compute.CachingTypesNone
+cachingMode = armcompute.CachingTypesNone
klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None",
-diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit)
+diskURI, *disk.Properties.DiskSizeGB, diskCachingLimit)
}

-if disk.DiskProperties.Encryption != nil &&
-disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
-diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
+if disk.Properties.Encryption != nil &&
+disk.Properties.Encryption.DiskEncryptionSetID != nil {
+diskEncryptionSetID = *disk.Properties.Encryption.DiskEncryptionSetID
}

-if disk.DiskProperties.DiskState != compute.Unattached && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
-return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, disk.DiskProperties.DiskState, compute.Unattached)
+if disk.Properties.DiskState != nil && *disk.Properties.DiskState != armcompute.DiskStateUnattached && (disk.Properties.MaxShares == nil || *disk.Properties.MaxShares <= 1) {
+return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, *disk.Properties.DiskState, armcompute.DiskStateUnattached)
}
}

@@ -184,7 +189,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI str
options := provider.AttachDiskOptions{
Lun: -1,
DiskName: diskName,
-CachingMode: cachingMode,
+CachingMode: compute.CachingTypes(cachingMode),
DiskEncryptionSetID: diskEncryptionSetID,
WriteAcceleratorEnabled: writeAcceleratorEnabled,
}
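
provider.AttachDiskOptions in cloud-provider-azure still declares CachingMode with the track1 compute.CachingTypes, so the hunk above bridges the two SDKs with a plain type conversion; both enums are defined over string with the same values. A hedged sketch of that conversion in isolation:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
)

// toTrack1CachingType converts a track2 caching mode into the track1 enum; both types
// are string-backed with identical values ("None", "ReadOnly", "ReadWrite").
func toTrack1CachingType(mode armcompute.CachingTypes) compute.CachingTypes {
	return compute.CachingTypes(mode)
}
```
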
@@ -469,7 +474,7 @@ func (c *controllerCommon) cleanDetachDiskRequests(nodeName string) (map[string]
}

// GetNodeDataDisks invokes vmSet interfaces to get data disks for the node.
-func (c *controllerCommon) GetNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
+func (c *controllerCommon) GetNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) {
vmset, err := c.cloud.GetNodeVMSet(nodeName, crt)
if err != nil {
return nil, nil, err
@@ -611,8 +616,8 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
return attached, nil
}

-func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []compute.DataDisk) []compute.DataDisk {
-filteredDisks := []compute.DataDisk{}
+func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []*armcompute.DataDisk) []*armcompute.DataDisk {
+filteredDisks := []*armcompute.DataDisk{}
for _, disk := range unfilteredDisks {
filter := false
if disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil {
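
GetNodeDataDisks and filterNonExistingDisks now traffic in []*armcompute.DataDisk: track2 models and list results come back as slices of pointers rather than values, so callers need nil checks when ranging. A small illustrative sketch of that handling (not the PR's code):

```go
package example

import "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"

// dataDiskNames collects the names of attached data disks, skipping nil entries and
// unset fields as the pointer-based track2 model requires.
func dataDiskNames(disks []*armcompute.DataDisk) []string {
	names := make([]string, 0, len(disks))
	for _, d := range disks {
		if d == nil || d.Name == nil {
			continue
		}
		names = append(names, *d.Name)
	}
	return names
}
```
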
@@ -643,13 +648,17 @@ func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string)
return false, err
}

-if _, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName); rerr != nil {
-if rerr.HTTPStatusCode == http.StatusNotFound {
+diskClient, err := c.clientFactory.GetDiskClientForSub(subsID)
+if err != nil {
+return false, err
+}
+if _, err := diskClient.Get(ctx, resourceGroup, diskName); err != nil {
+var respErr = &azcore.ResponseError{}
+if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
return false, nil
}
-return false, rerr.Error()
+return false, err
}

return true, nil
}

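Error handling changes shape with track2: the track1 DisksClient returned an error object carrying an HTTPStatusCode field, while track2 clients return a plain error that wraps *azcore.ResponseError, so status checks go through errors.As as in the hunk above. The same pattern as a standalone, hedged sketch:

```go
package example

import (
	"errors"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// isNotFound reports whether a track2 SDK error corresponds to HTTP 404, mirroring
// the errors.As check added to checkDiskExists above.
func isNotFound(err error) bool {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		return respErr.StatusCode == http.StatusNotFound
	}
	return false
}
```
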
@@ -658,10 +667,10 @@ func vmUpdateRequired(future *azure.Future, err error) bool {
return configAccepted(future) && errCode == consts.OperationPreemptedErrorCode
}

-func getValidCreationData(subscriptionID, resourceGroup string, options *ManagedDiskOptions) (compute.CreationData, error) {
+func getValidCreationData(subscriptionID, resourceGroup string, options *ManagedDiskOptions) (armcompute.CreationData, error) {
if options.SourceResourceID == "" {
-return compute.CreationData{
-CreateOption: compute.Empty,
+return armcompute.CreationData{
+CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty),
PerformancePlus: options.PerformancePlus,
}, nil
}
@@ -678,21 +687,21 @@ func getValidCreationData(subscriptionID, resourceGroup string, options *Managed
sourceResourceID = fmt.Sprintf(managedDiskPath, subscriptionID, resourceGroup, sourceResourceID)
}
default:
-return compute.CreationData{
-CreateOption: compute.Empty,
+return armcompute.CreationData{
+CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty),
PerformancePlus: options.PerformancePlus,
}, nil
}

splits := strings.Split(sourceResourceID, "/")
if len(splits) > 9 {
if options.SourceType == sourceSnapshot {
-return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE)
+return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE)
}
-return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE)
+return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE)
}
-return compute.CreationData{
-CreateOption: compute.Copy,
+return armcompute.CreationData{
+CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy),
SourceResourceID: &sourceResourceID,
PerformancePlus: options.PerformancePlus,
}, nil
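
Track1 CreationData accepted enum values directly (compute.Empty, compute.Copy); the track2 model makes CreateOption a pointer, which is why every return site above now wraps the constant with to.Ptr. A hedged sketch of building both variants, separate from the PR's code:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

// emptyCreationData describes a blank managed disk.
func emptyCreationData() armcompute.CreationData {
	return armcompute.CreationData{
		CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty),
	}
}

// copyCreationData describes a disk cloned from an existing disk or snapshot.
func copyCreationData(sourceResourceID string) armcompute.CreationData {
	return armcompute.CreationData{
		CreateOption:     to.Ptr(armcompute.DiskCreateOptionCopy),
		SourceResourceID: to.Ptr(sourceResourceID),
	}
}
```
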