[release-1.29] fix golint and disable staticcheck #2523

Merged

Changes from all commits
2 changes: 1 addition & 1 deletion .github/workflows/static.yaml
@@ -16,4 +16,4 @@ jobs:
         uses: golangci/golangci-lint-action@v3
         with:
           version: v1.60
-          args: --timeout 10m
+          args: -E=gofmt,unused,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,depguard,dogsled,durationcheck,errname,forbidigo -D=staticcheck --timeout=30m0s
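Here `-E` explicitly enables the listed linters and `-D` disables the listed ones, so `-D=staticcheck` switches staticcheck off for the whole branch; that is why the `// nolint: staticcheck` suppressions removed in the files below are no longer needed. The timeout is also raised to 30 minutes to accommodate the larger linter set.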
2 changes: 1 addition & 1 deletion pkg/azuredisk/azure_common_linux_test.go
@@ -24,7 +24,7 @@ import (
 )

 func TestRescanAllVolumes(t *testing.T) {
-    if runtime.GOOS == "darwin" { // nolint: staticcheck
+    if runtime.GOOS == "darwin" {
         t.Skipf("skip test on GOOS=%s", runtime.GOOS)
     }
     err := rescanAllVolumes(azureutils.NewOSIOHandler())
2 changes: 1 addition & 1 deletion pkg/azuredisk/azuredisk.go
@@ -479,7 +479,7 @@ func (d *DriverCore) waitForSnapshotReady(ctx context.Context, subsID, resourceG
         return nil
     }

-    timeTick := time.Tick(intervel) // nolint: staticcheck
+    timeTick := time.Tick(intervel)
     timeAfter := time.After(timeout)
     for {
         select {
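For context, staticcheck flags `time.Tick` (check SA1015) because the underlying `time.Ticker` is never stopped and leaks when the enclosing function can return; with staticcheck disabled branch-wide, the suppression comment becomes unnecessary. A minimal sketch of the leak-free alternative the check points toward — illustrative names only, not the driver's actual code:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitReady polls ready() every interval until it returns true, the timeout
// elapses, or ctx is cancelled.
func waitReady(ctx context.Context, interval, timeout time.Duration, ready func() bool) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop() // stopped on every return path, unlike time.Tick
	deadline := time.After(timeout)
	for {
		select {
		case <-ticker.C:
			if ready() {
				return nil
			}
		case <-deadline:
			return errors.New("timed out waiting for condition")
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	n := 0
	err := waitReady(context.Background(), 10*time.Millisecond, time.Second, func() bool { n++; return n >= 3 })
	fmt.Println(err) // <nil>
}
```

This PR simply drops the nolint comment rather than migrating the call, since `-D=staticcheck` silences the check everywhere.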
2 changes: 1 addition & 1 deletion pkg/azuredisk/azuredisk_test.go
@@ -25,7 +25,7 @@ import (
     "testing"
     "time"

-    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck
+    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
     "github.com/Azure/go-autorest/autorest/date"
     "github.com/golang/mock/gomock"
     "github.com/stretchr/testify/assert"
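The same one-line edit recurs in several imports below: the track-1 `azure-sdk-for-go/services/...` packages are marked deprecated upstream, which staticcheck reports (SA1019) at each import site. Since `-D=staticcheck` now disables the linter entirely, the per-line suppressions can simply be dropped rather than migrating to the newer SDK.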
2 changes: 1 addition & 1 deletion pkg/azuredisk/azuredisk_v1.go
@@ -21,6 +21,6 @@ package azuredisk

 // NewDriver Creates a NewCSIDriver object. Assumes vendor version is equal to driver version &
 // does not support optional driver plugin info manifest field. Refer to CSI spec for more details.
-func NewDriver(options *DriverOptions) CSIDriver { // nolint: staticcheck
+func NewDriver(options *DriverOptions) CSIDriver {
     return newDriverV1(options)
 }
2 changes: 1 addition & 1 deletion pkg/azuredisk/azuredisk_v1_test.go
@@ -23,7 +23,7 @@ import (
     "context"
     "testing"

-    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck
+    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
     "github.com/golang/mock/gomock"
     "github.com/stretchr/testify/assert"
     consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants"
2 changes: 1 addition & 1 deletion pkg/azuredisk/controllerserver.go
@@ -25,7 +25,7 @@ import (
     "strings"
     "time"

-    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck
+    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
     "github.com/container-storage-interface/spec/lib/go/csi"

     "google.golang.org/grpc/codes"
2 changes: 1 addition & 1 deletion pkg/azuredisk/controllerserver_test.go
@@ -22,7 +22,7 @@ import (
     "reflect"
     "testing"

-    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck
+    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
     "github.com/Azure/go-autorest/autorest/date"
     "github.com/container-storage-interface/spec/lib/go/csi"
     "github.com/golang/mock/gomock"
2 changes: 1 addition & 1 deletion pkg/azuredisk/fake_azuredisk.go
@@ -21,7 +21,7 @@ import (
     "testing"
     "time"

-    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck
+    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
     "github.com/container-storage-interface/spec/lib/go/csi"
     "github.com/golang/mock/gomock"
     "github.com/stretchr/testify/assert"
2 changes: 1 addition & 1 deletion pkg/azuredisk/nodeserver.go
@@ -612,7 +612,7 @@ func (d *Driver) getDevicePathWithLUN(lunStr string) (string, error) {
     scsiHostRescan(d.ioHandler, d.mounter)

     newDevicePath := ""
-    err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 2*time.Minute, true, func(context.Context) (bool, error) {
+    err = wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) {
         var err error
         if newDevicePath, err = findDiskByLun(int(lun), d.ioHandler, d.mounter); err != nil {
             return false, fmt.Errorf("azureDisk - findDiskByLun(%v) failed with error(%s)", lun, err)
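This is the recurring pattern in the backport's polling changes: the newer context-aware poller is swapped for its older equivalent. A minimal sketch contrasting the two `k8s.io/apimachinery/pkg/util/wait` APIs, assuming a vendored apimachinery recent enough to ship both (the condition function is a stand-in):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	tries := 0
	condition := func() (bool, error) {
		tries++
		return tries >= 3, nil // succeed on the third attempt
	}

	// Older, context-free API, still present in the apimachinery this branch vendors.
	if err := wait.PollImmediate(10*time.Millisecond, time.Second, condition); err != nil {
		fmt.Println("PollImmediate:", err)
	}

	// Newer context-aware API used on master; immediate=true matches PollImmediate.
	tries = 0
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, time.Second, true,
		func(context.Context) (bool, error) { return condition() })
	fmt.Println("PollUntilContextTimeout:", err)
}
```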
4 changes: 2 additions & 2 deletions pkg/azurediskplugin/main.go
@@ -140,8 +140,8 @@ func handle() {
         WaitForSnapshotReady:  *waitForSnapshotReady,
         CheckDiskLUNCollision: *checkDiskLUNCollision,
     }
-    driver := azuredisk.NewDriver(&driverOptions) // nolint: staticcheck
-    if driver == nil { // nolint: staticcheck
+    driver := azuredisk.NewDriver(&driverOptions)
+    if driver == nil {
         klog.Fatalln("Failed to initialize azuredisk CSI Driver")
     }
     testingMock := false
2 changes: 1 addition & 1 deletion pkg/csi-common/server.go
@@ -91,7 +91,7 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c

     grpcInterceptor := grpc.UnaryInterceptor(logGRPC)
     if enableOtelTracing {
-        grpcInterceptor = grpc.ChainUnaryInterceptor(logGRPC, otelgrpc.UnaryServerInterceptor()) // nolint: staticcheck
+        grpcInterceptor = grpc.ChainUnaryInterceptor(logGRPC, otelgrpc.UnaryServerInterceptor())
     }

     opts := []grpc.ServerOption{
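For reference, the reason this call carried a nolint: recent otelgrpc releases deprecate the interceptor constructors in favor of a `stats.Handler`. A minimal sketch of that migration, assuming a current `go.opentelemetry.io/contrib` otelgrpc version — not what this backport does, which keeps the interceptor and relies on staticcheck being disabled:

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

// newTracedServer wires OpenTelemetry via the stats.Handler API that replaces
// the deprecated otelgrpc.UnaryServerInterceptor constructor.
func newTracedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler()),
	)
}

func main() {
	_ = newTracedServer()
}
```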
90 changes: 45 additions & 45 deletions pkg/os/disk/types.go
@@ -27,38 +27,38 @@ type StoragePropertyID uint32

 const (
     StorageDeviceProperty StoragePropertyID = 0
-    StorageAdapterProperty StoragePropertyID = 1
-    StorageDeviceIDProperty StoragePropertyID = 2
-    StorageDeviceUniqueIDProperty StoragePropertyID = 3
-    StorageDeviceWriteCacheProperty StoragePropertyID = 4
-    StorageMiniportProperty StoragePropertyID = 5
-    StorageAccessAlignmentProperty StoragePropertyID = 6
-    StorageDeviceSeekPenaltyProperty StoragePropertyID = 7
-    StorageDeviceTrimProperty StoragePropertyID = 8
-    StorageDeviceWriteAggregationProperty StoragePropertyID = 9
-    StorageDeviceDeviceTelemetryProperty StoragePropertyID = 10
-    StorageDeviceLBProvisioningProperty StoragePropertyID = 11
-    StorageDevicePowerProperty StoragePropertyID = 12
-    StorageDeviceCopyOffloadProperty StoragePropertyID = 13
-    StorageDeviceResiliencyProperty StoragePropertyID = 14
-    StorageDeviceMediumProductType StoragePropertyID = 15
-    StorageAdapterRpmbProperty StoragePropertyID = 16
-    StorageAdapterCryptoProperty StoragePropertyID = 17
-    StorageDeviceIoCapabilityProperty StoragePropertyID = 18
-    StorageAdapterProtocolSpecificProperty StoragePropertyID = 19
-    StorageDeviceProtocolSpecificProperty StoragePropertyID = 20
-    StorageAdapterTemperatureProperty StoragePropertyID = 21
-    StorageDeviceTemperatureProperty StoragePropertyID = 22
-    StorageAdapterPhysicalTopologyProperty StoragePropertyID = 23
-    StorageDevicePhysicalTopologyProperty StoragePropertyID = 24
-    StorageDeviceAttributesProperty StoragePropertyID = 25
-    StorageDeviceManagementStatus StoragePropertyID = 26
-    StorageAdapterSerialNumberProperty StoragePropertyID = 27
-    StorageDeviceLocationProperty StoragePropertyID = 28
-    StorageDeviceNumaProperty StoragePropertyID = 29
-    StorageDeviceZonedDeviceProperty StoragePropertyID = 30
-    StorageDeviceUnsafeShutdownCount StoragePropertyID = 31
-    StorageDeviceEnduranceProperty StoragePropertyID = 32
+    StorageAdapterProperty = 1
+    StorageDeviceIDProperty = 2
+    StorageDeviceUniqueIDProperty = 3
+    StorageDeviceWriteCacheProperty = 4
+    StorageMiniportProperty = 5
+    StorageAccessAlignmentProperty = 6
+    StorageDeviceSeekPenaltyProperty = 7
+    StorageDeviceTrimProperty = 8
+    StorageDeviceWriteAggregationProperty = 9
+    StorageDeviceDeviceTelemetryProperty = 10
+    StorageDeviceLBProvisioningProperty = 11
+    StorageDevicePowerProperty = 12
+    StorageDeviceCopyOffloadProperty = 13
+    StorageDeviceResiliencyProperty = 14
+    StorageDeviceMediumProductType = 15
+    StorageAdapterRpmbProperty = 16
+    StorageAdapterCryptoProperty = 17
+    StorageDeviceIoCapabilityProperty = 18
+    StorageAdapterProtocolSpecificProperty = 19
+    StorageDeviceProtocolSpecificProperty = 20
+    StorageAdapterTemperatureProperty = 21
+    StorageDeviceTemperatureProperty = 22
+    StorageAdapterPhysicalTopologyProperty = 23
+    StorageDevicePhysicalTopologyProperty = 24
+    StorageDeviceAttributesProperty = 25
+    StorageDeviceManagementStatus = 26
+    StorageAdapterSerialNumberProperty = 27
+    StorageDeviceLocationProperty = 28
+    StorageDeviceNumaProperty = 29
+    StorageDeviceZonedDeviceProperty = 30
+    StorageDeviceUnsafeShutdownCount = 31
+    StorageDeviceEnduranceProperty = 32
 )

 type StorageQueryType uint32
@@ -89,31 +89,31 @@ type StorageIdentifierCodeSet uint32

 const (
     StorageIDCodeSetReserved StorageIdentifierCodeSet = 0
-    StorageIDCodeSetBinary StorageIdentifierCodeSet = 1
-    StorageIDCodeSetASCII StorageIdentifierCodeSet = 2
-    StorageIDCodeSetUtf8 StorageIdentifierCodeSet = 3
+    StorageIDCodeSetBinary = 1
+    StorageIDCodeSetASCII = 2
+    StorageIDCodeSetUtf8 = 3
 )

 type StorageIdentifierType uint32

 const (
     StorageIDTypeVendorSpecific StorageIdentifierType = 0
-    StorageIDTypeVendorID StorageIdentifierType = 1
-    StorageIDTypeEUI64 StorageIdentifierType = 2
-    StorageIDTypeFCPHName StorageIdentifierType = 3
-    StorageIDTypePortRelative StorageIdentifierType = 4
-    StorageIDTypeTargetPortGroup StorageIdentifierType = 5
-    StorageIDTypeLogicalUnitGroup StorageIdentifierType = 6
-    StorageIDTypeMD5LogicalUnitIdentifier StorageIdentifierType = 7
-    StorageIDTypeScsiNameString StorageIdentifierType = 8
+    StorageIDTypeVendorID = 1
+    StorageIDTypeEUI64 = 2
+    StorageIDTypeFCPHName = 3
+    StorageIDTypePortRelative = 4
+    StorageIDTypeTargetPortGroup = 5
+    StorageIDTypeLogicalUnitGroup = 6
+    StorageIDTypeMD5LogicalUnitIdentifier = 7
+    StorageIDTypeScsiNameString = 8
 )

 type StorageAssociationType uint32

 const (
     StorageIDAssocDevice StorageAssociationType = 0
-    StorageIDAssocPort StorageAssociationType = 1
-    StorageIDAssocTarget StorageAssociationType = 2
+    StorageIDAssocPort = 1
+    StorageIDAssocTarget = 2
 )

 type StorageIdentifier struct {
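A note on why this hunk still type-checks: only the first constant in each block keeps its explicit type, and the rest become untyped integer constants, which Go converts implicitly wherever the typed value is expected. A minimal compilable sketch reusing two names from the hunk above:

```go
package main

import "fmt"

type StoragePropertyID uint32

const (
	StorageDeviceProperty  StoragePropertyID = 0
	StorageAdapterProperty                   = 1 // now an untyped constant
)

func query(id StoragePropertyID) { fmt.Println(id) }

func main() {
	query(StorageDeviceProperty)  // already a StoragePropertyID
	query(StorageAdapterProperty) // untyped 1 converts implicitly
}
```

One subtlety: in contexts with no expected type (e.g. `x := StorageAdapterProperty`), the untyped constant defaults to `int` rather than `StoragePropertyID`.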
4 changes: 2 additions & 2 deletions pkg/util/util.go
@@ -131,13 +131,13 @@ func MakeFile(pathname string) error {
 }

 type VolumeLocks struct {
-    locks sets.Set[string]
+    locks sets.String
     mux   sync.Mutex
 }

 func NewVolumeLocks() *VolumeLocks {
     return &VolumeLocks{
-        locks: sets.New[string](),
+        locks: sets.NewString(),
     }
 }
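`sets.Set[string]`/`sets.New[string]` are the generic set API added in newer `k8s.io/apimachinery` releases; `sets.String`/`sets.NewString` is the older string-specialized form this branch reverts to, presumably because the apimachinery vendored on release-1.29 predates the generic API. A minimal usage sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	locks := sets.NewString() // older API, as restored by this PR
	locks.Insert("vol-1")
	fmt.Println(locks.Has("vol-1")) // true

	// Generic equivalent on newer apimachinery (what master uses):
	//   locks := sets.New[string](); locks.Insert("vol-1")
}
```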
3 changes: 1 addition & 2 deletions test/e2e/pre_provisioning_test.go
@@ -24,7 +24,6 @@ import (

     "github.com/container-storage-interface/spec/lib/go/csi"
     "github.com/onsi/ginkgo/v2"
-    "github.com/onsi/gomega"
     v1 "k8s.io/api/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -172,7 +171,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
     req := makeCreateVolumeReq("invalid-maxShares", 256)
     req.Parameters = map[string]string{"maxShares": "0"}
     _, err := azurediskDriver.CreateVolume(ctx, req)
-    gomega.Expect(err).To(gomega.HaveOccurred())
+    framework.ExpectError(err)
 })

 ginkgo.It("should succeed when attaching a shared block volume to multiple pods [disk.csi.azure.com][shared disk]", func(ctx ginkgo.SpecContext) {
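`framework.ExpectError(err)` is the older e2e-framework helper, equivalent to `gomega.Expect(err).To(gomega.HaveOccurred())`; newer kubernetes test frameworks dropped these helpers in favor of bare gomega, so this revert presumably matches the framework version vendored on release-1.29. The `framework.ExpectEqual` calls reinstated in the test-suite files below follow the same reasoning.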
6 changes: 3 additions & 3 deletions test/e2e/suite_test.go
@@ -28,6 +28,7 @@ import (
     "testing"

     "github.com/onsi/ginkgo/v2"
+    "github.com/onsi/ginkgo/v2/reporters"
     "github.com/onsi/gomega"
     "github.com/pborman/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -265,9 +266,8 @@ func TestE2E(t *testing.T) {
     if reportDir == "" {
         reportDir = defaultReportDir
     }
-    _, reporterConfig := ginkgo.GinkgoConfiguration()
-    reporterConfig.JUnitReport = path.Join(reportDir, "junit_01.xml")
-    ginkgo.RunSpecs(t, "AzureDisk CSI Driver End-to-End Tests", reporterConfig)
+    r := []ginkgo.Reporter{reporters.NewJUnitReporter(path.Join(reportDir, "junit_01.xml"))}
+    ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "AzureDisk CSI Driver End-to-End Tests", r)
 }

 func execTestCmd(cmds []testCmd) {
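Master drives JUnit output through ginkgo v2's `reporterConfig`; this revert goes back to the v1-era entry points (`reporters.NewJUnitReporter`, `RunSpecsWithDefaultAndCustomReporters`), which ginkgo/v2 still ships as deprecated compatibility shims, presumably to match the ginkgo version vendored on this branch.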
@@ -87,12 +87,12 @@ func (t *DynamicallyProvisionedAzureDiskDetach) Run(ctx context.Context, client
     framework.ExpectNoError(err, fmt.Sprintf("Error getting client for azuredisk %v", err))
     disktest, err := disksClient.Get(ctx, resourceGroup, diskName)
     framework.ExpectNoError(err, fmt.Sprintf("Error getting disk for azuredisk %v", err))
-    gomega.Expect(string(compute.Attached)).To(gomega.Equal(string(*disktest.Properties.DiskState)))
+    framework.ExpectEqual(string(compute.Attached), string(*disktest.Properties.DiskState))

     ginkgo.By("begin to delete the pod")
     tpod.Cleanup(ctx)

-    err = wait.PollUntilContextTimeout(ctx, 15*time.Second, 10*time.Minute, false, func(context.Context) (bool, error) {
+    err = wait.Poll(15*time.Second, 10*time.Minute, func() (bool, error) {
         disktest, err := disksClient.Get(ctx, resourceGroup, diskName)
         if err != nil {
             return false, fmt.Errorf("Error getting disk for azuredisk %v", err)
@@ -90,28 +90,28 @@ func (t *DynamicallyProvisionedAzureDiskWithTag) Run(ctx context.Context, client

     for k, v := range test {
         _, ok := disktest.Tags[k]
-        gomega.Expect(ok).To(gomega.Equal(true))
+        framework.ExpectEqual(ok, true)
         if ok {
-            gomega.Expect(*disktest.Tags[k]).To(gomega.Equal(v))
+            framework.ExpectEqual(*disktest.Tags[k], v)
         }
     }
     tag, ok := disktest.Tags["kubernetes.io-created-for-pv-name"]
-    gomega.Expect(ok).To(gomega.Equal(true))
-    gomega.Expect(tag != nil).To(gomega.Equal(true))
+    framework.ExpectEqual(ok, true)
+    framework.ExpectEqual(tag != nil, true)
     if tag != nil {
         ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pv-name: %s", *tag))
     }

     tag, ok = disktest.Tags["kubernetes.io-created-for-pvc-name"]
-    gomega.Expect(ok).To(gomega.Equal(true))
-    gomega.Expect(tag != nil).To(gomega.Equal(true))
+    framework.ExpectEqual(ok, true)
+    framework.ExpectEqual(tag != nil, true)
     if tag != nil {
         ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pvc-name: %s", *tag))
     }

     tag, ok = disktest.Tags["kubernetes.io-created-for-pvc-namespace"]
-    gomega.Expect(ok).To(gomega.Equal(true))
-    gomega.Expect(tag != nil).To(gomega.Equal(true))
+    framework.ExpectEqual(ok, true)
+    framework.ExpectEqual(tag != nil, true)
     if tag != nil {
         ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pvc-namespace: %s", *tag))
     }
@@ -120,7 +120,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(ctx context.Context, client

     var newPv *v1.PersistentVolume
     var newPvSize resource.Quantity
-    err = wait.PollUntilContextTimeout(ctx, 30*time.Second, 10*time.Minute, true, func(context.Context) (bool, error) {
+    err = wait.PollImmediate(30*time.Second, 10*time.Minute, func() (bool, error) {
         // takes 3-6 minutes on average for dynamic resize
         ginkgo.By("checking the resizing PV result")
         newPv, _ = client.CoreV1().PersistentVolumes().Get(ctx, newPvc.Spec.VolumeName, metav1.GetOptions{})
6 changes: 3 additions & 3 deletions test/e2e/testsuites/testsuites.go
@@ -165,7 +165,7 @@ func (t *TestVolumeSnapshotClass) CreateSnapshot(ctx context.Context, pvc *v1.Pe

 func (t *TestVolumeSnapshotClass) ReadyToUse(ctx context.Context, snapshot *snapshotv1.VolumeSnapshot) {
     ginkgo.By("waiting for VolumeSnapshot to be ready to use - " + snapshot.Name)
-    err := wait.PollUntilContextTimeout(ctx, 15*time.Second, 30*time.Minute, false, func(context.Context) (bool, error) {
+    err := wait.Poll(15*time.Second, 30*time.Minute, func() (bool, error) {
         vs, err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Get(ctx, snapshot.Name, metav1.GetOptions{})
         if err != nil {
             return false, fmt.Errorf("did not see ReadyToUse: %v", err)
@@ -752,7 +752,7 @@ func (t *TestStatefulset) Logs(ctx context.Context) ([]byte, error) {
     return podLogs(ctx, t.client, t.podName, t.namespace.Name)
 }
 func waitForStatefulSetComplete(ctx context.Context, cs clientset.Interface, ns *v1.Namespace, ss *apps.StatefulSet) error {
-    err := wait.PollUntilContextTimeout(ctx, poll, pollTimeout, true, func(context.Context) (bool, error) {
+    err := wait.PollImmediate(poll, pollTimeout, func() (bool, error) {
         var err error
         statefulSet, err := cs.AppsV1().StatefulSets(ns.Name).Get(ctx, ss.Name, metav1.GetOptions{})
         if err != nil {
@@ -1055,7 +1055,7 @@ func getWinImageTag(winServerVer string) string {

 func pollForStringWorker(namespace string, pod string, command []string, expectedString string, ch chan<- error) {
     args := append([]string{"exec", pod, "--"}, command...)
-    err := wait.PollUntilContextTimeout(context.Background(), poll, pollForStringTimeout, true, func(context.Context) (bool, error) {
+    err := wait.PollImmediate(poll, pollForStringTimeout, func() (bool, error) {
         stdout, err := e2ekubectl.RunKubectl(namespace, args...)
         if err != nil {
             framework.Logf("Error waiting for output %q in pod %q: %v.", expectedString, pod, err)