Merge pull request #257 from dell/fix-linter-errors
[1271] Fix linter errors
donatwork authored Apr 26, 2024
2 parents d5f0db6 + 999bc5b commit df036d6
Showing 15 changed files with 135 additions and 147 deletions.
4 changes: 1 addition & 3 deletions .github/CODEOWNERS
@@ -15,11 +15,9 @@
 # Chiman Jain (chimanjain)
 # Christian Coffield (ChristianAtDell)
 # Don Khan (donatwork)
-# Emily Kinuthia (EmilyKatdell)
 # Harish H (HarishH-DELL)
 # Harsha Yalamanchili (harshaatdell)
 # Leonard Ulman (Leonard-Dell)
-# Marek Suski (mareksuski-dell)
 # Małgorzata Dutka (mdutka-dell)
 # Meghana GM (meggm)
 # Nitesh Rewatkar (nitesh3108)
@@ -31,4 +29,4 @@
 # Shefali Malhotra (shefali-malhotra)
 
 # for all files:
-* @adamginna-dell @atye @bogdanNovikovDell @bpjain2004 @cbartoszDell @chimanjain @ChristianAtDell @donatwork @EmilyKatdell @HarishH-DELL @harshaatdell @Leonard-Dell @mareksuski-dell @mdutka-dell @meggm @nitesh3108 @prablr79 @rajkumar-palani @Sakshi-dell @santhoshatdell @shaynafinocchiaro @shefali-malhotra
+* @adamginna-dell @atye @bogdanNovikovDell @bpjain2004 @cbartoszDell @chimanjain @ChristianAtDell @donatwork @HarishH-DELL @harshaatdell @Leonard-Dell @mdutka-dell @meggm @nitesh3108 @prablr79 @rajkumar-palani @Sakshi-dell @santhoshatdell @shaynafinocchiaro @shefali-malhotra
36 changes: 15 additions & 21 deletions service/controller.go
@@ -647,11 +647,11 @@ func (s *service) CreateVolume(
 		}
 	} else {
 		// clear quota and delete volume since the export cannot be created
-		if error := isiConfig.isiSvc.ClearQuotaByID(ctx, quotaID); error != nil {
-			log.Infof("Clear Quota returned error '%s'", error)
+		if err := isiConfig.isiSvc.ClearQuotaByID(ctx, quotaID); err != nil {
+			log.Infof("Clear Quota returned error '%s'", err)
 		}
-		if error := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, req.GetName()); error != nil {
-			log.Infof("Delete volume in CreateVolume returned error '%s'", error)
+		if err := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, req.GetName()); err != nil {
+			log.Infof("Delete volume in CreateVolume returned error '%s'", err)
 		}
 		return nil, err
 	}
@@ -924,10 +924,6 @@ func (s *service) processSnapshotTrackingDirectoryDuringDeleteVolume(
 	}
 	// Delete the snapshot tracking directory entry for this volume
 	isiPath, snapshotName, _ := isiConfig.isiSvc.GetSnapshotIsiPathComponents(exportPath, zone.Path)
-
-	if err != nil {
-		return err
-	}
 	log.Debugf("snapshot name associated with volume '%s' is '%s'", volName, snapshotName)
 
 	// Populate names for snapshot's tracking dir, snapshot tracking dir entry for this volume
@@ -1283,8 +1279,8 @@ func (s *service) ValidateVolumeCapabilities(
 	return resp, nil
 }
 
-func (s *service) ListVolumes(ctx context.Context,
-	req *csi.ListVolumesRequest,
+func (s *service) ListVolumes(_ context.Context,
+	_ *csi.ListVolumesRequest,
 ) (*csi.ListVolumesResponse, error) {
 	// TODO The below implementation(commented code) doesn't work for multi-cluster.
 	// Add multi-cluster support by considering both MaxEntries and StartingToken(if specified) attributes.
@@ -1468,8 +1464,8 @@ func (s *service) GetCapacity(
 }
 
 func (s *service) ControllerGetCapabilities(
-	ctx context.Context,
-	req *csi.ControllerGetCapabilitiesRequest) (
+	_ context.Context,
+	_ *csi.ControllerGetCapabilitiesRequest) (
 	*csi.ControllerGetCapabilitiesResponse, error,
 ) {
 	capabilities := []*csi.ControllerServiceCapability{
@@ -1775,15 +1771,13 @@ func (s *service) DeleteSnapshot(
 				return &csi.DeleteSnapshotResponse{}, nil
 			}
 			// Internal server error if the error is not about "not found"
-			if err != nil {
-				return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "cannot check the existence of the snapshot: '%s'", err.Error()))
-			}
-		} else {
-			if jsonError.StatusCode == 404 {
-				return &csi.DeleteSnapshotResponse{}, nil
-			}
 			return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "cannot check the existence of the snapshot: '%s'", err.Error()))
 		}
+
+		if jsonError.StatusCode == 404 {
+			return &csi.DeleteSnapshotResponse{}, nil
+		}
+		return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "cannot check the existence of the snapshot: '%s'", err.Error()))
 	}
 
 	// Get snapshot path
@@ -1876,7 +1870,7 @@ func (s *service) processSnapshotTrackingDirectoryDuringDeleteSnapshot(
 // Validate volume capabilities
 func validateVolumeCaps(
 	vcs []*csi.VolumeCapability,
-	vol isi.Volume,
+	_ isi.Volume,
 ) (bool, string) {
 	var (
 		supported = true
@@ -2068,6 +2062,6 @@ func removeString(exportList []string, strToRemove string) []string {
 	return exportList
 }
 
-func (s *service) CreateVolumeGroupSnapshot(ctx context.Context, request *vgsext.CreateVolumeGroupSnapshotRequest) (*vgsext.CreateVolumeGroupSnapshotResponse, error) {
+func (s *service) CreateVolumeGroupSnapshot(_ context.Context, _ *vgsext.CreateVolumeGroupSnapshotRequest) (*vgsext.CreateVolumeGroupSnapshotResponse, error) {
 	panic("implement me")
 }
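
The controller.go hunks above apply two recurring linter fixes: local variables named error, which shadow the predeclared error type, are renamed to err, and parameters a method never reads are renamed to _ so the CSI interface signatures stay intact (a dead if err != nil check is also dropped). The snippet below is a minimal, self-contained sketch of those two patterns; it is not driver code, and the rule names mentioned (e.g. the predeclared/revive checks in a typical golangci-lint setup) are assumptions rather than something stated in this PR.

// Minimal sketch (not driver code) of the two lint findings fixed above.
package main

import (
	"context"
	"errors"
	"fmt"
)

func cleanup() error { return errors.New("cleanup failed") }

// Before: naming the variable "error" shadows the predeclared error type.
func cleanupNoisy() {
	if error := cleanup(); error != nil { // linters flag the shadowed builtin
		fmt.Println(error)
	}
}

// After: the conventional short name err avoids the shadowing.
func cleanupQuiet() {
	if err := cleanup(); err != nil {
		fmt.Println(err)
	}
}

// An argument an implementation never reads is blanked, not removed, so the
// signature still satisfies the interface it implements.
func listVolumesStub(_ context.Context, _ string) (int, error) {
	return 0, nil
}

func main() {
	cleanupNoisy()
	cleanupQuiet()
	n, err := listVolumesStub(context.Background(), "starting-token")
	fmt.Println(n, err)
}
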
6 changes: 3 additions & 3 deletions service/csi_extension_server.go
@@ -41,7 +41,7 @@ func (s *service) ValidateVolumeHostConnectivity(ctx context.Context, req *podmo
 	systemIDs := make(map[string]bool)
 	systemID := req.GetArrayId()
 	if systemID == "" {
-		foundOne := s.getArrayIdsFromVolumes(ctx, systemIDs, req.GetVolumeIds())
+		foundOne := s.getArrayIDsFromVolumes(ctx, systemIDs, req.GetVolumeIds())
 		if !foundOne {
 			systemID = s.defaultIsiClusterName
 			systemIDs[systemID] = true
@@ -89,12 +89,12 @@ func (s *service) ValidateVolumeHostConnectivity(ctx context.Context, req *podmo
 	return rep, nil
 }
 
-func (s *service) getArrayIdsFromVolumes(ctx context.Context, systemIDs map[string]bool, requestVolumeIds []string) bool {
+func (s *service) getArrayIDsFromVolumes(ctx context.Context, systemIDs map[string]bool, requestVolumeIDs []string) bool {
 	ctx, log, _ := GetRunIDLog(ctx)
 	var err error
 	var systemID string
 	var foundAtLeastOne bool
-	for _, volumeID := range requestVolumeIds {
+	for _, volumeID := range requestVolumeIDs {
 		// Extract clusterName from the volume ID (if any volumes in the request)
 		if _, _, _, systemID, err = utils.ParseNormalizedVolumeID(ctx, volumeID); err != nil {
 			log.Warnf("Error getting Cluster Name for %s - %s", volumeID, err.Error())
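
The csi_extension_server.go rename above (getArrayIdsFromVolumes to getArrayIDsFromVolumes, requestVolumeIds to requestVolumeIDs) follows Go's convention of keeping initialisms such as ID fully capitalized in identifiers; the generated getter req.GetVolumeIds() is left untouched, presumably because it comes from generated code. Below is a small illustrative sketch of the convention, not code from this repository.

// Illustrative only: Go identifiers keep initialisms (ID, URL, HTTP) uppercase.
package main

import "fmt"

type volume struct {
	ID      string // rather than Id
	HTTPURL string // rather than HttpUrl
}

// collectVolumeIDs applies the same convention to the function and parameter names.
func collectVolumeIDs(requestVolumeIDs []string) map[string]bool {
	ids := make(map[string]bool, len(requestVolumeIDs))
	for _, id := range requestVolumeIDs {
		ids[id] = true
	}
	return ids
}

func main() {
	v := volume{ID: "vol-1", HTTPURL: "https://cluster.example/vol-1"}
	fmt.Println(v, collectVolumeIDs([]string{v.ID}))
}
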
12 changes: 6 additions & 6 deletions service/identity.go
@@ -30,8 +30,8 @@ import (
 )
 
 func (s *service) GetPluginInfo(
-	ctx context.Context,
-	req *csi.GetPluginInfoRequest) (
+	_ context.Context,
+	_ *csi.GetPluginInfoRequest) (
 	*csi.GetPluginInfoResponse, error,
 ) {
 	return &csi.GetPluginInfoResponse{
@@ -42,8 +42,8 @@ func (s *service) GetPluginInfo(
 }
 
 func (s *service) GetPluginCapabilities(
-	ctx context.Context,
-	req *csi.GetPluginCapabilitiesRequest) (
+	_ context.Context,
+	_ *csi.GetPluginCapabilitiesRequest) (
 	*csi.GetPluginCapabilitiesResponse, error,
 ) {
 	var rep csi.GetPluginCapabilitiesResponse
@@ -77,7 +77,7 @@ func (s *service) GetPluginCapabilities(
 
 func (s *service) Probe(
 	ctx context.Context,
-	req *csi.ProbeRequest) (
+	_ *csi.ProbeRequest) (
 	*csi.ProbeResponse, error,
 ) {
 	ctx, log := GetLogger(ctx)
@@ -99,7 +99,7 @@ func (s *service) Probe(
 	return rep, nil
 }
 
-func (s *service) GetReplicationCapabilities(ctx context.Context, req *csiext.GetReplicationCapabilityRequest) (*csiext.GetReplicationCapabilityResponse, error) {
+func (s *service) GetReplicationCapabilities(_ context.Context, _ *csiext.GetReplicationCapabilityRequest) (*csiext.GetReplicationCapabilityResponse, error) {
 	rep := new(csiext.GetReplicationCapabilityResponse)
 	if !strings.EqualFold(s.mode, "node") {
 		rep.Capabilities = []*csiext.ReplicationCapability{
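
In the identity.go hunks above, only the arguments that the method body never touches are blanked: Probe keeps ctx because it is passed to GetLogger, while GetPluginInfo, GetPluginCapabilities and GetReplicationCapabilities ignore both arguments. A minimal sketch of that pattern on a made-up interface (not the CSI one) follows.

// Minimal sketch, assuming a made-up interface: blank only the unused argument.
package main

import (
	"context"
	"fmt"
)

type pinger interface {
	Ping(ctx context.Context, payload string) (string, error)
}

type service struct{}

// Ping still reads ctx, so only payload is replaced by the blank identifier.
func (s service) Ping(ctx context.Context, _ string) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err
	}
	return "pong", nil
}

func main() {
	var p pinger = service{}
	reply, err := p.Ping(context.Background(), "ignored payload")
	fmt.Println(reply, err)
}
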
12 changes: 6 additions & 6 deletions service/interceptor/interceptor.go
@@ -29,7 +29,7 @@ import (
 type rewriteRequestIDInterceptor struct{}
 
 func (r *rewriteRequestIDInterceptor) handleServer(ctx context.Context, req interface{},
-	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
+	_ *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
 ) (interface{}, error) {
 	// Retrieve the gRPC metadata from the incoming context.
 	md, mdOK := metadata.FromIncomingContext(ctx)
@@ -59,7 +59,7 @@ type lockProvider struct {
 	volNameLocks map[string]gosync.TryLocker
 }
 
-func (i *lockProvider) GetLockWithID(ctx context.Context, id string) (gosync.TryLocker, error) {
+func (i *lockProvider) GetLockWithID(_ context.Context, id string) (gosync.TryLocker, error) {
 	i.volIDLocksL.Lock()
 	defer i.volIDLocksL.Unlock()
 
@@ -72,7 +72,7 @@ func (i *lockProvider) GetLockWithID(ctx context.Context, id string) (gosync.Try
 	return lock, nil
 }
 
-func (i *lockProvider) GetLockWithName(ctx context.Context, name string) (gosync.TryLocker, error) {
+func (i *lockProvider) GetLockWithName(_ context.Context, name string) (gosync.TryLocker, error) {
 	i.volNameLocksL.Lock()
 	defer i.volNameLocksL.Unlock()
 
@@ -148,7 +148,7 @@ func (i *interceptor) createMetadataRetrieverClient(ctx context.Context) {
 const pending = "pending"
 
 func (i *interceptor) nodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest,
-	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
+	_ *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
 ) (res interface{}, resErr error) {
 	lock, err := i.opts.locker.GetLockWithID(ctx, req.VolumeId)
 	if err != nil {
@@ -168,7 +168,7 @@ func (i *interceptor) nodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 }
 
 func (i *interceptor) nodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest,
-	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
+	_ *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
 ) (res interface{}, resErr error) {
 	lock, err := i.opts.locker.GetLockWithID(ctx, req.VolumeId)
 	if err != nil {
@@ -186,7 +186,7 @@ func (i *interceptor) nodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
 }
 
 func (i *interceptor) createVolume(ctx context.Context, req *csi.CreateVolumeRequest,
-	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
+	_ *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
 ) (res interface{}, resErr error) {
 	lock, err := i.opts.locker.GetLockWithID(ctx, req.Name)
 	if err != nil {
2 changes: 1 addition & 1 deletion service/interceptor/interceptor_test.go
@@ -42,7 +42,7 @@ const (
 )
 
 func getSleepHandler(millisec int) grpc.UnaryHandler {
-	return func(ctx context.Context, req interface{}) (interface{}, error) {
+	return func(_ context.Context, _ interface{}) (interface{}, error) {
 		fmt.Println("start sleep")
 		time.Sleep(time.Duration(millisec) * time.Millisecond)
 		fmt.Println("stop sleep")
22 changes: 10 additions & 12 deletions service/node.go
@@ -43,8 +43,8 @@ func (s *service) NodeExpandVolume(
 }
 
 func (s *service) NodeStageVolume(
-	ctx context.Context,
-	req *csi.NodeStageVolumeRequest) (
+	_ context.Context,
+	_ *csi.NodeStageVolumeRequest) (
 	*csi.NodeStageVolumeResponse, error,
 ) {
 	// TODO - Need to have logic for staging path of export
@@ -54,8 +54,8 @@ func (s *service) NodeStageVolume(
 }
 
 func (s *service) NodeUnstageVolume(
-	ctx context.Context,
-	req *csi.NodeUnstageVolumeRequest) (
+	_ context.Context,
+	_ *csi.NodeUnstageVolumeRequest) (
 	*csi.NodeUnstageVolumeResponse, error,
 ) {
 	// TODO - Need to have logic for staging path of export
@@ -281,8 +281,8 @@ func (s *service) nodeProbe(ctx context.Context, isiConfig *IsilonClusterConfig)
 }
 
 func (s *service) NodeGetCapabilities(
-	ctx context.Context,
-	req *csi.NodeGetCapabilitiesRequest) (
+	_ context.Context,
+	_ *csi.NodeGetCapabilitiesRequest) (
 	*csi.NodeGetCapabilitiesResponse, error,
 ) {
 	capabilities := []*csi.NodeServiceCapability{
@@ -337,7 +337,7 @@ func (s *service) NodeGetCapabilities(
 // NodeGetInfo RPC call returns NodeId and AccessibleTopology as part of NodeGetInfoResponse
 func (s *service) NodeGetInfo(
 	ctx context.Context,
-	req *csi.NodeGetInfoRequest) (
+	_ *csi.NodeGetInfoRequest) (
 	*csi.NodeGetInfoResponse, error,
 ) {
 	// Fetch log handler
@@ -612,7 +612,6 @@ func (s *service) ephemeralNodePublish(ctx context.Context, req *csi.NodePublish
 		Secrets: req.Secrets,
 		VolumeContext: createEphemeralVolResp.Volume.VolumeContext,
 	})
-
 	if err != nil {
 		log.Error("Need to rollback because NodePublish ephemeral volume failed with error :" + err.Error())
 		if rollbackError := s.ephemeralNodeUnpublish(ctx, nodeUnpublishRequest); rollbackError != nil {
@@ -655,12 +654,11 @@ func (s *service) ephemeralNodePublish(ctx context.Context, req *csi.NodePublish
 	}()
 	_, err2 := f.WriteString(createEphemeralVolResp.Volume.VolumeId)
 	if err2 != nil {
-		log.Error("Writing to id file in target path for ephemeral vol failed with error :" + err.Error())
+		log.Error("Writing to id file in target path for ephemeral vol failed with error :" + err2.Error())
 		if rollbackError := s.ephemeralNodeUnpublish(ctx, nodeUnpublishRequest); rollbackError != nil {
-			log.Error("Rollback failed with error :" + err.Error())
-			return nil, err
+			log.Error("Rollback failed with error :" + rollbackError.Error())
 		}
-		return nil, err
+		return nil, err2
 	}
 	log.Infof("Ephemeral Node Publish was successful...")
 
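
The last node.go hunk is a behaviour fix as well as a lint fix: the failure from f.WriteString is now logged and returned as err2 instead of the outer err, and a failed rollback is logged with rollbackError rather than cutting the branch short. The sketch below shows the same shape of error handling with stand-in functions; names like rollback and writeIDFile are invented for illustration and are not driver code.

// Illustrative sketch (stand-in names, not driver code): report the primary
// failure and the rollback failure separately, then return the primary error.
package main

import (
	"fmt"
	"os"
)

func rollback() error { return nil } // stand-in for the node-unpublish rollback

func writeIDFile(path, volumeID string) error {
	if err := os.WriteFile(path, []byte(volumeID), 0o600); err != nil {
		fmt.Println("writing id file failed:", err)
		if rollbackErr := rollback(); rollbackErr != nil {
			fmt.Println("rollback failed:", rollbackErr)
		}
		return err // the write failure, not the rollback result
	}
	return nil
}

func main() {
	if err := writeIDFile("/tmp/ephemeral-vol-id", "vol-123"); err != nil {
		fmt.Println(err)
	}
}
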
6 changes: 3 additions & 3 deletions service/nodeConnectivityChecker.go
@@ -106,7 +106,7 @@ func (s *service) startAPIService(ctx context.Context) {
 }
 
 // apiRouter serves http requests
-func (s *service) apiRouter(ctx context.Context) {
+func (s *service) apiRouter(_ context.Context) {
 	log.Infof("starting http server on port %s", apiPort)
 	// create a new router
 	router := mux.NewRouter()
@@ -158,14 +158,14 @@ func getArrayConnectivityStatus(w http.ResponseWriter, r *http.Request) {
 }
 
 // nodeHealth states if node is up
-func nodeHealth(w http.ResponseWriter, r *http.Request) {
+func nodeHealth(w http.ResponseWriter, _ *http.Request) {
 	w.WriteHeader(http.StatusOK)
 	w.Header().Set("Content-Type", "application/json")
 	fmt.Fprintf(w, "node is up and running \n")
 }
 
 // connectivityStatus Returns array connectivity status
-func connectivityStatus(w http.ResponseWriter, r *http.Request) {
+func connectivityStatus(w http.ResponseWriter, _ *http.Request) {
 	log.Infof("connectivityStatus called, urr status is %v \n", probeStatus)
 	w.Header().Set("Content-Type", "application/json")
 	w.WriteHeader(http.StatusOK)
8 changes: 4 additions & 4 deletions service/replication.go
@@ -700,7 +700,7 @@ func failover(ctx context.Context, localIsiConfig *IsilonClusterConfig, remoteIs
 	return nil
 }
 
-func failoverUnplanned(ctx context.Context, localIsiConfig *IsilonClusterConfig, remoteIsiConfig *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
+func failoverUnplanned(ctx context.Context, localIsiConfig *IsilonClusterConfig, _ *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
 	log.Info("Running unplanned failover action")
 	// With unplanned failover -- do minimum requests, we will ensure mirrored policy is created in further reprotect call
 	// We can't use remote config (source site) because we need to assume it's down
@@ -901,7 +901,7 @@ func failbackDiscardRemote(ctx context.Context, localIsiConfig *IsilonClusterCon
 	return nil
 }
 
-func synchronize(ctx context.Context, localIsiConfig *IsilonClusterConfig, remoteIsiConfig *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
+func synchronize(ctx context.Context, localIsiConfig *IsilonClusterConfig, _ *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
 	log.Info("Running sync action")
 	// get all running
 	// if running - wait for it and succeed
@@ -916,7 +916,7 @@ func synchronize(ctx context.Context, localIsiConfig *IsilonClusterConfig, remot
 	return nil
 }
 
-func suspend(ctx context.Context, localIsiConfig *IsilonClusterConfig, remoteIsiConfig *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
+func suspend(ctx context.Context, localIsiConfig *IsilonClusterConfig, _ *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
 	log.Info("Running suspend action")
 
 	ppName := strings.ReplaceAll(vgName, ".", "-")
@@ -936,7 +936,7 @@ func suspend(ctx context.Context, localIsiConfig *IsilonClusterConfig, remoteIsi
 	return nil
 }
 
-func resume(ctx context.Context, localIsiConfig *IsilonClusterConfig, remoteIsiConfig *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
+func resume(ctx context.Context, localIsiConfig *IsilonClusterConfig, _ *IsilonClusterConfig, vgName string, log *logrus.Entry) error {
 	log.Info("Running resume action")
 
 	ppName := strings.ReplaceAll(vgName, ".", "-")
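
In replication.go the unused remote-cluster config argument is blanked in failoverUnplanned, synchronize, suspend and resume, but the full parameter list is kept, which leaves all of the action helpers call-compatible with each other. The sketch below illustrates one plausible reason for preserving a uniform signature; the types and dispatch map are invented for the example and are not taken from the driver.

// Illustrative only: a uniform signature lets actions be dispatched uniformly,
// so an argument a particular action ignores is blanked rather than dropped.
package main

import "fmt"

type clusterConfig struct{ name string }

type action func(local, remote *clusterConfig, vgName string) error

func synchronizeAction(local *clusterConfig, _ *clusterConfig, vgName string) error {
	fmt.Printf("sync %s on %s\n", vgName, local.name)
	return nil
}

func failoverAction(local, remote *clusterConfig, vgName string) error {
	fmt.Printf("failover %s from %s to %s\n", vgName, remote.name, local.name)
	return nil
}

func main() {
	actions := map[string]action{"sync": synchronizeAction, "failover": failoverAction}
	local, remote := &clusterConfig{"target"}, &clusterConfig{"source"}
	for name, run := range actions {
		if err := run(local, remote, "vg-1"); err != nil {
			fmt.Println(name, "failed:", err)
		}
	}
}
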