diff --git a/.golangci.yml b/.golangci.yml
index 022d38b277..dded23b9b5 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -22,6 +22,8 @@ linters:
     - dupl
     - exportloopref
     - importas
+    - misspell
+    - nestif
 
 run:
   timeout: 10m # golangci-lint run's timeout.
@@ -37,6 +39,8 @@ issues:
         - unparam # Tests might have unused function parameters.
         - lll
         - dupl
+        - misspell
+        - nestif
 
     - text: "`ctx` is unused" # Context might not be in use in places, but for consistency, we pass it.
      linters:
@@ -60,3 +64,5 @@ linters-settings:
        alias: metav1
      - pkg: github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1
        alias: crv1alpha1
+  nestif:
+    min-complexity: 6
diff --git a/pkg/app/app.go b/pkg/app/app.go
index 57c8cab3e2..f027d9d713 100644
--- a/pkg/app/app.go
+++ b/pkg/app/app.go
@@ -23,7 +23,7 @@ import (
 
 // App represents an application we can install into a namespace.
 type App interface {
-	// Init instantiates the app based on the environemnt configuration,
+	// Init instantiates the app based on the environment configuration,
 	// including environement variables and state in the Kubernetes cluster. If
 	// any required configuration is not discoverable, Init will return an
 	// error.
@@ -48,7 +48,7 @@ type App interface {
 type DatabaseApp interface {
 	App
 	// Ping will issue trivial request to the database to see if it is
-	// accessable.
+	// accessible.
 	Ping(context.Context) error
 	// Insert adds n entries to the database.
 	Insert(ctx context.Context) error
diff --git a/pkg/app/cassandra.go b/pkg/app/cassandra.go
index 46d9c5d24b..5c41e20994 100644
--- a/pkg/app/cassandra.go
+++ b/pkg/app/cassandra.go
@@ -123,7 +123,7 @@ func (cas *CassandraInstance) Object() crv1alpha1.ObjectReference {
 	}
 }
 
-// Uninstall us used to remove the datbase application
+// Uninstall is used to remove the database application
 func (cas *CassandraInstance) Uninstall(ctx context.Context) error {
 	log.Print("Uninstalling application.", field.M{"app": cas.name})
 	cli, err := helm.NewCliClient()
@@ -142,7 +142,7 @@ func (cas *CassandraInstance) GetClusterScopedResources(ctx context.Context) []c
 	return nil
 }
 
-// Ping is used to ping the application to check the datbase connectivity
+// Ping is used to ping the application to check the database connectivity
 func (cas *CassandraInstance) Ping(ctx context.Context) error {
 	log.Print("Pinging the application.", field.M{"app": cas.name})
diff --git a/pkg/app/elasticsearch.go b/pkg/app/elasticsearch.go
index 5cc4896293..5aba83846a 100644
--- a/pkg/app/elasticsearch.go
+++ b/pkg/app/elasticsearch.go
@@ -174,7 +174,7 @@ func (esi *ElasticsearchInstance) Insert(ctx context.Context) error {
 		addDocumentToIndexCMD := []string{"sh", "-c", esi.curlCommandWithPayload("POST", esi.indexname+"/_doc/?refresh=true", "'{\"appname\": \"kanister\" }'")}
 		_, stderr, err := esi.execCommand(ctx, addDocumentToIndexCMD)
 		if err != nil {
-			// even one insert failed we will have to return becasue
+			// even if one insert fails we will have to return because
 			// the count wont match anyway and the test will fail
 			return errors.Wrapf(err, "Error %s inserting document to an index %s.", stderr, esi.indexname)
 		}
diff --git a/pkg/app/mongodb-deploymentconfig.go b/pkg/app/mongodb-deploymentconfig.go
index 043508028d..d83da72016 100644
--- a/pkg/app/mongodb-deploymentconfig.go
+++ b/pkg/app/mongodb-deploymentconfig.go
@@ -184,7 +184,7 @@ func (mongo *MongoDBDepConfig) execCommand(ctx context.Context, command []string
 		return "", "", err
 	}
 	stdout, stderr, err := kube.Exec(mongo.cli, mongo.namespace, podName, containerName, command, nil)
-	log.Print("Executing the command in pod and contianer", field.M{"pod": podName, "container": containerName, "cmd": command})
log.Print("Executing the command in pod and contianer", field.M{"pod": podName, "container": containerName, "cmd": command}) + log.Print("Executing the command in pod and container", field.M{"pod": podName, "container": containerName, "cmd": command}) return stdout, stderr, errors.Wrapf(err, "Error executing command in the pod") } diff --git a/pkg/blockstorage/awsefs/awsefs.go b/pkg/blockstorage/awsefs/awsefs.go index f62d301a90..fdfc2d49ba 100644 --- a/pkg/blockstorage/awsefs/awsefs.go +++ b/pkg/blockstorage/awsefs/awsefs.go @@ -66,7 +66,7 @@ var allowedMetadataKeys = map[string]bool{ "newFileSystem": true, } -// NewEFSProvider retuns a blockstorage provider for AWS EFS. +// NewEFSProvider returns a blockstorage provider for AWS EFS. func NewEFSProvider(ctx context.Context, config map[string]string) (blockstorage.Provider, error) { awsConfig, region, err := awsconfig.GetConfig(ctx, config) if err != nil { diff --git a/pkg/blockstorage/azure/auth.go b/pkg/blockstorage/azure/auth.go index d9e462532c..f909a1b41e 100644 --- a/pkg/blockstorage/azure/auth.go +++ b/pkg/blockstorage/azure/auth.go @@ -11,7 +11,7 @@ import ( const ActiveDirectory = "activeDirectory" -// currently avaialble types: https://docs.microsoft.com/en-us/azure/developer/go/azure-sdk-authorization +// currently available types: https://docs.microsoft.com/en-us/azure/developer/go/azure-sdk-authorization // to be available with azidentity: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#readme-credential-types // determine if the combination of creds are client secret creds func isClientCredsAvailable(config map[string]string) bool { diff --git a/pkg/blockstorage/gcepd/gcepd.go b/pkg/blockstorage/gcepd/gcepd.go index 71365733a7..c84aadba27 100644 --- a/pkg/blockstorage/gcepd/gcepd.go +++ b/pkg/blockstorage/gcepd/gcepd.go @@ -479,20 +479,20 @@ func (s *GpdStorage) SetTags(ctx context.Context, resource interface{}, tags map if err != nil { return err } - } else { - vol, err := s.service.Disks.Get(s.project, res.Az, res.ID).Context(ctx).Do() - if err != nil { - return err - } - tags = ktags.AddMissingTags(vol.Labels, ktags.GetTags(tags)) - slr := &compute.ZoneSetLabelsRequest{ - LabelFingerprint: vol.LabelFingerprint, - Labels: blockstorage.SanitizeTags(tags), - } - op, err = s.service.Disks.SetLabels(s.project, res.Az, vol.Name, slr).Do() - if err != nil { - return err - } + return s.waitOnOperation(ctx, op, res.Az) + } + vol, err := s.service.Disks.Get(s.project, res.Az, res.ID).Context(ctx).Do() + if err != nil { + return err + } + tags = ktags.AddMissingTags(vol.Labels, ktags.GetTags(tags)) + slr := &compute.ZoneSetLabelsRequest{ + LabelFingerprint: vol.LabelFingerprint, + Labels: blockstorage.SanitizeTags(tags), + } + op, err = s.service.Disks.SetLabels(s.project, res.Az, vol.Name, slr).Do() + if err != nil { + return err } return s.waitOnOperation(ctx, op, res.Az) } diff --git a/pkg/blockstorage/getter/getter.go b/pkg/blockstorage/getter/getter.go index 9536595cdb..150e4857eb 100644 --- a/pkg/blockstorage/getter/getter.go +++ b/pkg/blockstorage/getter/getter.go @@ -34,7 +34,7 @@ var _ Getter = (*getter)(nil) type getter struct{} -// New retuns a new Getter +// New returns a new Getter func New() Getter { return &getter{} } diff --git a/pkg/blockstorage/vmware/vmware.go b/pkg/blockstorage/vmware/vmware.go index 8cc2c23954..2a1f215cf4 100644 --- a/pkg/blockstorage/vmware/vmware.go +++ b/pkg/blockstorage/vmware/vmware.go @@ -795,7 +795,7 @@ func (ge govmomiError) Format() string { return fmt.Sprintf("[%s]", 
 }
 
-//nolint:gocognit
+//nolint:gocognit,nestif
 func (ge govmomiError) ExtractMessages() []string {
 	err := ge.err
 
diff --git a/pkg/blockstorage/zone/zone.go b/pkg/blockstorage/zone/zone.go
index f90c149eb7..841ca182e7 100644
--- a/pkg/blockstorage/zone/zone.go
+++ b/pkg/blockstorage/zone/zone.go
@@ -57,7 +57,7 @@ func FromSourceRegionZone(ctx context.Context, m Mapper, kubeCli kubernetes.Inte
 		}
 	}
 	if len(newZones) == 0 {
-		return nil, errors.Errorf("Unable to find valid availabilty zones for region (%s)", sourceRegion)
+		return nil, errors.Errorf("Unable to find valid availability zones for region (%s)", sourceRegion)
 	}
 	var zones []string
 	for z := range newZones {
diff --git a/pkg/blockstorage/zone/zone_test.go b/pkg/blockstorage/zone/zone_test.go
index b345ccfe80..0d1eaa6083 100644
--- a/pkg/blockstorage/zone/zone_test.go
+++ b/pkg/blockstorage/zone/zone_test.go
@@ -562,14 +562,14 @@ func (s ZoneSuite) TestFromSourceRegionZone(c *C) {
 			inZones:  []string{"us-west-2a"},
 			inCli:    nil,
 			outZones: nil,
-			outErr:   fmt.Errorf(".*Unable to find valid availabilty zones for region.*"),
+			outErr:   fmt.Errorf(".*Unable to find valid availability zones for region.*"),
 		},
 		{ // Kubernetes provided zones are invalid use valid sourceZones
 			inRegion: "us-west-2",
 			inZones:  []string{"us-west-2a", "us-west-2b", "us-west-2d"},
 			inCli:    nil,
 			outZones: []string{"us-west-2a", "us-west-2b"},
-			outErr:   fmt.Errorf(".*Unable to find valid availabilty zones for region.*"),
+			outErr:   fmt.Errorf(".*Unable to find valid availability zones for region.*"),
 		},
 		{ // Source zone not found but other valid zones available
 			inRegion: "us-west-2",
diff --git a/pkg/config/helpers.go b/pkg/config/helpers.go
index 4459c9e632..e62a193537 100644
--- a/pkg/config/helpers.go
+++ b/pkg/config/helpers.go
@@ -39,7 +39,7 @@ func GetClusterName(cli kubernetes.Interface) (string, error) {
 func GetEnvOrSkip(c *check.C, varName string) string {
 	v := os.Getenv(varName)
 	if v == "" {
-		reason := fmt.Sprintf("Test %s requires the environemnt variable '%s'", c.TestName(), varName)
+		reason := fmt.Sprintf("Test %s requires the environment variable '%s'", c.TestName(), varName)
 		c.Skip(reason)
 	}
 	return v
diff --git a/pkg/controllers/repositoryserver/repository.go b/pkg/controllers/repositoryserver/repository.go
index 80732e0408..a5be49cbf7 100644
--- a/pkg/controllers/repositoryserver/repository.go
+++ b/pkg/controllers/repositoryserver/repository.go
@@ -37,7 +37,7 @@ func (h *RepoServerHandler) connectToKopiaRepository() error {
 			MetadataCacheLimitMB: *cacheSizeSettings.Metadata,
 		},
 		Username: h.RepositoryServer.Spec.Repository.Username,
-		// TODO(Amruta): Generate path for respository
+		// TODO(Amruta): Generate path for repository
 		RepoPathPrefix: h.RepositoryServer.Spec.Repository.RootPath,
 		Location: h.RepositoryServerSecrets.storage.Data,
 	}
diff --git a/pkg/controllers/repositoryserver/secrets_manager.go b/pkg/controllers/repositoryserver/secrets_manager.go
index 1802abc657..9272eeafb3 100644
--- a/pkg/controllers/repositoryserver/secrets_manager.go
+++ b/pkg/controllers/repositoryserver/secrets_manager.go
@@ -35,7 +35,7 @@ type repositoryServerSecrets struct {
 // getSecretsFromCR fetches all the secrets in the RepositoryServer CR
 func (h *RepoServerHandler) getSecretsFromCR(ctx context.Context) error {
 	// TODO: For now, users should make sure all the secrets and the RepositoryServer CR are present in the
-	// same namespace. This namespace field can be overriden when we start creating secrets using 'kanctl' utility
+	// same namespace. This namespace field can be overridden when we start creating secrets using 'kanctl' utility
 	repositoryServer := h.RepositoryServer
 	h.Logger.Info("Fetching secrets from all the secret references in the CR")
 	storage, err := h.fetchSecret(ctx, &repositoryServer.Spec.Storage.SecretRef)
diff --git a/pkg/customresource/customresource.go b/pkg/customresource/customresource.go
index 992d05155a..a7800f8ec2 100644
--- a/pkg/customresource/customresource.go
+++ b/pkg/customresource/customresource.go
@@ -153,7 +153,7 @@ func createCRD(context Context, resource CustomResource) error {
 }
 
 func rawCRDFromFile(path string) ([]byte, error) {
-	// yamls is the variable that has embeded custom resource manifest. More at `embed.go`
+	// yamls is the variable that has embedded custom resource manifest. More at `embed.go`
 	return yamls.ReadFile(path)
 }
 
diff --git a/pkg/customresource/embed.go b/pkg/customresource/embed.go
index a607bdd6a4..12a30fdb11 100644
--- a/pkg/customresource/embed.go
+++ b/pkg/customresource/embed.go
@@ -5,7 +5,7 @@ import "embed"
 
 // embed.go embeds the CRD yamls (actionset, profile, blueprint) with the
 // controller binary so that we can read these manifests in runtime.
-// We need these manfiests at two places, at `pkg/customresource/` and at
+// We need these manifests at two places, at `pkg/customresource/` and at
 // `helm/kanister-operator/crds`. To make sure we are not duplicating the
 // things we have original files at `pkg/customresource` and have soft links
 // at `helm/kanister-operator/crds`.
diff --git a/pkg/function/create_rds_snapshot.go b/pkg/function/create_rds_snapshot.go
index 5cf40964fd..489faa6c79 100644
--- a/pkg/function/create_rds_snapshot.go
+++ b/pkg/function/create_rds_snapshot.go
@@ -95,30 +95,9 @@ func createRDSSnapshot(ctx context.Context, instanceID string, dbEngine RDSDBEng
 	// Create Snapshot
 	snapshotID := fmt.Sprintf("%s-%s", instanceID, rand.String(10))
 
-	log.WithContext(ctx).Print("Creating RDS snapshot", field.M{"SnapshotID": snapshotID})
-	if !isAuroraCluster(string(dbEngine)) {
-		dbSnapshotOutput, err := rdsCli.CreateDBSnapshot(ctx, instanceID, snapshotID)
-		if err != nil {
-			return nil, errors.Wrap(err, "Failed to create snapshot")
-		}
-
-		// Wait until snapshot becomes available
-		log.WithContext(ctx).Print("Waiting for RDS snapshot to be available", field.M{"SnapshotID": snapshotID})
-		if err := rdsCli.WaitUntilDBSnapshotAvailable(ctx, snapshotID); err != nil {
-			return nil, errors.Wrap(err, "Error while waiting snapshot to be available")
-		}
-		if dbSnapshotOutput.DBSnapshot != nil && dbSnapshotOutput.DBSnapshot.AllocatedStorage != nil {
-			allocatedStorage = *(dbSnapshotOutput.DBSnapshot.AllocatedStorage)
-		}
-	} else {
-		if _, err := rdsCli.CreateDBClusterSnapshot(ctx, instanceID, snapshotID); err != nil {
-			return nil, errors.Wrap(err, "Failed to create cluster snapshot")
-		}
-
-		log.WithContext(ctx).Print("Waiting for RDS Aurora snapshot to be available", field.M{"SnapshotID": snapshotID})
-		if err := rdsCli.WaitUntilDBClusterSnapshotAvailable(ctx, snapshotID); err != nil {
-			return nil, errors.Wrap(err, "Error while waiting snapshot to be available")
-		}
-	}
+	allocatedStorage, err = createSnapshot(ctx, rdsCli, snapshotID, instanceID, string(dbEngine))
+	if err != nil {
+		return nil, err
+	}
 
 	// Find security group ids
@@ -160,6 +139,36 @@ func createRDSSnapshot(ctx context.Context, instanceID string, dbEngine RDSDBEng
 	return output, nil
 }
 
+func createSnapshot(ctx context.Context, rdsCli *rds.RDS, snapshotID, instanceID, dbEngine string) (int64, error) {
+	log.WithContext(ctx).Print("Creating RDS snapshot", field.M{"SnapshotID": snapshotID})
+	var allocatedStorage int64
+	if !isAuroraCluster(dbEngine) {
+		dbSnapshotOutput, err := rdsCli.CreateDBSnapshot(ctx, instanceID, snapshotID)
+		if err != nil {
+			return allocatedStorage, errors.Wrap(err, "Failed to create snapshot")
+		}
+
+		// Wait until snapshot becomes available
+		log.WithContext(ctx).Print("Waiting for RDS snapshot to be available", field.M{"SnapshotID": snapshotID})
+		if err := rdsCli.WaitUntilDBSnapshotAvailable(ctx, snapshotID); err != nil {
+			return allocatedStorage, errors.Wrap(err, "Error while waiting snapshot to be available")
+		}
+		if dbSnapshotOutput.DBSnapshot != nil && dbSnapshotOutput.DBSnapshot.AllocatedStorage != nil {
+			allocatedStorage = *(dbSnapshotOutput.DBSnapshot.AllocatedStorage)
+		}
+		return allocatedStorage, nil
+	}
+	if _, err := rdsCli.CreateDBClusterSnapshot(ctx, instanceID, snapshotID); err != nil {
+		return allocatedStorage, errors.Wrap(err, "Failed to create cluster snapshot")
+	}
+
+	log.WithContext(ctx).Print("Waiting for RDS Aurora snapshot to be available", field.M{"SnapshotID": snapshotID})
+	if err := rdsCli.WaitUntilDBClusterSnapshotAvailable(ctx, snapshotID); err != nil {
+		return allocatedStorage, errors.Wrap(err, "Error while waiting snapshot to be available")
+	}
+	return allocatedStorage, nil
+}
+
 func (crs *createRDSSnapshotFunc) Exec(ctx context.Context, tp param.TemplateParams, args map[string]interface{}) (map[string]interface{}, error) {
 	// Set progress percent
 	crs.progressPercent = progress.StartedPercent
diff --git a/pkg/function/restore_data_using_kopia_server.go b/pkg/function/restore_data_using_kopia_server.go
index eab31abed8..1ecec0e396 100644
--- a/pkg/function/restore_data_using_kopia_server.go
+++ b/pkg/function/restore_data_using_kopia_server.go
@@ -36,7 +36,7 @@ import (
 const (
 	RestoreDataUsingKopiaServerFuncName = "RestoreDataUsingKopiaServer"
 
-	// SparseRestoreOption is the key for specifiying whether to do a sparse restore
+	// SparseRestoreOption is the key for specifying whether to do a sparse restore
 	SparseRestoreOption = "sparseRestore"
 )
diff --git a/pkg/function/restore_rds_snapshot.go b/pkg/function/restore_rds_snapshot.go
index c52099b17b..d3abaa256b 100644
--- a/pkg/function/restore_rds_snapshot.go
+++ b/pkg/function/restore_rds_snapshot.go
@@ -199,11 +199,7 @@ func restoreRDSSnapshot(
 	// If securityGroupID arg is nil, we will try to find the sgIDs by describing the existing instance
 	// Find security group ids
 	if sgIDs == nil {
-		if !isAuroraCluster(string(dbEngine)) {
-			sgIDs, err = findSecurityGroups(ctx, rdsCli, instanceID)
-		} else {
-			sgIDs, err = findAuroraSecurityGroups(ctx, rdsCli, instanceID)
-		}
+		sgIDs, err = findSecurityGroupIDs(ctx, rdsCli, instanceID, string(dbEngine))
 		if err != nil {
 			return nil, errors.Wrapf(err, "Failed to fetch security group ids. InstanceID=%s", instanceID)
InstanceID=%s", instanceID) } @@ -236,6 +232,12 @@ func restoreRDSSnapshot( RestoreRDSSnapshotEndpoint: dbEndpoint, }, nil } +func findSecurityGroupIDs(ctx context.Context, rdsCli *rds.RDS, instanceID, dbEngine string) ([]string, error) { + if !isAuroraCluster(dbEngine) { + return findSecurityGroups(ctx, rdsCli, instanceID) + } + return findAuroraSecurityGroups(ctx, rdsCli, instanceID) +} //nolint:unparam func postgresRestoreCommand(pgHost, username, password string, backupArtifactPrefix, backupID string, profile []byte, dbEngineVersion string) ([]string, error) { @@ -336,7 +338,7 @@ func restoreAuroraFromSnapshot(ctx context.Context, rdsCli *rds.RDS, instanceID, } log.WithContext(ctx).Print("Creating DB instance in the cluster") - // After Aurora cluster is created, we will have to explictly create the DB instance + // After Aurora cluster is created, we will have to explicitly create the DB instance dbInsOp, err := rdsCli.CreateDBInstance( ctx, nil, diff --git a/pkg/kube/pod.go b/pkg/kube/pod.go index e61df31a70..c7164ab943 100644 --- a/pkg/kube/pod.go +++ b/pkg/kube/pod.go @@ -376,7 +376,7 @@ func checkPVCAndPVStatus(ctx context.Context, vol corev1.Volume, p *corev1.Pod, switch pvc.Status.Phase { case corev1.ClaimLost: - return errors.Errorf("PVC %s assoicated with pod %s has status: %s", pvcName, p.Name, corev1.ClaimLost) + return errors.Errorf("PVC %s associated with pod %s has status: %s", pvcName, p.Name, corev1.ClaimLost) case corev1.ClaimPending: pvName := pvc.Spec.VolumeName if pvName == "" { diff --git a/pkg/kube/pod_controller.go b/pkg/kube/pod_controller.go index ac2caeee31..19f7af931d 100644 --- a/pkg/kube/pod_controller.go +++ b/pkg/kube/pod_controller.go @@ -57,7 +57,7 @@ type PodController interface { // podController keeps Kubernetes Client and PodOptions needed for creating a Pod. // It implements the PodControllerProcessor interface. -// All communication with kubernetes API are done via PodControllerProcessor interface, which could be overriden for testing purposes. +// All communication with kubernetes API are done via PodControllerProcessor interface, which could be overridden for testing purposes. 
 type podController struct {
 	cli        kubernetes.Interface
 	podOptions *PodOptions
diff --git a/pkg/objectstore/objectstore.go b/pkg/objectstore/objectstore.go
index 4efbfc61bd..fb0ac26c69 100644
--- a/pkg/objectstore/objectstore.go
+++ b/pkg/objectstore/objectstore.go
@@ -63,7 +63,7 @@ type Directory interface {
 	// DeleteDirectory deletes the current directory
 	DeleteDirectory(context.Context) error
 
-	// DeleteAllWithPrefix deletes all directorys and objects with a provided prefix
+	// DeleteAllWithPrefix deletes all directories and objects with a provided prefix
 	DeleteAllWithPrefix(context.Context, string) error
 
 	// ListDirectories lists all the directories rooted in
diff --git a/pkg/phase.go b/pkg/phase.go
index 87198b551c..7ba0071357 100644
--- a/pkg/phase.go
+++ b/pkg/phase.go
@@ -72,29 +72,37 @@ func (p *Phase) Exec(ctx context.Context, bp crv1alpha1.Blueprint, action string
 		phases = append(phases, *a.DeferPhase)
 	}
 
-	for _, ap := range phases {
-		if ap.Name != p.name {
-			continue
-		}
+		err := p.setPhaseArgs(phases, tp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Execute the function
+	return p.f.Exec(ctx, tp, p.args)
+}
 
-		args, err := renderFuncArgs(ap.Func, ap.Args, tp)
-		if err != nil {
-			return nil, err
-		}
+func (p *Phase) setPhaseArgs(phases []crv1alpha1.BlueprintPhase, tp param.TemplateParams) error {
+	for _, ap := range phases {
+		if ap.Name != p.name {
+			continue
+		}
 
-		if err = checkRequiredArgs(p.f.RequiredArgs(), args); err != nil {
-			return nil, errors.Wrapf(err, "Required args missing for function %s", p.f.Name())
-		}
+		args, err := renderFuncArgs(ap.Func, ap.Args, tp)
+		if err != nil {
+			return err
+		}
 
-		if err = checkSupportedArgs(p.f.Arguments(), args); err != nil {
-			return nil, errors.Wrapf(err, "Checking supported args for function %s.", p.f.Name())
-		}
+		if err = checkRequiredArgs(p.f.RequiredArgs(), args); err != nil {
+			return errors.Wrapf(err, "Required args missing for function %s", p.f.Name())
+		}
 
-		p.args = args
+		if err = checkSupportedArgs(p.f.Arguments(), args); err != nil {
+			return errors.Wrapf(err, "Checking supported args for function %s.", p.f.Name())
+		}
+
+		p.args = args
 	}
-	// Execute the function
-	return p.f.Exec(ctx, tp, p.args)
+	return nil
 }
 
 func renderFuncArgs(
diff --git a/pkg/testutil/fixture.go b/pkg/testutil/fixture.go
index 5652a61c58..a969e522ca 100644
--- a/pkg/testutil/fixture.go
+++ b/pkg/testutil/fixture.go
@@ -116,7 +116,7 @@ func ObjectStoreProfileOrSkip(c *check.C, osType objectstore.ProviderType, locat
 func GetEnvOrSkip(c *check.C, varName string) string {
 	v := os.Getenv(varName)
 	if v == "" {
-		reason := fmt.Sprintf("Test %s requires the environemnt variable '%s'", c.TestName(), varName)
+		reason := fmt.Sprintf("Test %s requires the environment variable '%s'", c.TestName(), varName)
 		c.Skip(reason)
 	}
 	return v
diff --git a/pkg/testutil/mockblockstorage/mockblockstorage.go b/pkg/testutil/mockblockstorage/mockblockstorage.go
index a3df8e744d..d36e38e565 100644
--- a/pkg/testutil/mockblockstorage/mockblockstorage.go
+++ b/pkg/testutil/mockblockstorage/mockblockstorage.go
@@ -43,7 +43,7 @@ var _ getter.Getter = (*mockGetter)(nil)
 
 type mockGetter struct{}
 
-// NewGetter retuns a new mockGetter
+// NewGetter returns a new mockGetter
 func NewGetter() getter.Getter {
 	return &mockGetter{}
 }