remove unmaintained errors pkg (#552)
The errors package was deprecated in 2021: https://github.com/pkg/errors

I converted wrapped errors to the standard library's wrapping, assuming the stack trace was not intended to be included in these calls.

I opted to remove the stack trace from the calls that explicitly included it, in favour of proper error wrapping and propagation. I haven't needed to look at stack traces so far, but I'm not sure whether others rely on them. This can be re-added later if needed.
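For reference, the conversion applied throughout the diff follows a mechanical pattern. The sketch below is illustrative only, not code from this commit; `doWork` and `errSentinel` are made-up names.

```go
package main

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("sentinel failure") // hypothetical root cause

func doWork() error { return errSentinel } // hypothetical failing call

func main() {
	// Old style (github.com/pkg/errors), which attached stack traces:
	//   errors.Wrap(err, "context")          // annotate + stack trace
	//   errors.Wrapf(err, "context %d", n)   // formatted annotation + stack trace
	//   errors.Errorf("bad input")           // new error + stack trace
	//   errors.WithStack(err)                // stack trace only, no message
	//
	// New style (standard library), used throughout this commit:
	if err := doWork(); err != nil {
		wrapped := fmt.Errorf("context: %w", err) // annotate, no stack trace
		fmt.Println(wrapped)                      // context: sentinel failure
		fmt.Println(errors.Is(wrapped, errSentinel)) // true: %w keeps the chain intact
	}
	_ = errors.New("bad input") // replaces errors.Errorf for constant messages
	// errors.WithStack has no standard-library equivalent; this commit either
	// drops those call sites or substitutes a descriptive fmt.Errorf wrap.
}
```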
mrahs authored May 6, 2024
1 parent e64dd90 commit b95d279
Showing 23 changed files with 69 additions and 178 deletions.
1 change: 0 additions & 1 deletion go.mod
@@ -8,7 +8,6 @@ require (
github.com/google/go-cmp v0.5.9
github.com/google/uuid v1.3.0
github.com/klauspost/compress v1.17.8
- github.com/pkg/errors v0.9.1
github.com/pkg/xattr v0.4.4
golang.org/x/oauth2 v0.10.0
golang.org/x/sync v0.6.0
2 changes: 0 additions & 2 deletions go.sum
@@ -48,8 +48,6 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
- github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
- github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/xattr v0.4.4 h1:FSoblPdYobYoKCItkqASqcrKCxRn9Bgurz0sCBwzO5g=
github.com/pkg/xattr v0.4.4/go.mod h1:sBD3RAqlr8Q+RC3FutZcikpT8nyDrIEEBw2J744gVWs=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
2 changes: 0 additions & 2 deletions go/pkg/cas/BUILD.bazel
@@ -17,7 +17,6 @@ go_library(
"@com_github_golang_glog//:go_default_library",
"@com_github_google_uuid//:uuid",
"@com_github_klauspost_compress//zstd:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@go_googleapis//google/bytestream:bytestream_go_proto",
"@org_golang_google_api//support/bundler:go_default_library",
"@org_golang_google_grpc//:go_default_library",
@@ -42,7 +41,6 @@ go_test(
"//go/pkg/fakes",
"@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto",
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
15 changes: 7 additions & 8 deletions go/pkg/cas/client.go
@@ -10,7 +10,6 @@ import (
"time"

// Redundant imports are required for the google3 mirror. Aliases should not be changed.
"github.com/pkg/errors"
"golang.org/x/sync/semaphore"
bsgrpc "google.golang.org/genproto/googleapis/bytestream"
"google.golang.org/grpc"
@@ -209,13 +208,13 @@ func (c *ClientConfig) Validate() error {
}

if err := c.FindMissingBlobs.validate(); err != nil {
return errors.Wrap(err, "FindMissingBlobs")
return fmt.Errorf("FindMissingBlobs: %w", err)
}
if err := c.BatchUpdateBlobs.validate(); err != nil {
return errors.Wrap(err, "BatchUpdateBlobs")
return fmt.Errorf("BatchUpdateBlobs: %w", err)
}
if err := c.ByteStreamWrite.validate(); err != nil {
return errors.Wrap(err, "BatchUpdateBlobs")
return fmt.Errorf("BatchUpdateBlobs: %w", err)
}
return nil
}
@@ -242,7 +241,7 @@ func NewClient(ctx context.Context, conn grpc.ClientConnInterface, instanceName
func NewClientWithConfig(ctx context.Context, conn grpc.ClientConnInterface, instanceName string, config ClientConfig) (*Client, error) {
switch err := config.Validate(); {
case err != nil:
- return nil, errors.Wrap(err, "invalid config")
+ return nil, fmt.Errorf("invalid config: %w", err)
case conn == nil:
return nil, fmt.Errorf("conn is unspecified")
case instanceName == "":
@@ -258,7 +257,7 @@ func NewClientWithConfig(ctx context.Context, conn grpc.ClientConnInterface, ins
}
if !client.Config.IgnoreCapabilities {
if err := client.checkCapabilities(ctx); err != nil {
- return nil, errors.Wrapf(err, "checking capabilities")
+ return nil, fmt.Errorf("checking capabilities: %w", err)
}
}

@@ -315,11 +314,11 @@ func (c *Client) withRetries(ctx context.Context, f func(context.Context) error)
func (c *Client) checkCapabilities(ctx context.Context) error {
caps, err := regrpc.NewCapabilitiesClient(c.conn).GetCapabilities(ctx, &repb.GetCapabilitiesRequest{InstanceName: c.InstanceName})
if err != nil {
return errors.Wrapf(err, "GetCapabilities RPC")
return fmt.Errorf("GetCapabilities RPC: %w", err)
}

if err := digest.CheckCapabilities(caps); err != nil {
return errors.Wrapf(err, "digest function mismatch")
return fmt.Errorf("digest function mismatch: %w", err)
}

if c.Config.BatchUpdateBlobs.MaxSizeBytes > int(caps.CacheCapabilities.MaxBatchTotalSizeBytes) {
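A side effect of the style used in Validate and checkCapabilities above: each layer contributes a short prefix, so a failure surfaces as a breadcrumb trail in place of the removed stack traces. A minimal sketch under assumed names (`rpcCall` stands in for the real gRPC call):

```go
package main

import (
	"errors"
	"fmt"
)

var errUnavailable = errors.New("connection refused") // hypothetical root cause

func rpcCall() error { return errUnavailable }

func checkCapabilities() error {
	if err := rpcCall(); err != nil {
		return fmt.Errorf("GetCapabilities RPC: %w", err)
	}
	return nil
}

func newClient() error {
	if err := checkCapabilities(); err != nil {
		return fmt.Errorf("checking capabilities: %w", err)
	}
	return nil
}

func main() {
	err := newClient()
	fmt.Println(err) // checking capabilities: GetCapabilities RPC: connection refused
	fmt.Println(errors.Is(err, errUnavailable)) // true: the chain survives both wraps
}
```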
56 changes: 28 additions & 28 deletions go/pkg/cas/upload.go
@@ -14,10 +14,10 @@ import (
"sync/atomic"
"time"

"errors"
log "github.com/golang/glog"
"github.com/google/uuid"
"github.com/klauspost/compress/zstd"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/api/support/bundler"
"google.golang.org/grpc/status"
@@ -102,7 +102,7 @@ type UploadInput struct {
// If the file is a dangling symlink, then its digest is unknown.
func (in *UploadInput) Digest(relPath string) (digest.Digest, error) {
if in.cleanPath == "" {
- return digest.Digest{}, errors.Errorf("Digest called too soon")
+ return digest.Digest{}, errors.New("Digest called too soon")
}

relPath = filepath.Clean(relPath)
@@ -118,15 +118,15 @@ func (in *UploadInput) Digest(relPath string) (digest.Digest, error) {
// TODO(nodir): cache this syscall, perhaps using filemetadata package.
info, err := os.Lstat(absPath)
if err != nil {
- return digest.Digest{}, errors.WithStack(err)
+ return digest.Digest{}, fmt.Errorf("failed to digest %q: %w", relPath, err)
}

key := makeFSCacheKey(absPath, info.Mode().IsRegular(), in.Exclude)
switch val, err, loaded := in.u.fsCache.Load(key); {
case !loaded:
- return digest.Digest{}, errors.Wrapf(ErrDigestUnknown, "digest not found for %#v", absPath)
+ return digest.Digest{}, fmt.Errorf("digest not found for %#v: %w", absPath, ErrDigestUnknown)
case err != nil:
- return digest.Digest{}, errors.WithStack(err)
+ return digest.Digest{}, fmt.Errorf("lookup failed for %q: %w", relPath, err)
default:
return digest.NewFromProtoUnvalidated(val.(*digested).digest), nil
}
@@ -155,14 +155,14 @@ func (in *UploadInput) init(u *uploader) error {
in.u = u

if !filepath.IsAbs(in.Path) {
return errors.Errorf("%q is not absolute", in.Path)
return fmt.Errorf("%q is not absolute", in.Path)
}
in.cleanPath = filepath.Clean(in.Path)

// Do not use os.Stat() here. We want to know if it is a symlink.
var err error
if in.pathInfo, err = os.Lstat(in.cleanPath); err != nil {
- return errors.WithStack(err)
+ return fmt.Errorf("failed to initialize: %w", err)
}

// Process the allowlist.
Expand All @@ -172,18 +172,18 @@ func (in *UploadInput) init(u *uploader) error {
in.cleanAllowlist = oneDot

case in.pathInfo.Mode().IsRegular():
return errors.Errorf("the Allowlist is not supported for regular files")
return errors.New("the Allowlist is not supported for regular files")

default:
in.cleanAllowlist = make([]string, len(in.Allowlist))
for i, subPath := range in.Allowlist {
if filepath.IsAbs(subPath) {
return errors.Errorf("the allowlisted path %q is not relative", subPath)
return fmt.Errorf("the allowlisted path %q is not relative", subPath)
}

cleanSubPath := filepath.Clean(subPath)
if cleanSubPath == ".." || strings.HasPrefix(cleanSubPath, parentDirPrefix) {
return errors.Errorf("the allowlisted path %q is not contained by %q", subPath, in.Path)
return fmt.Errorf("the allowlisted path %q is not contained by %q", subPath, in.Path)
}
in.cleanAllowlist[i] = cleanSubPath
}
@@ -392,7 +392,7 @@ func (c *Client) Upload(ctx context.Context, opt UploadOptions, inputC <-chan *U
}
})

return &UploadResult{Stats: u.stats, u: u}, errors.WithStack(eg.Wait())
return &UploadResult{Stats: u.stats, u: u}, eg.Wait()
}

// uploader implements a concurrent multi-stage pipeline to read blobs from the
@@ -433,11 +433,11 @@ type uploader struct {
// startProcessing adds the item to the appropriate stage depending on its type.
func (u *uploader) startProcessing(ctx context.Context, in *UploadInput) error {
if !filepath.IsAbs(in.Path) {
return errors.Errorf("%q is not absolute", in.Path)
return fmt.Errorf("%q is not absolute", in.Path)
}

if err := in.init(u); err != nil {
- return errors.WithStack(err)
+ return err
}

// Schedule a file system walk.
@@ -463,13 +463,13 @@ func (u *uploader) startProcessing(ctx context.Context, in *UploadInput) error {
var err error
// TODO(nodir): cache this syscall too.
if info, err = os.Lstat(absPath); err != nil {
- return errors.WithStack(err)
+ return fmt.Errorf("failed to access %q: %w", absPath, err)
}
}

switch dig, err := u.visitPath(ctx, absPath, info, in.Exclude); {
case err != nil:
return errors.Wrapf(err, "%q", absPath)
return fmt.Errorf("%q: %w", absPath, err)
case dig != nil:
treeMu.Lock()
in.tree[relPath] = dig
Expand All @@ -479,7 +479,7 @@ func (u *uploader) startProcessing(ctx context.Context, in *UploadInput) error {
})
}
if err := localEg.Wait(); err != nil {
- return errors.WithStack(err)
+ return err
}
log.Infof("done localEg %s", in.Path)
// At this point, all allowlisted paths are digest'ed, and we only need to
@@ -596,7 +596,7 @@ func (u *uploader) visitRegularFile(ctx context.Context, absPath string, info os
if isLarge {
// Read only a few large files at a time.
if err := u.semLargeFile.Acquire(ctx, 1); err != nil {
- return nil, errors.WithStack(err)
+ return nil, err
}
defer u.semLargeFile.Release(1)
}
@@ -640,7 +640,7 @@ func (u *uploader) visitRegularFile(ctx context.Context, absPath string, info os
dig, err := digest.NewFromReader(f)
region.End()
if err != nil {
return nil, errors.Wrapf(err, "failed to compute hash")
return nil, fmt.Errorf("failed to compute hash: %w", err)
}
log.Infof("compute digest %s: %s", info.Name(), time.Since(now))
ret.Digest = dig.ToProto()
@@ -658,7 +658,7 @@ func (u *uploader) visitRegularFile(ctx context.Context, absPath string, info os
// https://github.com/bazelbuild/remote-apis/blob/0cd22f7b466ced15d7803e8845d08d3e8d2c51bc/build/bazel/remote/execution/v2/remote_execution.proto#L250-L254

if res, err := u.findMissingBlobs(ctx, []*uploadItem{item}); err != nil {
return nil, errors.Wrapf(err, "failed to check existence")
return nil, fmt.Errorf("failed to check existence: %w", err)
} else if len(res.MissingBlobDigests) == 0 {
log.Infof("the file already exists. do not upload %s", absPath)
atomic.AddInt64(&u.stats.CacheHits.Digests, 1)
@@ -756,7 +756,7 @@ func (u *uploader) visitDir(ctx context.Context, absPath string, pathExclude *re

wgChildren.Wait()
if subErr != nil {
- return nil, errors.Wrapf(subErr, "failed to read the directory %q entirely", absPath)
+ return nil, fmt.Errorf("failed to read the directory %q entirely: %w", absPath, subErr)
}

item := uploadItemFromDirMsg(absPath, dir)
@@ -778,7 +778,7 @@ func (u *uploader) visitSymlink(ctx context.Context, absPath string, pathExclude
func (u *uploader) visitSymlink(ctx context.Context, absPath string, pathExclude *regexp.Regexp) (*digested, error) {
target, err := os.Readlink(absPath)
if err != nil {
return nil, errors.Wrapf(err, "os.ReadLink")
return nil, fmt.Errorf("os.ReadLink: %w", err)
}

// Determine absolute and relative paths of the target.
@@ -809,7 +809,7 @@ func (u *uploader) visitSymlink(ctx context.Context, absPath string, pathExclude
// Need to check symlink if AllowDanglingSymlinks is not set.
targetInfo, err := os.Lstat(absTarget)
if err != nil {
return nil, errors.Wrapf(err, "lstat to target of symlink (%s -> %s) has error", absPath, relTarget)
return nil, fmt.Errorf("lstat to target of symlink (%s -> %s) has error: %w", absPath, relTarget, err)
}

// TODO: detect cycles by symlink if needs to follow symlinks in this case.
@@ -853,7 +853,7 @@ func (u *uploader) scheduleCheck(ctx context.Context, item *uploadItem) error {

func (u *uploader) findMissingBlobs(ctx context.Context, items []*uploadItem) (res *repb.FindMissingBlobsResponse, err error) {
if err := u.semFindMissingBlobs.Acquire(ctx, 1); err != nil {
- return nil, errors.WithStack(err)
+ return nil, err
}
defer u.semFindMissingBlobs.Release(1)

@@ -892,7 +892,7 @@ func (u *uploader) check(ctx context.Context, items []*uploadItem) error {
missingBytes += d.SizeBytes
item := byDigest[digest.NewFromProtoUnvalidated(d)]
if err := u.scheduleUpload(ctx, item); err != nil {
return errors.Wrapf(err, "%q", item.Title)
return fmt.Errorf("%q: %w", item.Title, err)
}
}
atomic.AddInt64(&u.stats.CacheMisses.Digests, int64(len(res.MissingBlobDigests)))
@@ -907,15 +907,15 @@ func (u *uploader) scheduleUpload(ctx context.Context, item *uploadItem) error {
if marshalledRequestSize(item.Digest) > int64(u.batchBundler.BundleByteLimit) {
// There is no way this blob can fit in a batch request.
u.eg.Go(func() error {
- return errors.Wrap(u.stream(ctx, item, false), item.Title)
+ // errors.Wrap passed a nil error through as nil; fmt.Errorf does not,
+ // so guard explicitly to keep the goroutine returning nil on success.
+ if err := u.stream(ctx, item, false); err != nil {
+ 	return fmt.Errorf("%q: %w", item.Title, err)
+ }
+ return nil
})
return nil
}

// Since this blob is small enough, just read it entirely.
contents, err := item.ReadAll()
if err != nil {
return errors.Wrapf(err, "failed to read the item")
return fmt.Errorf("failed to read the item: %w", err)
}
req := &repb.BatchUpdateBlobsRequest_Request{Digest: item.Digest, Data: contents}
return u.batchBundler.AddWait(ctx, req, proto.Size(req))
@@ -1033,11 +1033,11 @@ func (u *uploader) stream(ctx context.Context, item *uploadItem, updateCacheStat
// here.
return nil
case err != nil:
return errors.Wrapf(err, "failed to read the file/blob")
return fmt.Errorf("failed to read the file/blob: %w", err)
}

if err := enc.Close(); err != nil {
return errors.Wrapf(err, "failed to close the zstd encoder")
return fmt.Errorf("failed to close the zstd encoder: %w", err)
}
return pw.Close()
})
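Note that Digest above wraps the package's ErrDigestUnknown sentinel with %w, so callers can still detect it with errors.Is. A sketch of the caller side, with a stand-in sentinel and a hypothetical `digestOf` helper (the real sentinel lives in go/pkg/cas):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for the cas package's ErrDigestUnknown sentinel.
var ErrDigestUnknown = errors.New("digest is unknown")

// digestOf mimics a cache lookup that misses.
func digestOf(absPath string) error {
	return fmt.Errorf("digest not found for %#v: %w", absPath, ErrDigestUnknown)
}

func main() {
	err := digestOf("/tmp/blob")
	if errors.Is(err, ErrDigestUnknown) { // matches through the %w wrap
		fmt.Println("unknown digest; caller can fall back to hashing the file")
	}
}
```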
2 changes: 1 addition & 1 deletion go/pkg/cas/upload_test.go
@@ -11,8 +11,8 @@ import (
"testing"
"time"

"errors"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
1 change: 0 additions & 1 deletion go/pkg/chunker/BUILD.bazel
@@ -9,7 +9,6 @@ go_library(
"//go/pkg/reader",
"//go/pkg/uploadinfo",
"@com_github_klauspost_compress//zstd:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

4 changes: 2 additions & 2 deletions go/pkg/chunker/chunker.go
@@ -5,8 +5,8 @@ import (
"fmt"
"io"

"errors"
"github.com/klauspost/compress/zstd"
"github.com/pkg/errors"

"github.com/bazelbuild/remote-apis-sdks/go/pkg/reader"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo"
@@ -110,7 +110,7 @@ func (c *Chunker) ChunkSize() int {
func (c *Chunker) Reset() error {
if c.r != nil {
if err := c.r.SeekOffset(0); err != nil {
return errors.Wrapf(err, "failed to call SeekOffset(0) for %s", c.ue.Path)
return fmt.Errorf("failed to call SeekOffset(0) for %s: %w", c.ue.Path, err)
}
}
c.offset = 0
2 changes: 0 additions & 2 deletions go/pkg/client/BUILD.bazel
@@ -31,7 +31,6 @@ go_library(
"@com_github_golang_glog//:go_default_library",
"@com_github_google_uuid//:uuid",
"@com_github_klauspost_compress//zstd:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@go_googleapis//google/bytestream:bytestream_go_proto",
"@go_googleapis//google/longrunning:longrunning_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
@@ -79,7 +78,6 @@ go_test(
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
"@com_github_klauspost_compress//zstd:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@go_googleapis//google/bytestream:bytestream_go_proto",
"@go_googleapis//google/longrunning:longrunning_go_proto",
"@go_googleapis//google/rpc:status_go_proto",
5 changes: 2 additions & 3 deletions go/pkg/client/bytestream.go
@@ -8,7 +8,6 @@ import (
"os"

log "github.com/golang/glog"
"github.com/pkg/errors"
bspb "google.golang.org/genproto/googleapis/bytestream"

"github.com/bazelbuild/remote-apis-sdks/go/pkg/chunker"
@@ -34,7 +33,7 @@ func (c *Client) WriteBytesAtRemoteOffset(ctx context.Context, name string, data
ue := uploadinfo.EntryFromBlob(data)
ch, err := chunker.New(ue, false, int(c.ChunkMaxSize))
if err != nil {
- return 0, errors.Wrap(err, "failed to create a chunk")
+ return 0, fmt.Errorf("failed to create a chunk: %w", err)
}
writtenBytes, err := c.writeChunked(ctx, name, ch, doNotFinalize, initialOffset)
if err != nil {
@@ -49,7 +48,7 @@ func (c *Client) writeChunked(ctx context.Context, name string, ch *chunker.Chun
closure := func() error {
// Retry by starting the stream from the beginning.
if err := ch.Reset(); err != nil {
return errors.Wrap(err, "failed to Reset")
return fmt.Errorf("failed to Reset: %w", err)
}
totalBytes = int64(0)
// TODO(olaola): implement resumable uploads. initialOffset passed in allows to
[Diffs for the remaining 13 changed files did not load.]