From b95d27956eeb6a5a395ecae85312baca048a7bce Mon Sep 17 00:00:00 2001
From: "Anas H. Sulaiman"
Date: Mon, 6 May 2024 12:43:01 -0400
Subject: [PATCH] remove unmaintained errors pkg (#552)

The errors package (https://github.com/pkg/errors) was deprecated in 2021
and is no longer maintained. I converted wrapped errors to standard-library
wrapping (fmt.Errorf with the %w verb), assuming the stack trace was not
intended to be included in these calls. For the calls that explicitly
included a stack trace, I opted to remove it in favour of proper error
wrapping and propagation. I haven't needed to look at stack traces so far,
but I'm not sure whether others rely on them. This can be re-added later if
needed.
---
 go.mod                           |  1 -
 go.sum                           |  2 -
 go/pkg/cas/BUILD.bazel           |  2 -
 go/pkg/cas/client.go             | 15 +++---
 go/pkg/cas/upload.go             | 56 +++++++++++-----------
 go/pkg/cas/upload_test.go        |  2 +-
 go/pkg/chunker/BUILD.bazel       |  1 -
 go/pkg/chunker/chunker.go        |  4 +-
 go/pkg/client/BUILD.bazel        |  2 -
 go/pkg/client/bytestream.go      |  5 +-
 go/pkg/client/bytestream_test.go |  3 +-
 go/pkg/client/capabilities.go    |  5 +-
 go/pkg/client/cas_upload.go      |  3 +-
 go/pkg/client/client.go          |  6 +--
 go/pkg/client/exec.go            | 20 ++++----
 go/pkg/client/tree.go            |  6 +--
 go/pkg/errors/BUILD.bazel        |  8 ----
 go/pkg/errors/errors.go          | 81 --------------------------------
 go/pkg/retry/BUILD.bazel         |  1 -
 go/pkg/retry/retry.go            |  7 ++-
 go/pkg/tool/BUILD.bazel          |  1 -
 go/pkg/tool/tool.go              | 10 ++--
 go_deps.bzl                      |  6 ---
 23 files changed, 69 insertions(+), 178 deletions(-)
 delete mode 100644 go/pkg/errors/BUILD.bazel
 delete mode 100644 go/pkg/errors/errors.go

diff --git a/go.mod b/go.mod
index 8403fd38c..ad7865bee 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,6 @@ require (
 	github.com/google/go-cmp v0.5.9
 	github.com/google/uuid v1.3.0
 	github.com/klauspost/compress v1.17.8
-	github.com/pkg/errors v0.9.1
 	github.com/pkg/xattr v0.4.4
 	golang.org/x/oauth2 v0.10.0
 	golang.org/x/sync v0.6.0
diff --git a/go.sum b/go.sum
index 454aef1b4..377acdaf7 100644
--- a/go.sum
+++ b/go.sum
@@ -48,8 +48,6 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
 github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/xattr v0.4.4 h1:FSoblPdYobYoKCItkqASqcrKCxRn9Bgurz0sCBwzO5g=
 github.com/pkg/xattr v0.4.4/go.mod h1:sBD3RAqlr8Q+RC3FutZcikpT8nyDrIEEBw2J744gVWs=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/go/pkg/cas/BUILD.bazel b/go/pkg/cas/BUILD.bazel
index ef1ea73ac..5cf98bbc4 100644
--- a/go/pkg/cas/BUILD.bazel
+++ b/go/pkg/cas/BUILD.bazel
@@ -17,7 +17,6 @@ go_library(
         "@com_github_golang_glog//:go_default_library",
         "@com_github_google_uuid//:uuid",
         "@com_github_klauspost_compress//zstd:go_default_library",
-        "@com_github_pkg_errors//:go_default_library",
         "@go_googleapis//google/bytestream:bytestream_go_proto",
         "@org_golang_google_api//support/bundler:go_default_library",
         "@org_golang_google_grpc//:go_default_library",
@@ -42,7 +41,6 @@ go_test(
         "//go/pkg/fakes",
         "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto",
         "@com_github_google_go_cmp//cmp:go_default_library",
-        "@com_github_pkg_errors//:go_default_library",
"@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes:go_default_library", "@org_golang_google_grpc//status:go_default_library", diff --git a/go/pkg/cas/client.go b/go/pkg/cas/client.go index 487297e72..b1fbc77b0 100644 --- a/go/pkg/cas/client.go +++ b/go/pkg/cas/client.go @@ -10,7 +10,6 @@ import ( "time" // Redundant imports are required for the google3 mirror. Aliases should not be changed. - "github.com/pkg/errors" "golang.org/x/sync/semaphore" bsgrpc "google.golang.org/genproto/googleapis/bytestream" "google.golang.org/grpc" @@ -209,13 +208,13 @@ func (c *ClientConfig) Validate() error { } if err := c.FindMissingBlobs.validate(); err != nil { - return errors.Wrap(err, "FindMissingBlobs") + return fmt.Errorf("FindMissingBlobs: %w", err) } if err := c.BatchUpdateBlobs.validate(); err != nil { - return errors.Wrap(err, "BatchUpdateBlobs") + return fmt.Errorf("BatchUpdateBlobs: %w", err) } if err := c.ByteStreamWrite.validate(); err != nil { - return errors.Wrap(err, "BatchUpdateBlobs") + return fmt.Errorf("BatchUpdateBlobs: %w", err) } return nil } @@ -242,7 +241,7 @@ func NewClient(ctx context.Context, conn grpc.ClientConnInterface, instanceName func NewClientWithConfig(ctx context.Context, conn grpc.ClientConnInterface, instanceName string, config ClientConfig) (*Client, error) { switch err := config.Validate(); { case err != nil: - return nil, errors.Wrap(err, "invalid config") + return nil, fmt.Errorf("invalid config: %w", err) case conn == nil: return nil, fmt.Errorf("conn is unspecified") case instanceName == "": @@ -258,7 +257,7 @@ func NewClientWithConfig(ctx context.Context, conn grpc.ClientConnInterface, ins } if !client.Config.IgnoreCapabilities { if err := client.checkCapabilities(ctx); err != nil { - return nil, errors.Wrapf(err, "checking capabilities") + return nil, fmt.Errorf("checking capabilities: %w", err) } } @@ -315,11 +314,11 @@ func (c *Client) withRetries(ctx context.Context, f func(context.Context) error) func (c *Client) checkCapabilities(ctx context.Context) error { caps, err := regrpc.NewCapabilitiesClient(c.conn).GetCapabilities(ctx, &repb.GetCapabilitiesRequest{InstanceName: c.InstanceName}) if err != nil { - return errors.Wrapf(err, "GetCapabilities RPC") + return fmt.Errorf("GetCapabilities RPC: %w", err) } if err := digest.CheckCapabilities(caps); err != nil { - return errors.Wrapf(err, "digest function mismatch") + return fmt.Errorf("digest function mismatch: %w", err) } if c.Config.BatchUpdateBlobs.MaxSizeBytes > int(caps.CacheCapabilities.MaxBatchTotalSizeBytes) { diff --git a/go/pkg/cas/upload.go b/go/pkg/cas/upload.go index 0ce88c7b5..101b4cf06 100644 --- a/go/pkg/cas/upload.go +++ b/go/pkg/cas/upload.go @@ -14,10 +14,10 @@ import ( "sync/atomic" "time" + "errors" log "github.com/golang/glog" "github.com/google/uuid" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" "google.golang.org/api/support/bundler" "google.golang.org/grpc/status" @@ -102,7 +102,7 @@ type UploadInput struct { // If the file is a danging symlink, then its digest is unknown. func (in *UploadInput) Digest(relPath string) (digest.Digest, error) { if in.cleanPath == "" { - return digest.Digest{}, errors.Errorf("Digest called too soon") + return digest.Digest{}, errors.New("Digest called too soon") } relPath = filepath.Clean(relPath) @@ -118,15 +118,15 @@ func (in *UploadInput) Digest(relPath string) (digest.Digest, error) { // TODO(nodir): cache this syscall, perhaps using filemetadata package. 
info, err := os.Lstat(absPath) if err != nil { - return digest.Digest{}, errors.WithStack(err) + return digest.Digest{}, fmt.Errorf("failed to digest %q: %w", relPath, err) } key := makeFSCacheKey(absPath, info.Mode().IsRegular(), in.Exclude) switch val, err, loaded := in.u.fsCache.Load(key); { case !loaded: - return digest.Digest{}, errors.Wrapf(ErrDigestUnknown, "digest not found for %#v", absPath) + return digest.Digest{}, fmt.Errorf("digest not found for %#v: %w", absPath, ErrDigestUnknown) case err != nil: - return digest.Digest{}, errors.WithStack(err) + return digest.Digest{}, fmt.Errorf("lookup failed for %q: %w", relPath, err) default: return digest.NewFromProtoUnvalidated(val.(*digested).digest), nil } @@ -155,14 +155,14 @@ func (in *UploadInput) init(u *uploader) error { in.u = u if !filepath.IsAbs(in.Path) { - return errors.Errorf("%q is not absolute", in.Path) + return fmt.Errorf("%q is not absolute", in.Path) } in.cleanPath = filepath.Clean(in.Path) // Do not use os.Stat() here. We want to know if it is a symlink. var err error if in.pathInfo, err = os.Lstat(in.cleanPath); err != nil { - return errors.WithStack(err) + return fmt.Errorf("failed to initialize: %w", err) } // Process the allowlist. @@ -172,18 +172,18 @@ func (in *UploadInput) init(u *uploader) error { in.cleanAllowlist = oneDot case in.pathInfo.Mode().IsRegular(): - return errors.Errorf("the Allowlist is not supported for regular files") + return errors.New("the Allowlist is not supported for regular files") default: in.cleanAllowlist = make([]string, len(in.Allowlist)) for i, subPath := range in.Allowlist { if filepath.IsAbs(subPath) { - return errors.Errorf("the allowlisted path %q is not relative", subPath) + return fmt.Errorf("the allowlisted path %q is not relative", subPath) } cleanSubPath := filepath.Clean(subPath) if cleanSubPath == ".." || strings.HasPrefix(cleanSubPath, parentDirPrefix) { - return errors.Errorf("the allowlisted path %q is not contained by %q", subPath, in.Path) + return fmt.Errorf("the allowlisted path %q is not contained by %q", subPath, in.Path) } in.cleanAllowlist[i] = cleanSubPath } @@ -392,7 +392,7 @@ func (c *Client) Upload(ctx context.Context, opt UploadOptions, inputC <-chan *U } }) - return &UploadResult{Stats: u.stats, u: u}, errors.WithStack(eg.Wait()) + return &UploadResult{Stats: u.stats, u: u}, eg.Wait() } // uploader implements a concurrent multi-stage pipeline to read blobs from the @@ -433,11 +433,11 @@ type uploader struct { // startProcessing adds the item to the appropriate stage depending on its type. func (u *uploader) startProcessing(ctx context.Context, in *UploadInput) error { if !filepath.IsAbs(in.Path) { - return errors.Errorf("%q is not absolute", in.Path) + return fmt.Errorf("%q is not absolute", in.Path) } if err := in.init(u); err != nil { - return errors.WithStack(err) + return err } // Schedule a file system walk. @@ -463,13 +463,13 @@ func (u *uploader) startProcessing(ctx context.Context, in *UploadInput) error { var err error // TODO(nodir): cache this syscall too. 
if info, err = os.Lstat(absPath); err != nil { - return errors.WithStack(err) + return fmt.Errorf("failed to access %q: %w", absPath, err) } } switch dig, err := u.visitPath(ctx, absPath, info, in.Exclude); { case err != nil: - return errors.Wrapf(err, "%q", absPath) + return fmt.Errorf("%q: %w", absPath, err) case dig != nil: treeMu.Lock() in.tree[relPath] = dig @@ -479,7 +479,7 @@ func (u *uploader) startProcessing(ctx context.Context, in *UploadInput) error { }) } if err := localEg.Wait(); err != nil { - return errors.WithStack(err) + return err } log.Infof("done localEg %s", in.Path) // At this point, all allowlisted paths are digest'ed, and we only need to @@ -596,7 +596,7 @@ func (u *uploader) visitRegularFile(ctx context.Context, absPath string, info os if isLarge { // Read only a few large files at a time. if err := u.semLargeFile.Acquire(ctx, 1); err != nil { - return nil, errors.WithStack(err) + return nil, err } defer u.semLargeFile.Release(1) } @@ -640,7 +640,7 @@ func (u *uploader) visitRegularFile(ctx context.Context, absPath string, info os dig, err := digest.NewFromReader(f) region.End() if err != nil { - return nil, errors.Wrapf(err, "failed to compute hash") + return nil, fmt.Errorf("failed to compute hash: %w", err) } log.Infof("compute digest %s: %s", info.Name(), time.Since(now)) ret.Digest = dig.ToProto() @@ -658,7 +658,7 @@ func (u *uploader) visitRegularFile(ctx context.Context, absPath string, info os // https://github.com/bazelbuild/remote-apis/blob/0cd22f7b466ced15d7803e8845d08d3e8d2c51bc/build/bazel/remote/execution/v2/remote_execution.proto#L250-L254 if res, err := u.findMissingBlobs(ctx, []*uploadItem{item}); err != nil { - return nil, errors.Wrapf(err, "failed to check existence") + return nil, fmt.Errorf("failed to check existence: %w", err) } else if len(res.MissingBlobDigests) == 0 { log.Infof("the file already exists. do not upload %s", absPath) atomic.AddInt64(&u.stats.CacheHits.Digests, 1) @@ -756,7 +756,7 @@ func (u *uploader) visitDir(ctx context.Context, absPath string, pathExclude *re wgChildren.Wait() if subErr != nil { - return nil, errors.Wrapf(subErr, "failed to read the directory %q entirely", absPath) + return nil, fmt.Errorf("failed to read the directory %q entirely: %w", absPath, subErr) } item := uploadItemFromDirMsg(absPath, dir) @@ -778,7 +778,7 @@ func (u *uploader) visitDir(ctx context.Context, absPath string, pathExclude *re func (u *uploader) visitSymlink(ctx context.Context, absPath string, pathExclude *regexp.Regexp) (*digested, error) { target, err := os.Readlink(absPath) if err != nil { - return nil, errors.Wrapf(err, "os.ReadLink") + return nil, fmt.Errorf("os.ReadLink: %w", err) } // Determine absolute and relative paths of the target. @@ -809,7 +809,7 @@ func (u *uploader) visitSymlink(ctx context.Context, absPath string, pathExclude // Need to check symlink if AllowDanglingSymlinks is not set. targetInfo, err := os.Lstat(absTarget) if err != nil { - return nil, errors.Wrapf(err, "lstat to target of symlink (%s -> %s) has error", absPath, relTarget) + return nil, fmt.Errorf("lstat to target of symlink (%s -> %s) has error: %w", absPath, relTarget, err) } // TODO: detect cycles by symlink if needs to follow symlinks in this case. 
@@ -853,7 +853,7 @@ func (u *uploader) scheduleCheck(ctx context.Context, item *uploadItem) error { func (u *uploader) findMissingBlobs(ctx context.Context, items []*uploadItem) (res *repb.FindMissingBlobsResponse, err error) { if err := u.semFindMissingBlobs.Acquire(ctx, 1); err != nil { - return nil, errors.WithStack(err) + return nil, err } defer u.semFindMissingBlobs.Release(1) @@ -892,7 +892,7 @@ func (u *uploader) check(ctx context.Context, items []*uploadItem) error { missingBytes += d.SizeBytes item := byDigest[digest.NewFromProtoUnvalidated(d)] if err := u.scheduleUpload(ctx, item); err != nil { - return errors.Wrapf(err, "%q", item.Title) + return fmt.Errorf("%q: %w", item.Title, err) } } atomic.AddInt64(&u.stats.CacheMisses.Digests, int64(len(res.MissingBlobDigests))) @@ -907,7 +907,7 @@ func (u *uploader) scheduleUpload(ctx context.Context, item *uploadItem) error { if marshalledRequestSize(item.Digest) > int64(u.batchBundler.BundleByteLimit) { // There is no way this blob can fit in a batch request. u.eg.Go(func() error { - return errors.Wrap(u.stream(ctx, item, false), item.Title) + return fmt.Errorf("%q: %w", item.Title, u.stream(ctx, item, false)) }) return nil } @@ -915,7 +915,7 @@ func (u *uploader) scheduleUpload(ctx context.Context, item *uploadItem) error { // Since this blob is small enough, just read it entirely. contents, err := item.ReadAll() if err != nil { - return errors.Wrapf(err, "failed to read the item") + return fmt.Errorf("failed to read the item: %w", err) } req := &repb.BatchUpdateBlobsRequest_Request{Digest: item.Digest, Data: contents} return u.batchBundler.AddWait(ctx, req, proto.Size(req)) @@ -1033,11 +1033,11 @@ func (u *uploader) stream(ctx context.Context, item *uploadItem, updateCacheStat // here. return nil case err != nil: - return errors.Wrapf(err, "failed to read the file/blob") + return fmt.Errorf("failed to read the file/blob: %w", err) } if err := enc.Close(); err != nil { - return errors.Wrapf(err, "failed to close the zstd encoder") + return fmt.Errorf("failed to close the zstd encoder: %w", err) } return pw.Close() }) diff --git a/go/pkg/cas/upload_test.go b/go/pkg/cas/upload_test.go index e21367301..597c9802c 100644 --- a/go/pkg/cas/upload_test.go +++ b/go/pkg/cas/upload_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" + "errors" "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" diff --git a/go/pkg/chunker/BUILD.bazel b/go/pkg/chunker/BUILD.bazel index 69f28760e..bf6cae74f 100644 --- a/go/pkg/chunker/BUILD.bazel +++ b/go/pkg/chunker/BUILD.bazel @@ -9,7 +9,6 @@ go_library( "//go/pkg/reader", "//go/pkg/uploadinfo", "@com_github_klauspost_compress//zstd:go_default_library", - "@com_github_pkg_errors//:go_default_library", ], ) diff --git a/go/pkg/chunker/chunker.go b/go/pkg/chunker/chunker.go index f55566aac..213cd5564 100644 --- a/go/pkg/chunker/chunker.go +++ b/go/pkg/chunker/chunker.go @@ -5,8 +5,8 @@ import ( "fmt" "io" + "errors" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" "github.com/bazelbuild/remote-apis-sdks/go/pkg/reader" "github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo" @@ -110,7 +110,7 @@ func (c *Chunker) ChunkSize() int { func (c *Chunker) Reset() error { if c.r != nil { if err := c.r.SeekOffset(0); err != nil { - return errors.Wrapf(err, "failed to call SeekOffset(0) for %s", c.ue.Path) + return fmt.Errorf("failed to call SeekOffset(0) for %s: %w", c.ue.Path, err) } } c.offset = 0 diff --git 
a/go/pkg/client/BUILD.bazel b/go/pkg/client/BUILD.bazel index b555e58fd..3d7819256 100644 --- a/go/pkg/client/BUILD.bazel +++ b/go/pkg/client/BUILD.bazel @@ -31,7 +31,6 @@ go_library( "@com_github_golang_glog//:go_default_library", "@com_github_google_uuid//:uuid", "@com_github_klauspost_compress//zstd:go_default_library", - "@com_github_pkg_errors//:go_default_library", "@go_googleapis//google/bytestream:bytestream_go_proto", "@go_googleapis//google/longrunning:longrunning_go_proto", "@go_googleapis//google/rpc:errdetails_go_proto", @@ -79,7 +78,6 @@ go_test( "@com_github_google_go_cmp//cmp:go_default_library", "@com_github_google_go_cmp//cmp/cmpopts:go_default_library", "@com_github_klauspost_compress//zstd:go_default_library", - "@com_github_pkg_errors//:go_default_library", "@go_googleapis//google/bytestream:bytestream_go_proto", "@go_googleapis//google/longrunning:longrunning_go_proto", "@go_googleapis//google/rpc:status_go_proto", diff --git a/go/pkg/client/bytestream.go b/go/pkg/client/bytestream.go index d84f4261a..0a1bffd97 100644 --- a/go/pkg/client/bytestream.go +++ b/go/pkg/client/bytestream.go @@ -8,7 +8,6 @@ import ( "os" log "github.com/golang/glog" - "github.com/pkg/errors" bspb "google.golang.org/genproto/googleapis/bytestream" "github.com/bazelbuild/remote-apis-sdks/go/pkg/chunker" @@ -34,7 +33,7 @@ func (c *Client) WriteBytesAtRemoteOffset(ctx context.Context, name string, data ue := uploadinfo.EntryFromBlob(data) ch, err := chunker.New(ue, false, int(c.ChunkMaxSize)) if err != nil { - return 0, errors.Wrap(err, "failed to create a chunk") + return 0, fmt.Errorf("failed to create a chunk: %w", err) } writtenBytes, err := c.writeChunked(ctx, name, ch, doNotFinalize, initialOffset) if err != nil { @@ -49,7 +48,7 @@ func (c *Client) writeChunked(ctx context.Context, name string, ch *chunker.Chun closure := func() error { // Retry by starting the stream from the beginning. if err := ch.Reset(); err != nil { - return errors.Wrap(err, "failed to Reset") + return fmt.Errorf("failed to Reset: %w", err) } totalBytes = int64(0) // TODO(olaola): implement resumable uploads. initialOffset passed in allows to diff --git a/go/pkg/client/bytestream_test.go b/go/pkg/client/bytestream_test.go index 6fc44655d..de4d0d622 100644 --- a/go/pkg/client/bytestream_test.go +++ b/go/pkg/client/bytestream_test.go @@ -6,7 +6,6 @@ import ( "net" "testing" - "github.com/pkg/errors" "google.golang.org/grpc" // Redundant imports are required for the google3 mirror. Aliases should not be changed. 
@@ -232,7 +231,7 @@ func (b *ByteStream) Write(stream bsgrpc.ByteStream_WriteServer) error { defer stream.SendAndClose(&bspb.WriteResponse{}) req, err := stream.Recv() if err != nil { - return errors.Wrap(err, "failed to write") + return fmt.Errorf("failed to write: %w", err) } ls, ok := b.logStreams[req.GetResourceName()] diff --git a/go/pkg/client/capabilities.go b/go/pkg/client/capabilities.go index 48fa2005a..45f9326a8 100644 --- a/go/pkg/client/capabilities.go +++ b/go/pkg/client/capabilities.go @@ -2,9 +2,10 @@ package client import ( "context" + "fmt" + "errors" "github.com/bazelbuild/remote-apis-sdks/go/pkg/digest" - "github.com/pkg/errors" repb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" ) @@ -22,7 +23,7 @@ func (c *Client) CheckCapabilities(ctx context.Context) (err error) { } if err := digest.CheckCapabilities(c.serverCaps); err != nil { - return errors.Wrapf(err, "digest function mismatch") + return fmt.Errorf("digest function mismatch: %w", err) } if c.serverCaps.CacheCapabilities != nil { diff --git a/go/pkg/client/cas_upload.go b/go/pkg/client/cas_upload.go index cb6b0d94d..4833f4e44 100644 --- a/go/pkg/client/cas_upload.go +++ b/go/pkg/client/cas_upload.go @@ -16,7 +16,6 @@ import ( log "github.com/golang/glog" "github.com/google/uuid" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -568,7 +567,7 @@ func (c *Client) uploadNonUnified(ctx context.Context, data ...*uploadinfo.Entry } if dg.Size != int64(len(data)) { - return errors.Errorf("blob size changed while uploading, given:%d now:%d for %s", dg.Size, int64(len(data)), ue.Path) + return fmt.Errorf("blob size changed while uploading, given:%d now:%d for %s", dg.Size, int64(len(data)), ue.Path) } bchMap[dg] = data diff --git a/go/pkg/client/client.go b/go/pkg/client/client.go index 92877e7e1..f99f25858 100644 --- a/go/pkg/client/client.go +++ b/go/pkg/client/client.go @@ -14,13 +14,13 @@ import ( "sync" "time" + "errors" "github.com/bazelbuild/remote-apis-sdks/go/pkg/actas" "github.com/bazelbuild/remote-apis-sdks/go/pkg/balancer" "github.com/bazelbuild/remote-apis-sdks/go/pkg/chunker" "github.com/bazelbuild/remote-apis-sdks/go/pkg/digest" "github.com/bazelbuild/remote-apis-sdks/go/pkg/retry" "github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo" - "github.com/pkg/errors" "golang.org/x/oauth2" "golang.org/x/sync/semaphore" "google.golang.org/grpc" @@ -1140,7 +1140,7 @@ func (c *Client) DeleteOperation(ctx context.Context, req *oppb.DeleteOperationR // https://github.com/grpc/grpc-go/issues/3115 func statusWrap(err error) error { if st, ok := status.FromError(err); ok { - return status.Errorf(st.Code(), errors.WithStack(err).Error()) + return status.Errorf(st.Code(), err.Error()) } - return errors.WithStack(err) + return err } diff --git a/go/pkg/client/exec.go b/go/pkg/client/exec.go index 679b734ce..ce0f51fa4 100644 --- a/go/pkg/client/exec.go +++ b/go/pkg/client/exec.go @@ -3,6 +3,7 @@ package client import ( "context" "errors" + "fmt" "io" "sort" "time" @@ -16,7 +17,6 @@ import ( // Redundant imports are required for the google3 mirror. Aliases should not be changed. 
regrpc "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" repb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - gerrors "github.com/pkg/errors" oppb "google.golang.org/genproto/googleapis/longrunning" dpb "google.golang.org/protobuf/types/known/durationpb" ) @@ -85,13 +85,13 @@ func (c *Client) ExecuteAction(ctx context.Context, ac *Action) (*repb.ActionRes // Upload any remaining inputs. if err := c.WriteBlobs(ctx, ac.InputFiles); err != nil { - return nil, gerrors.WithMessage(err, "uploading input files to the CAS") + return nil, fmt.Errorf("uploading input files to the CAS: %w", err) } log.V(1).Info("Executing job") res, err = c.executeJob(ctx, ac.SkipCache, acDg) if err != nil { - return res, gerrors.WithMessage(err, "executing an action") + return res, fmt.Errorf("executing an action: %w", err) } return res, nil @@ -109,7 +109,7 @@ func (c *Client) CheckActionCache(ctx context.Context, acDg *repb.Digest) (*repb case codes.NotFound: return nil, nil default: - return nil, gerrors.WithMessage(err, "checking the action cache") + return nil, fmt.Errorf("checking the action cache: %w", err) } } @@ -121,7 +121,7 @@ func (c *Client) executeJob(ctx context.Context, skipCache bool, acDg *repb.Dige } op, err := c.ExecuteAndWait(ctx, execReq) if err != nil { - return nil, gerrors.WithMessage(err, "execution error") + return nil, fmt.Errorf("execution error: %w", err) } switch r := op.Result.(type) { @@ -130,10 +130,10 @@ func (c *Client) executeJob(ctx context.Context, skipCache bool, acDg *repb.Dige case *oppb.Operation_Response: res := new(repb.ExecuteResponse) if err := r.Response.UnmarshalTo(res); err != nil { - return nil, gerrors.WithMessage(err, "extracting ExecuteResponse from execution operation") + return nil, fmt.Errorf("extracting ExecuteResponse from execution operation: %w", err) } if st := status.FromProto(res.Status); st.Code() != codes.OK { - return res.Result, gerrors.WithMessage(StatusDetailedError(st), "job failed with error") + return res.Result, fmt.Errorf("job failed with error: %w", StatusDetailedError(st)) } return res.Result, nil default: @@ -148,7 +148,7 @@ func (c *Client) executeJob(ctx context.Context, skipCache bool, acDg *repb.Dige func (c *Client) PrepAction(ctx context.Context, ac *Action) (*repb.Digest, *repb.ActionResult, error) { comDg, err := c.WriteProto(ctx, buildCommand(ac)) if err != nil { - return nil, nil, gerrors.WithMessage(err, "storing Command proto") + return nil, nil, fmt.Errorf("storing Command proto: %w", err) } reAc := &repb.Action{ @@ -164,7 +164,7 @@ func (c *Client) PrepAction(ctx context.Context, ac *Action) (*repb.Digest, *rep acBlob, err := proto.Marshal(reAc) if err != nil { - return nil, nil, gerrors.WithMessage(err, "marshalling Action proto") + return nil, nil, fmt.Errorf("marshalling Action proto: %w", err) } acDg := digest.NewFromBlob(acBlob).ToProto() @@ -182,7 +182,7 @@ func (c *Client) PrepAction(ctx context.Context, ac *Action) (*repb.Digest, *rep // No cache hit, or we didn't check. Upload the action instead. 
if _, err := c.WriteBlob(ctx, acBlob); err != nil { - return nil, nil, gerrors.WithMessage(err, "uploading action to the CAS") + return nil, nil, fmt.Errorf("uploading action to the CAS: %w", err) } return acDg, nil, nil diff --git a/go/pkg/client/tree.go b/go/pkg/client/tree.go index f8d921e9f..8e3a88e47 100644 --- a/go/pkg/client/tree.go +++ b/go/pkg/client/tree.go @@ -10,12 +10,12 @@ import ( "sort" "strings" + "errors" cpb "github.com/bazelbuild/remote-apis-sdks/go/api/command" "github.com/bazelbuild/remote-apis-sdks/go/pkg/command" "github.com/bazelbuild/remote-apis-sdks/go/pkg/digest" "github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata" "github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo" - "github.com/pkg/errors" repb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" log "github.com/golang/glog" @@ -325,10 +325,10 @@ func loadFiles(execRoot, localWorkingDir, remoteWorkingDir string, excl []*comma // error unless materialization of symlinks pointing outside the // exec root is enabled. if !opts.MaterializeOutsideExecRoot { - return errors.Wrapf(err, "failed to determine the target of symlink %q as a child of %q", normPath, execRoot) + return fmt.Errorf("failed to determine the target of symlink %q as a child of %q: %w", normPath, execRoot, err) } if meta.Symlink.IsDangling { - return errors.Errorf("failed to materialize dangling symlink %q with target %q", normPath, meta.Symlink.Target) + return fmt.Errorf("failed to materialize dangling symlink %q with target %q", normPath, meta.Symlink.Target) } goto processNonSymlink } diff --git a/go/pkg/errors/BUILD.bazel b/go/pkg/errors/BUILD.bazel deleted file mode 100644 index 6364043bf..000000000 --- a/go/pkg/errors/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "errors", - srcs = ["errors.go"], - importpath = "github.com/bazelbuild/remote-apis-sdks/go/pkg/errors", - visibility = ["//visibility:public"], -) diff --git a/go/pkg/errors/errors.go b/go/pkg/errors/errors.go deleted file mode 100644 index 36d005e6d..000000000 --- a/go/pkg/errors/errors.go +++ /dev/null @@ -1,81 +0,0 @@ -// Package errors provides the ability to wrap multiple errors while maintaining API compatibility with the standard package. -// This functionality is a drop-in replacement from go1.20. Remove this package once the SDK is updated to g1.20. -package errors - -import "errors" - -type joinError struct { - errs []error -} - -// Error joins the texts of the wrapped errors using newline as the delimiter. -func (e *joinError) Error() string { - var b []byte - for i, err := range e.errs { - if i > 0 { - b = append(b, '\n') - } - b = append(b, err.Error()...) - } - return string(b) -} - -// Join wraps the specified errors into one error that prints the entire list using newline as the delimiter. -func Join(errs ...error) error { - n := 0 - for _, err := range errs { - if err != nil { - n++ - } - } - - if n == 0 { - return nil - } - - // Unlike go1.20, this allows for efficient and convenient wrapping of errors without additional guards. - // E.g. 
the following code returns err as is without any allocations if errClose is nil: - // errClose := f.Close(); err = errors.Join(errClose, err) - if n == 1 { - for _, err := range errs { - if err != nil { - return err - } - } - } - - e := &joinError{ - errs: make([]error, 0, n), - } - for _, err := range errs { - if err != nil { - e.errs = append(e.errs, err) - } - } - return e -} - -// Is implements the corresponding interface allowing the joinError type to be compatible with -// errors.Is(error) bool of the standard package without having to override it. -func (e *joinError) Is(target error) bool { - if target == nil || e == nil { - return e == target - } - - for _, e := range e.errs { - if errors.Is(e, target) { - return true - } - } - return false -} - -// Is delegates to the standard package. -func Is(err, target error) bool { - return errors.Is(err, target) -} - -// New delegates to the standard package. -func New(text string) error { - return errors.New(text) -} diff --git a/go/pkg/retry/BUILD.bazel b/go/pkg/retry/BUILD.bazel index 30d290124..6c420e13e 100644 --- a/go/pkg/retry/BUILD.bazel +++ b/go/pkg/retry/BUILD.bazel @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "@com_github_golang_glog//:go_default_library", - "@com_github_pkg_errors//:go_default_library", "@org_golang_google_grpc//codes:go_default_library", "@org_golang_google_grpc//status:go_default_library", ], diff --git a/go/pkg/retry/retry.go b/go/pkg/retry/retry.go index f25222e8f..678ca78c3 100644 --- a/go/pkg/retry/retry.go +++ b/go/pkg/retry/retry.go @@ -7,14 +7,13 @@ package retry import ( "context" - stderrors "errors" + "errors" "fmt" "math/rand" "sync" "time" log "github.com/golang/glog" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -67,7 +66,7 @@ func Always(error) bool { return true } func TransientOnly(err error) bool { // Retry RPC timeouts. Note that we do *not* retry context cancellations (context.Cancelled); // if the user wants to back out of the call we should let them. 
- if stderrors.Is(err, context.DeadlineExceeded) { + if errors.Is(err, context.DeadlineExceeded) { return true } s, ok := status.FromError(err) @@ -113,7 +112,7 @@ func WithPolicy(ctx context.Context, shouldRetry ShouldRetry, bp BackoffPolicy, spb.Message = fmt.Sprintf("retry budget exhausted (%d attempts): ", bp.maxAttempts) + spb.Message return status.ErrorProto(spb) } - return errors.Wrapf(err, "retry budget exhausted (%d attempts)", bp.maxAttempts) + return fmt.Errorf("retry budget exhausted (%d attempts): %w", bp.maxAttempts, err) } select { diff --git a/go/pkg/tool/BUILD.bazel b/go/pkg/tool/BUILD.bazel index e6a2e06fe..2e98d3270 100644 --- a/go/pkg/tool/BUILD.bazel +++ b/go/pkg/tool/BUILD.bazel @@ -17,7 +17,6 @@ go_library( "//go/pkg/uploadinfo", "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto", "@com_github_golang_glog//:go_default_library", - "@com_github_pkg_errors//:go_default_library", "@org_golang_google_protobuf//encoding/prototext:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_x_sync//errgroup:go_default_library", diff --git a/go/pkg/tool/tool.go b/go/pkg/tool/tool.go index aa36c9290..604a0c71b 100644 --- a/go/pkg/tool/tool.go +++ b/go/pkg/tool/tool.go @@ -13,8 +13,8 @@ import ( "strings" "time" + "errors" log "github.com/golang/glog" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" @@ -289,7 +289,7 @@ func (c *Client) UploadBlob(ctx context.Context, path string) error { func (c *Client) UploadBlobV2(ctx context.Context, path string) error { casC, err := cas.NewClient(ctx, c.GrpcClient.Connection(), c.GrpcClient.InstanceName) if err != nil { - return errors.WithStack(err) + return err } inputC := make(chan *cas.UploadInput) @@ -305,10 +305,10 @@ func (c *Client) UploadBlobV2(ctx context.Context, path string) error { eg.Go(func() error { _, err := casC.Upload(ctx, cas.UploadOptions{}, inputC) - return errors.WithStack(err) + return err }) - return errors.WithStack(eg.Wait()) + return eg.Wait() } // DownloadDirectory downloads a an input root from the remote cache into the specified path. @@ -413,7 +413,7 @@ func (c *Client) DownloadAction(ctx context.Context, actionDigest, outputPath st input = strings.ToLower(input) if !(input == "yes" || input == "y") { - return errors.Errorf("operation aborted.") + return errors.New("operation aborted.") } } // If the user confirms, remove the existing directory and create a new one diff --git a/go_deps.bzl b/go_deps.bzl index 79d6c8407..417225f5b 100644 --- a/go_deps.bzl +++ b/go_deps.bzl @@ -121,12 +121,6 @@ def remote_apis_sdks_go_deps(): sum = "h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=", version = "v1.17.8", ) - go_repository( - name = "com_github_pkg_errors", - importpath = "github.com/pkg/errors", - sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=", - version = "v0.9.1", - ) go_repository( name = "com_github_pkg_xattr", importpath = "github.com/pkg/xattr",
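
For reference, a minimal sketch of the conversion pattern applied throughout this patch (illustrative only, not part of the diff; the errInvalid and readBlob names are hypothetical): github.com/pkg/errors calls such as errors.Wrap/Wrapf and errors.WithStack become fmt.Errorf with the %w verb, or plain error propagation where only a stack trace was being added, so errors.Is and errors.As still see the wrapped cause.

package main

import (
	"errors"
	"fmt"
	"os"
)

// Hypothetical sentinel error used to show that wrapping via %w keeps errors.Is working.
var errInvalid = errors.New("invalid config")

// Before (github.com/pkg/errors), wrapping also attached a stack trace:
//   return errors.Wrapf(err, "reading %q", path)
//   return errors.WithStack(err)
// After (standard library), as done in this patch:
func readBlob(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// %w wraps the cause so callers can still unwrap it.
		return fmt.Errorf("reading %q: %w", path, err)
	}
	return nil
}

func main() {
	err := fmt.Errorf("checking capabilities: %w", errInvalid)
	fmt.Println(err)                        // checking capabilities: invalid config
	fmt.Println(errors.Is(err, errInvalid)) // true: %w preserves errors.Is
	fmt.Println(readBlob("/no/such/blob"))  // reading "/no/such/blob": open /no/such/blob: ...
}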