diff --git a/go.mod b/go.mod index 4664be5519f8..dc96a79af7ea 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/fatih/color v1.16.0 github.com/felixge/fgprof v0.9.4 github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c - github.com/fsouza/fake-gcs-server v1.47.7 + github.com/fsouza/fake-gcs-server v1.7.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 github.com/go-redis/redis/v8 v8.11.5 @@ -166,14 +166,12 @@ require ( github.com/go-ini/ini v1.67.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/goccy/go-json v0.10.3 // indirect - github.com/gorilla/handlers v1.5.2 // indirect github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/ncw/swift v1.0.53 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect - github.com/pkg/xattr v0.4.10 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rivo/uniseg v0.4.7 // indirect diff --git a/go.sum b/go.sum index 53733480e554..1e74b10a6933 100644 --- a/go.sum +++ b/go.sum @@ -645,8 +645,8 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fsouza/fake-gcs-server v1.47.7 h1:56/U4rKY081TaNbq0gHWi7/71UxC2KROqcnrD9BRJhs= -github.com/fsouza/fake-gcs-server v1.47.7/go.mod h1:4vPUynN8/zZlxk5Jpy6LvvTTxItdTAObK4DYnp89Jys= +github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8= +github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= @@ -1034,9 +1034,8 @@ github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdy github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= -github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= @@ -1597,8 +1596,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= -github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -2298,7 +2295,6 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2462,6 +2458,7 @@ gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= diff --git a/pkg/storage/chunk/client/gcp/fixtures.go b/pkg/storage/chunk/client/gcp/fixtures.go index 153a906776cf..982e6ad11307 100644 --- a/pkg/storage/chunk/client/gcp/fixtures.go +++ b/pkg/storage/chunk/client/gcp/fixtures.go @@ -49,10 +49,19 @@ func (f *fixture) Clients() ( } f.gcssrv = fakestorage.NewServer(nil) - opts := fakestorage.CreateBucketOpts{ - Name: "chunks", - } - f.gcssrv.CreateBucketWithOpts(opts) + /* + // Note: fake-gcs-server upgrade does not work in the `dist` tooling builds. + // Leave at v1.7.0 until the issue is resolved. + // Example failure: https://github.com/grafana/loki/actions/runs/10744853958/job/29802951861 + // Open issue: https://github.com/fsouza/fake-gcs-server/issues/1739 + // Once the issue is resolved, this code block can be used to replace the + // `CreateBucket` call below. 
+ opts := fakestorage.CreateBucketOpts{ + Name: "chunks", + } + f.gcssrv.CreateBucketWithOpts(opts) + */ + f.gcssrv.CreateBucket("chunks") conn, err := grpc.NewClient(f.btsrv.Addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { diff --git a/vendor/github.com/fsouza/fake-gcs-server/LICENSE b/vendor/github.com/fsouza/fake-gcs-server/LICENSE index a619aaecef9d..529faa468606 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/LICENSE +++ b/vendor/github.com/fsouza/fake-gcs-server/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) Francisco Souza +Copyright (c) 2017-2019, Francisco Souza All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go index 4026f1a4a0de..e2fa2ad3716e 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go @@ -6,161 +6,49 @@ package fakestorage import ( "encoding/json" - "errors" - "fmt" - "io" "net/http" - "regexp" - "github.com/fsouza/fake-gcs-server/internal/backend" "github.com/gorilla/mux" ) -var bucketRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$`) - // CreateBucket creates a bucket inside the server, so any API calls that // require the bucket name will recognize this bucket. // // If the bucket already exists, this method does nothing. -// -// Deprecated: use CreateBucketWithOpts. func (s *Server) CreateBucket(name string) { - err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: false, DefaultEventBasedHold: false}) - if err != nil { - panic(err) - } -} - -func (s *Server) updateBucket(r *http.Request) jsonResponse { - bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] - attrsToUpdate := getBucketAttrsToUpdate(r.Body) - err := s.backend.UpdateBucket(bucketName, attrsToUpdate) + s.mtx.Lock() + defer s.mtx.Unlock() + err := s.backend.CreateBucket(name) if err != nil { panic(err) } - return jsonResponse{} -} - -func getBucketAttrsToUpdate(body io.ReadCloser) backend.BucketAttrs { - var data struct { - DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` - Versioning bucketVersioning `json:"versioning,omitempty"` - } - err := json.NewDecoder(body).Decode(&data) - if err != nil { - panic(err) - } - attrsToUpdate := backend.BucketAttrs{ - DefaultEventBasedHold: data.DefaultEventBasedHold, - VersioningEnabled: data.Versioning.Enabled, - } - return attrsToUpdate -} - -// CreateBucketOpts defines the properties of a bucket you can create with -// CreateBucketWithOpts. -type CreateBucketOpts struct { - Name string - VersioningEnabled bool - DefaultEventBasedHold bool -} - -// CreateBucketWithOpts creates a bucket inside the server, so any API calls that -// require the bucket name will recognize this bucket. Use CreateBucketOpts to -// customize the options for this bucket -// -// If the underlying backend returns an error, this method panics. 
-func (s *Server) CreateBucketWithOpts(opts CreateBucketOpts) { - err := s.backend.CreateBucket(opts.Name, backend.BucketAttrs{VersioningEnabled: opts.VersioningEnabled, DefaultEventBasedHold: opts.DefaultEventBasedHold}) - if err != nil { - panic(err) - } -} - -func (s *Server) createBucketByPost(r *http.Request) jsonResponse { - // Minimal version of Bucket from google.golang.org/api/storage/v1 - - var data struct { - Name string `json:"name,omitempty"` - Versioning *bucketVersioning `json:"versioning,omitempty"` - DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` - } - - // Read the bucket props from the request body JSON - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&data); err != nil { - return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest} - } - name := data.Name - versioning := false - if data.Versioning != nil { - versioning = data.Versioning.Enabled - } - defaultEventBasedHold := data.DefaultEventBasedHold - if err := validateBucketName(name); err != nil { - return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest} - } - - _, err := s.backend.GetBucket(name) - if err == nil { - return jsonResponse{ - errorMessage: fmt.Sprintf( - "A Cloud Storage bucket named '%s' already exists. "+ - "Try another name. Bucket names must be globally unique "+ - "across all Google Cloud projects, including those "+ - "outside of your organization.", name), - status: http.StatusConflict, - } - } - - // Create the named bucket - if err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: versioning, DefaultEventBasedHold: defaultEventBasedHold}); err != nil { - return jsonResponse{errorMessage: err.Error()} - } - - // Return the created bucket: - bucket, err := s.backend.GetBucket(name) - if err != nil { - return jsonResponse{errorMessage: err.Error()} - } - return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)} } -func (s *Server) listBuckets(r *http.Request) jsonResponse { - buckets, err := s.backend.ListBuckets() - if err != nil { - return jsonResponse{errorMessage: err.Error()} - } - return jsonResponse{data: newListBucketsResponse(buckets, s.options.BucketsLocation)} -} +func (s *Server) listBuckets(w http.ResponseWriter, r *http.Request) { + s.mtx.RLock() + defer s.mtx.RUnlock() -func (s *Server) getBucket(r *http.Request) jsonResponse { - bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] - bucket, err := s.backend.GetBucket(bucketName) - if err != nil { - return jsonResponse{status: http.StatusNotFound} - } - return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)} -} - -func (s *Server) deleteBucket(r *http.Request) jsonResponse { - bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] - err := s.backend.DeleteBucket(bucketName) - if err == backend.BucketNotFound { - return jsonResponse{status: http.StatusNotFound} - } - if err == backend.BucketNotEmpty { - return jsonResponse{status: http.StatusPreconditionFailed, errorMessage: err.Error()} - } + bucketNames, err := s.backend.ListBuckets() if err != nil { - return jsonResponse{status: http.StatusInternalServerError, errorMessage: err.Error()} + http.Error(w, err.Error(), http.StatusInternalServerError) + return } - return jsonResponse{} + resp := newListBucketsResponse(bucketNames) + json.NewEncoder(w).Encode(resp) } -func validateBucketName(bucketName string) error { - if !bucketRegexp.MatchString(bucketName) { - return errors.New("invalid bucket name") +func (s *Server) getBucket(w 
http.ResponseWriter, r *http.Request) { + bucketName := mux.Vars(r)["bucketName"] + s.mtx.RLock() + defer s.mtx.RUnlock() + encoder := json.NewEncoder(w) + if err := s.backend.GetBucket(bucketName); err != nil { + w.WriteHeader(http.StatusNotFound) + err := newErrorResponse(http.StatusNotFound, "Not found", nil) + encoder.Encode(err) + return } - return nil + resp := newBucketResponse(bucketName) + w.WriteHeader(http.StatusOK) + encoder.Encode(resp) } diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go deleted file mode 100644 index a57d154279a5..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go +++ /dev/null @@ -1,30 +0,0 @@ -package fakestorage - -import ( - "encoding/json" - "net/http" -) - -func (s *Server) updateServerConfig(r *http.Request) jsonResponse { - var configOptions struct { - ExternalUrl string `json:"externalUrl,omitempty"` - PublicHost string `json:"publicHost,omitempty"` - } - err := json.NewDecoder(r.Body).Decode(&configOptions) - if err != nil { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: "Update server config payload can not be parsed.", - } - } - - if configOptions.ExternalUrl != "" { - s.externalURL = configOptions.ExternalUrl - } - - if configOptions.PublicHost != "" { - s.publicHost = configOptions.PublicHost - } - - return jsonResponse{status: http.StatusOK} -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go deleted file mode 100644 index f16a7c5c1018..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go +++ /dev/null @@ -1,72 +0,0 @@ -package fakestorage - -import ( - "encoding/json" - "errors" - "net/http" - "os" - "syscall" - - "github.com/fsouza/fake-gcs-server/internal/backend" -) - -type jsonResponse struct { - status int - header http.Header - data any - errorMessage string -} - -type jsonHandler = func(r *http.Request) jsonResponse - -func jsonToHTTPHandler(h jsonHandler) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - resp := h(r) - w.Header().Set("Content-Type", "application/json") - for name, values := range resp.header { - for _, value := range values { - w.Header().Add(name, value) - } - } - - status := resp.getStatus() - var data any - if status > 399 { - data = newErrorResponse(status, resp.getErrorMessage(status), nil) - } else { - data = resp.data - } - - w.WriteHeader(status) - json.NewEncoder(w).Encode(data) - } -} - -func (r *jsonResponse) getStatus() int { - if r.status > 0 { - return r.status - } - if r.errorMessage != "" { - return http.StatusInternalServerError - } - return http.StatusOK -} - -func (r *jsonResponse) getErrorMessage(status int) string { - if r.errorMessage != "" { - return r.errorMessage - } - return http.StatusText(status) -} - -func errToJsonResponse(err error) jsonResponse { - status := 0 - var pathError *os.PathError - if errors.As(err, &pathError) && pathError.Err == syscall.ENAMETOOLONG { - status = http.StatusBadRequest - } - if err == backend.PreConditionFailed { - status = http.StatusPreconditionFailed - } - return jsonResponse{errorMessage: err.Error(), status: status} -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go index b228c787ae68..afaa2efeac76 100644 --- 
a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go @@ -7,14 +7,16 @@ package fakestorage import ( "net/http" "net/http/httptest" + + "github.com/gorilla/mux" ) type muxTransport struct { - handler http.Handler + router *mux.Router } func (t *muxTransport) RoundTrip(r *http.Request) (*http.Response, error) { w := httptest.NewRecorder() - t.handler.ServeHTTP(w, r) + t.router.ServeHTTP(w, r) return w.Result(), nil } diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go index 6c1533ffea45..bc1d472f36e3 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go @@ -5,365 +5,84 @@ package fakestorage import ( - "bytes" - "compress/gzip" "encoding/json" - "encoding/xml" - "errors" "fmt" - "io" "net/http" "sort" "strconv" "strings" - "time" - "cloud.google.com/go/storage" "github.com/fsouza/fake-gcs-server/internal/backend" - "github.com/fsouza/fake-gcs-server/internal/notification" "github.com/gorilla/mux" ) -var errInvalidGeneration = errors.New("invalid generation ID") - -// ObjectAttrs returns only the meta-data about an object without its contents. -type ObjectAttrs struct { - BucketName string - Name string - Size int64 - ContentType string - ContentEncoding string +// Object represents the object that is stored within the fake server. +type Object struct { + BucketName string `json:"-"` + Name string `json:"name"` + Content []byte `json:"-"` // Crc32c checksum of Content. calculated by server when it's upload methods are used. - Crc32c string - Md5Hash string - Etag string - ACL []storage.ACLRule - // Dates and generation can be manually injected, so you can do assertions on them, - // or let us fill these fields for you - Created time.Time - Updated time.Time - Deleted time.Time - CustomTime time.Time - Generation int64 - Metadata map[string]string + Crc32c string `json:"crc32c,omitempty"` + Md5Hash string `json:"md5hash,omitempty"` } -func (o *ObjectAttrs) id() string { +func (o *Object) id() string { return o.BucketName + "/" + o.Name } -type jsonObject struct { - BucketName string `json:"bucket"` - Name string `json:"name"` - Size int64 `json:"size,string"` - ContentType string `json:"contentType"` - ContentEncoding string `json:"contentEncoding"` - Crc32c string `json:"crc32c,omitempty"` - Md5Hash string `json:"md5Hash,omitempty"` - Etag string `json:"etag,omitempty"` - ACL []aclRule `json:"acl,omitempty"` - Created time.Time `json:"created,omitempty"` - Updated time.Time `json:"updated,omitempty"` - Deleted time.Time `json:"deleted,omitempty"` - CustomTime time.Time `json:"customTime,omitempty"` - Generation int64 `json:"generation,omitempty,string"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -// MarshalJSON for ObjectAttrs to use ACLRule instead of storage.ACLRule -func (o ObjectAttrs) MarshalJSON() ([]byte, error) { - temp := jsonObject{ - BucketName: o.BucketName, - Name: o.Name, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - Size: o.Size, - Crc32c: o.Crc32c, - Md5Hash: o.Md5Hash, - Etag: o.Etag, - Created: o.Created, - Updated: o.Updated, - Deleted: o.Deleted, - CustomTime: o.CustomTime, - Generation: o.Generation, - Metadata: o.Metadata, - } - temp.ACL = make([]aclRule, len(o.ACL)) - for i, ACL := range o.ACL { - temp.ACL[i] = aclRule(ACL) - } - return json.Marshal(temp) -} - 
-// UnmarshalJSON for ObjectAttrs to use ACLRule instead of storage.ACLRule -func (o *ObjectAttrs) UnmarshalJSON(data []byte) error { - var temp jsonObject - if err := json.Unmarshal(data, &temp); err != nil { - return err - } - o.BucketName = temp.BucketName - o.Name = temp.Name - o.ContentType = temp.ContentType - o.ContentEncoding = temp.ContentEncoding - o.Size = temp.Size - o.Crc32c = temp.Crc32c - o.Md5Hash = temp.Md5Hash - o.Etag = temp.Etag - o.Created = temp.Created - o.Updated = temp.Updated - o.Deleted = temp.Deleted - o.Generation = temp.Generation - o.Metadata = temp.Metadata - o.CustomTime = temp.CustomTime - o.ACL = make([]storage.ACLRule, len(temp.ACL)) - for i, ACL := range temp.ACL { - o.ACL[i] = storage.ACLRule(ACL) - } - - return nil -} - -// Object represents an object that is stored within the fake server. The -// content of this type is stored is buffered, i.e. it's stored in memory. -// Use StreamingObject to stream the content from a reader, e.g a file. -type Object struct { - ObjectAttrs - Content []byte `json:"-"` -} - -type noopSeekCloser struct { - io.ReadSeeker -} - -func (n noopSeekCloser) Close() error { - return nil -} - -func (o Object) StreamingObject() StreamingObject { - return StreamingObject{ - ObjectAttrs: o.ObjectAttrs, - Content: noopSeekCloser{bytes.NewReader(o.Content)}, - } -} - -// StreamingObject is the streaming version of Object. -type StreamingObject struct { - ObjectAttrs - Content io.ReadSeekCloser `json:"-"` -} - -func (o *StreamingObject) Close() error { - if o != nil && o.Content != nil { - return o.Content.Close() - } - return nil -} - -func (o *StreamingObject) BufferedObject() (Object, error) { - data, err := io.ReadAll(o.Content) - return Object{ - ObjectAttrs: o.ObjectAttrs, - Content: data, - }, err -} - -// ACLRule is an alias of storage.ACLRule to have custom JSON marshal -type aclRule storage.ACLRule - -// ProjectTeam is an alias of storage.ProjectTeam to have custom JSON marshal -type projectTeam storage.ProjectTeam - -// MarshalJSON for ACLRule to customize field names -func (acl aclRule) MarshalJSON() ([]byte, error) { - temp := struct { - Entity storage.ACLEntity `json:"entity"` - EntityID string `json:"entityId"` - Role storage.ACLRole `json:"role"` - Domain string `json:"domain"` - Email string `json:"email"` - ProjectTeam *projectTeam `json:"projectTeam"` - }{ - Entity: acl.Entity, - EntityID: acl.EntityID, - Role: acl.Role, - Domain: acl.Domain, - Email: acl.Email, - ProjectTeam: (*projectTeam)(acl.ProjectTeam), - } - return json.Marshal(temp) -} - -// UnmarshalJSON for ACLRule to customize field names -func (acl *aclRule) UnmarshalJSON(data []byte) error { - temp := struct { - Entity storage.ACLEntity `json:"entity"` - EntityID string `json:"entityId"` - Role storage.ACLRole `json:"role"` - Domain string `json:"domain"` - Email string `json:"email"` - ProjectTeam *projectTeam `json:"projectTeam"` - }{} - if err := json.Unmarshal(data, &temp); err != nil { - return err - } - acl.Entity = temp.Entity - acl.EntityID = temp.EntityID - acl.Role = temp.Role - acl.Domain = temp.Domain - acl.Email = temp.Email - acl.ProjectTeam = (*storage.ProjectTeam)(temp.ProjectTeam) - return nil -} - -// MarshalJSON for ProjectTeam to customize field names -func (team projectTeam) MarshalJSON() ([]byte, error) { - temp := struct { - ProjectNumber string `json:"projectNumber"` - Team string `json:"team"` - }{ - ProjectNumber: team.ProjectNumber, - Team: team.Team, - } - return json.Marshal(temp) -} - -// UnmarshalJSON for ProjectTeam to 
customize field names -func (team *projectTeam) UnmarshalJSON(data []byte) error { - temp := struct { - ProjectNumber string `json:"projectNumber"` - Team string `json:"team"` - }{} - if err := json.Unmarshal(data, &temp); err != nil { - return err - } - team.ProjectNumber = temp.ProjectNumber - team.Team = temp.Team - return nil -} +type objectList []Object -type objectAttrsList []ObjectAttrs - -func (o objectAttrsList) Len() int { +func (o objectList) Len() int { return len(o) } -func (o objectAttrsList) Less(i int, j int) bool { +func (o objectList) Less(i int, j int) bool { return o[i].Name < o[j].Name } -func (o *objectAttrsList) Swap(i int, j int) { +func (o *objectList) Swap(i int, j int) { d := *o d[i], d[j] = d[j], d[i] } -// CreateObject is the non-streaming version of CreateObjectStreaming. +// CreateObject stores the given object internally. // -// In addition to streaming, CreateObjectStreaming returns an error instead of -// panicking when an error occurs. +// If the bucket within the object doesn't exist, it also creates it. If the +// object already exists, it overrides the object. func (s *Server) CreateObject(obj Object) { - err := s.CreateObjectStreaming(obj.StreamingObject()) + s.mtx.Lock() + defer s.mtx.Unlock() + err := s.createObject(obj) if err != nil { panic(err) } } -// CreateObjectStreaming stores the given object internally. -// -// If the bucket within the object doesn't exist, it also creates it. If the -// object already exists, it overwrites the object. -func (s *Server) CreateObjectStreaming(obj StreamingObject) error { - obj, err := s.createObject(obj, backend.NoConditions{}) - if err != nil { - return err - } - obj.Close() - return nil -} - -func (s *Server) createObject(obj StreamingObject, conditions backend.Conditions) (StreamingObject, error) { - oldBackendObj, err := s.backend.GetObject(obj.BucketName, obj.Name) - // Calling Close before checking err is okay on objects, and the object - // may need to be closed whether or not there's an error. - defer oldBackendObj.Close() //lint:ignore SA5001 // see above - - prevVersionExisted := err == nil - - // The caller is responsible for closing the created object. - newBackendObj, err := s.backend.CreateObject(toBackendObjects([]StreamingObject{obj})[0], conditions) - if err != nil { - return StreamingObject{}, err - } - - var newObjEventAttr map[string]string - if prevVersionExisted { - newObjEventAttr = map[string]string{ - "overwroteGeneration": strconv.FormatInt(oldBackendObj.Generation, 10), - } - - oldObjEventAttr := map[string]string{ - "overwrittenByGeneration": strconv.FormatInt(newBackendObj.Generation, 10), - } - - bucket, _ := s.backend.GetBucket(obj.BucketName) - if bucket.VersioningEnabled { - s.eventManager.Trigger(&oldBackendObj, notification.EventArchive, oldObjEventAttr) - } else { - s.eventManager.Trigger(&oldBackendObj, notification.EventDelete, oldObjEventAttr) - } - } - - newObj := fromBackendObjects([]backend.StreamingObject{newBackendObj})[0] - s.eventManager.Trigger(&newBackendObj, notification.EventFinalize, newObjEventAttr) - return newObj, nil -} - -type ListOptions struct { - Prefix string - Delimiter string - Versions bool - StartOffset string - EndOffset string - IncludeTrailingDelimiter bool +func (s *Server) createObject(obj Object) error { + return s.backend.CreateObject(toBackendObjects([]Object{obj})[0]) } // ListObjects returns a sorted list of objects that match the given criteria, // or an error if the bucket doesn't exist. 
-// -// Deprecated: use ListObjectsWithOptions. -func (s *Server) ListObjects(bucketName, prefix, delimiter string, versions bool) ([]ObjectAttrs, []string, error) { - return s.ListObjectsWithOptions(bucketName, ListOptions{ - Prefix: prefix, - Delimiter: delimiter, - Versions: versions, - }) -} - -func (s *Server) ListObjectsWithOptions(bucketName string, options ListOptions) ([]ObjectAttrs, []string, error) { - backendObjects, err := s.backend.ListObjects(bucketName, options.Prefix, options.Versions) +func (s *Server) ListObjects(bucketName, prefix, delimiter string) ([]Object, []string, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + backendObjects, err := s.backend.ListObjects(bucketName) if err != nil { return nil, nil, err } - objects := fromBackendObjectsAttrs(backendObjects) - olist := objectAttrsList(objects) + objects := fromBackendObjects(backendObjects) + olist := objectList(objects) sort.Sort(&olist) - var respObjects []ObjectAttrs + var respObjects []Object prefixes := make(map[string]bool) for _, obj := range olist { - if !strings.HasPrefix(obj.Name, options.Prefix) { - continue - } - objName := strings.Replace(obj.Name, options.Prefix, "", 1) - delimPos := strings.Index(objName, options.Delimiter) - if options.Delimiter != "" && delimPos > -1 { - prefix := obj.Name[:len(options.Prefix)+delimPos+1] - if isInOffset(prefix, options.StartOffset, options.EndOffset) { - prefixes[prefix] = true - } - if options.IncludeTrailingDelimiter && obj.Name == prefix { - respObjects = append(respObjects, obj) - } - } else { - if isInOffset(obj.Name, options.StartOffset, options.EndOffset) { + if strings.HasPrefix(obj.Name, prefix) { + objName := strings.Replace(obj.Name, prefix, "", 1) + delimPos := strings.Index(objName, delimiter) + if delimiter != "" && delimPos > -1 { + prefixes[obj.Name[:len(prefix)+delimPos+1]] = true + } else { respObjects = append(respObjects, obj) } } @@ -376,781 +95,143 @@ func (s *Server) ListObjectsWithOptions(bucketName string, options ListOptions) return respObjects, respPrefixes, nil } -func isInOffset(name, startOffset, endOffset string) bool { - if endOffset != "" && startOffset != "" { - return strings.Compare(name, endOffset) < 0 && strings.Compare(name, startOffset) >= 0 - } else if endOffset != "" { - return strings.Compare(name, endOffset) < 0 - } else if startOffset != "" { - return strings.Compare(name, startOffset) >= 0 - } else { - return true - } -} - -func getCurrentIfZero(date time.Time) time.Time { - if date.IsZero() { - return time.Now() - } - return date -} - -func toBackendObjects(objects []StreamingObject) []backend.StreamingObject { - backendObjects := make([]backend.StreamingObject, 0, len(objects)) +func toBackendObjects(objects []Object) []backend.Object { + backendObjects := []backend.Object{} for _, o := range objects { - backendObjects = append(backendObjects, backend.StreamingObject{ - ObjectAttrs: backend.ObjectAttrs{ - BucketName: o.BucketName, - Name: o.Name, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ACL: o.ACL, - Created: getCurrentIfZero(o.Created).Format(timestampFormat), - Deleted: o.Deleted.Format(timestampFormat), - Updated: getCurrentIfZero(o.Updated).Format(timestampFormat), - CustomTime: o.CustomTime.Format(timestampFormat), - Generation: o.Generation, - Metadata: o.Metadata, - }, - Content: o.Content, - }) - } - return backendObjects -} - -func bufferedObjectsToBackendObjects(objects []Object) []backend.StreamingObject { - backendObjects := make([]backend.StreamingObject, 0, len(objects)) 
- for _, bufferedObject := range objects { - o := bufferedObject.StreamingObject() - backendObjects = append(backendObjects, backend.StreamingObject{ - ObjectAttrs: backend.ObjectAttrs{ - BucketName: o.BucketName, - Name: o.Name, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ACL: o.ACL, - Created: getCurrentIfZero(o.Created).Format(timestampFormat), - Deleted: o.Deleted.Format(timestampFormat), - Updated: getCurrentIfZero(o.Updated).Format(timestampFormat), - CustomTime: o.CustomTime.Format(timestampFormat), - Generation: o.Generation, - Metadata: o.Metadata, - Crc32c: o.Crc32c, - Md5Hash: o.Md5Hash, - Size: o.Size, - Etag: o.Etag, - }, - Content: o.Content, + backendObjects = append(backendObjects, backend.Object{ + BucketName: o.BucketName, + Name: o.Name, + Content: o.Content, + Crc32c: o.Crc32c, + Md5Hash: o.Md5Hash, }) } return backendObjects } -func fromBackendObjects(objects []backend.StreamingObject) []StreamingObject { - backendObjects := make([]StreamingObject, 0, len(objects)) +func fromBackendObjects(objects []backend.Object) []Object { + backendObjects := []Object{} for _, o := range objects { - backendObjects = append(backendObjects, StreamingObject{ - ObjectAttrs: ObjectAttrs{ - BucketName: o.BucketName, - Name: o.Name, - Size: o.Size, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - Crc32c: o.Crc32c, - Md5Hash: o.Md5Hash, - Etag: o.Etag, - ACL: o.ACL, - Created: convertTimeWithoutError(o.Created), - Deleted: convertTimeWithoutError(o.Deleted), - Updated: convertTimeWithoutError(o.Updated), - CustomTime: convertTimeWithoutError(o.CustomTime), - Generation: o.Generation, - Metadata: o.Metadata, - }, - Content: o.Content, + backendObjects = append(backendObjects, Object{ + BucketName: o.BucketName, + Name: o.Name, + Content: o.Content, + Crc32c: o.Crc32c, + Md5Hash: o.Md5Hash, }) } return backendObjects } -func fromBackendObjectsAttrs(objectAttrs []backend.ObjectAttrs) []ObjectAttrs { - oattrs := make([]ObjectAttrs, 0, len(objectAttrs)) - for _, o := range objectAttrs { - oattrs = append(oattrs, ObjectAttrs{ - BucketName: o.BucketName, - Name: o.Name, - Size: o.Size, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - Crc32c: o.Crc32c, - Md5Hash: o.Md5Hash, - Etag: o.Etag, - ACL: o.ACL, - Created: convertTimeWithoutError(o.Created), - Deleted: convertTimeWithoutError(o.Deleted), - Updated: convertTimeWithoutError(o.Updated), - CustomTime: convertTimeWithoutError(o.CustomTime), - Generation: o.Generation, - Metadata: o.Metadata, - }) - } - return oattrs -} - -func convertTimeWithoutError(t string) time.Time { - r, _ := time.Parse(timestampFormat, t) - return r -} - -// GetObject is the non-streaming version of GetObjectStreaming. +// GetObject returns the object with the given name in the given bucket, or an +// error if the object doesn't exist. func (s *Server) GetObject(bucketName, objectName string) (Object, error) { - streamingObject, err := s.GetObjectStreaming(bucketName, objectName) - if err != nil { - return Object{}, err - } - return streamingObject.BufferedObject() -} - -// GetObjectStreaming returns the object with the given name in the given -// bucket, or an error if the object doesn't exist. 
-func (s *Server) GetObjectStreaming(bucketName, objectName string) (StreamingObject, error) { backendObj, err := s.backend.GetObject(bucketName, objectName) - if err != nil { - return StreamingObject{}, err - } - obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0] - return obj, nil -} - -// GetObjectWithGeneration is the non-streaming version of -// GetObjectWithGenerationStreaming. -func (s *Server) GetObjectWithGeneration(bucketName, objectName string, generation int64) (Object, error) { - streamingObject, err := s.GetObjectWithGenerationStreaming(bucketName, objectName, generation) if err != nil { return Object{}, err } - return streamingObject.BufferedObject() -} - -// GetObjectWithGenerationStreaming returns the object with the given name and -// given generation ID in the given bucket, or an error if the object doesn't -// exist. -// -// If versioning is enabled, archived versions are considered. -func (s *Server) GetObjectWithGenerationStreaming(bucketName, objectName string, generation int64) (StreamingObject, error) { - backendObj, err := s.backend.GetObjectWithGeneration(bucketName, objectName, generation) - if err != nil { - return StreamingObject{}, err - } - obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0] + obj := fromBackendObjects([]backend.Object{backendObj})[0] return obj, nil } -func (s *Server) objectWithGenerationOnValidGeneration(bucketName, objectName, generationStr string) (StreamingObject, error) { - generation, err := strconv.ParseInt(generationStr, 10, 64) - if err != nil && generationStr != "" { - return StreamingObject{}, errInvalidGeneration - } else if generation > 0 { - return s.GetObjectWithGenerationStreaming(bucketName, objectName, generation) - } - return s.GetObjectStreaming(bucketName, objectName) -} - -func (s *Server) listObjects(r *http.Request) jsonResponse { - bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] - objs, prefixes, err := s.ListObjectsWithOptions(bucketName, ListOptions{ - Prefix: r.URL.Query().Get("prefix"), - Delimiter: r.URL.Query().Get("delimiter"), - Versions: r.URL.Query().Get("versions") == "true", - StartOffset: r.URL.Query().Get("startOffset"), - EndOffset: r.URL.Query().Get("endOffset"), - IncludeTrailingDelimiter: r.URL.Query().Get("includeTrailingDelimiter") == "true", - }) +func (s *Server) listObjects(w http.ResponseWriter, r *http.Request) { + bucketName := mux.Vars(r)["bucketName"] + prefix := r.URL.Query().Get("prefix") + delimiter := r.URL.Query().Get("delimiter") + objs, prefixes, err := s.ListObjects(bucketName, prefix, delimiter) + encoder := json.NewEncoder(w) if err != nil { - return jsonResponse{status: http.StatusNotFound} - } - return jsonResponse{data: newListObjectsResponse(objs, prefixes)} -} - -func (s *Server) xmlListObjects(r *http.Request) xmlResponse { - bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] - - opts := ListOptions{ - Prefix: r.URL.Query().Get("prefix"), - Delimiter: r.URL.Query().Get("delimiter"), - Versions: r.URL.Query().Get("versions") == "true", - } - - objs, prefixes, err := s.ListObjectsWithOptions(bucketName, opts) - if err != nil { - return xmlResponse{ - status: http.StatusInternalServerError, - errorMessage: err.Error(), - } - } - - result := ListBucketResult{ - Name: bucketName, - Delimiter: opts.Delimiter, - Prefix: opts.Prefix, - KeyCount: len(objs), - } - - if opts.Delimiter != "" { - for _, prefix := range prefixes { - result.CommonPrefixes = append(result.CommonPrefixes, CommonPrefix{Prefix: prefix}) - } - } - - for _, obj 
:= range objs { - result.Contents = append(result.Contents, Contents{ - Key: obj.Name, - Generation: obj.Generation, - Size: obj.Size, - LastModified: obj.Updated.Format(time.RFC3339), - ETag: ETag{Value: obj.Etag}, - }) - } - - raw, err := xml.Marshal(result) - if err != nil { - return xmlResponse{ - status: http.StatusInternalServerError, - errorMessage: err.Error(), - } - } - - return xmlResponse{ - status: http.StatusOK, - data: []byte(xml.Header + string(raw)), - } -} - -func (s *Server) getObject(w http.ResponseWriter, r *http.Request) { - if alt := r.URL.Query().Get("alt"); alt == "media" || r.Method == http.MethodHead { - s.downloadObject(w, r) + w.WriteHeader(http.StatusNotFound) + errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil) + encoder.Encode(errResp) return } - - handler := jsonToHTTPHandler(func(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - - obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation")) - // Calling Close before checking err is okay on objects, and the object - // may need to be closed whether or not there's an error. - defer obj.Close() //lint:ignore SA5001 // see above - if err != nil { - statusCode := http.StatusNotFound - var errMessage string - if errors.Is(err, errInvalidGeneration) { - statusCode = http.StatusBadRequest - errMessage = err.Error() - } - return jsonResponse{ - status: statusCode, - errorMessage: errMessage, - } - } - header := make(http.Header) - header.Set("Accept-Ranges", "bytes") - return jsonResponse{ - header: header, - data: newObjectResponse(obj.ObjectAttrs), - } - }) - - handler(w, r) -} - -func (s *Server) deleteObject(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"]) - // Calling Close before checking err is okay on objects, and the object - // may need to be closed whether or not there's an error. 
- defer obj.Close() //lint:ignore SA5001 // see above - if err == nil { - err = s.backend.DeleteObject(vars["bucketName"], vars["objectName"]) - } - if err != nil { - return jsonResponse{status: http.StatusNotFound} - } - bucket, _ := s.backend.GetBucket(obj.BucketName) - backendObj := toBackendObjects([]StreamingObject{obj})[0] - if bucket.VersioningEnabled { - s.eventManager.Trigger(&backendObj, notification.EventArchive, nil) - } else { - s.eventManager.Trigger(&backendObj, notification.EventDelete, nil) - } - return jsonResponse{} + encoder.Encode(newListObjectsResponse(objs, prefixes)) } -func (s *Server) listObjectACL(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - - obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"]) +func (s *Server) getObject(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + encoder := json.NewEncoder(w) + obj, err := s.GetObject(vars["bucketName"], vars["objectName"]) if err != nil { - return jsonResponse{status: http.StatusNotFound} + errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil) + w.WriteHeader(http.StatusNotFound) + encoder.Encode(errResp) + return } - defer obj.Close() - - return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)} + w.Header().Set("Accept-Ranges", "bytes") + encoder.Encode(newObjectResponse(obj)) } -func (s *Server) setObjectACL(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - - obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"]) +func (s *Server) deleteObject(w http.ResponseWriter, r *http.Request) { + s.mtx.Lock() + defer s.mtx.Unlock() + vars := mux.Vars(r) + err := s.backend.DeleteObject(vars["bucketName"], vars["objectName"]) if err != nil { - return jsonResponse{status: http.StatusNotFound} - } - defer obj.Close() - - var data struct { - Entity string - Role string - } - - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&data); err != nil { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: err.Error(), - } - } - - entity := storage.ACLEntity(data.Entity) - role := storage.ACLRole(data.Role) - obj.ACL = []storage.ACLRule{{ - Entity: entity, - Role: role, - }} - - obj, err = s.createObject(obj, backend.NoConditions{}) - if err != nil { - return errToJsonResponse(err) + errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil) + w.WriteHeader(http.StatusNotFound) + json.NewEncoder(w).Encode(errResp) + return } - defer obj.Close() - - return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)} + w.WriteHeader(http.StatusOK) } -func (s *Server) rewriteObject(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - obj, err := s.objectWithGenerationOnValidGeneration(vars["sourceBucket"], vars["sourceObject"], r.FormValue("sourceGeneration")) - // Calling Close before checking err is okay on objects, and the object - // may need to be closed whether or not there's an error. 
- defer obj.Close() //lint:ignore SA5001 // see above +func (s *Server) rewriteObject(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + obj, err := s.GetObject(vars["sourceBucket"], vars["sourceObject"]) if err != nil { - statusCode := http.StatusNotFound - var errMessage string - if errors.Is(err, errInvalidGeneration) { - statusCode = http.StatusBadRequest - errMessage = err.Error() - } - return jsonResponse{errorMessage: errMessage, status: statusCode} - } - - var metadata multipartMetadata - err = json.NewDecoder(r.Body).Decode(&metadata) - if err != nil && err != io.EOF { // The body is optional - return jsonResponse{errorMessage: "Invalid metadata", status: http.StatusBadRequest} - } - - // Only supplied metadata overwrites the new object's metdata - if len(metadata.Metadata) == 0 { - metadata.Metadata = obj.Metadata - } - if metadata.ContentType == "" { - metadata.ContentType = obj.ContentType - } - if metadata.ContentEncoding == "" { - metadata.ContentEncoding = obj.ContentEncoding + http.Error(w, "not found", http.StatusNotFound) + return } - dstBucket := vars["destinationBucket"] - newObject := StreamingObject{ - ObjectAttrs: ObjectAttrs{ - BucketName: dstBucket, - Name: vars["destinationObject"], - ACL: obj.ACL, - ContentType: metadata.ContentType, - ContentEncoding: metadata.ContentEncoding, - Metadata: metadata.Metadata, - }, - Content: obj.Content, + newObject := Object{ + BucketName: dstBucket, + Name: vars["destinationObject"], + Content: append([]byte(nil), obj.Content...), + Crc32c: obj.Crc32c, + Md5Hash: obj.Md5Hash, } - - created, err := s.createObject(newObject, backend.NoConditions{}) - if err != nil { - return errToJsonResponse(err) - } - defer created.Close() - - if vars["copyType"] == "copyTo" { - return jsonResponse{data: newObjectResponse(created.ObjectAttrs)} - } - return jsonResponse{data: newObjectRewriteResponse(created.ObjectAttrs)} + s.CreateObject(newObject) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(newObjectRewriteResponse(newObject)) } func (s *Server) downloadObject(w http.ResponseWriter, r *http.Request) { - vars := unescapeMuxVars(mux.Vars(r)) - obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation")) - // Calling Close before checking err is okay on objects, and the object - // may need to be closed whether or not there's an error. - defer obj.Close() //lint:ignore SA5001 // see above + vars := mux.Vars(r) + obj, err := s.GetObject(vars["bucketName"], vars["objectName"]) if err != nil { - statusCode := http.StatusNotFound - message := http.StatusText(statusCode) - if errors.Is(err, errInvalidGeneration) { - statusCode = http.StatusBadRequest - message = err.Error() - } - http.Error(w, message, statusCode) + http.Error(w, "not found", http.StatusNotFound) return } - - var content io.Reader - content = obj.Content status := http.StatusOK - - transcoded := false - ranged := false - start := int64(0) - lastByte := int64(0) - satisfiable := true - contentLength := int64(0) - - handledTranscoding := func() bool { - // This should also be false if the Cache-Control metadata field == "no-transform", - // but we don't currently support that field. 
- // See https://cloud.google.com/storage/docs/transcoding - - if obj.ContentEncoding == "gzip" && !strings.Contains(r.Header.Get("accept-encoding"), "gzip") { - // GCS will transparently decompress gzipped content, see - // https://cloud.google.com/storage/docs/transcoding - // In this case, any Range header is ignored and the full content is returned. - - // If the content is not a valid gzip file, ignore errors and continue - // without transcoding. Otherwise, return decompressed content. - gzipReader, err := gzip.NewReader(content) - if err == nil { - rawContent, err := io.ReadAll(gzipReader) - if err == nil { - transcoded = true - content = bytes.NewReader(rawContent) - contentLength = int64(len(rawContent)) - obj.Size = contentLength - return true - } - } - } - return false - } - - if !handledTranscoding() { - ranged, start, lastByte, satisfiable = s.handleRange(obj, r) - contentLength = lastByte - start + 1 - } - - if ranged && satisfiable { - _, err = obj.Content.Seek(start, io.SeekStart) - if err != nil { - http.Error(w, "could not seek", http.StatusInternalServerError) - return - } - content = io.LimitReader(obj.Content, contentLength) + start, end, content := s.handleRange(obj, r) + if len(content) != len(obj.Content) { status = http.StatusPartialContent - w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, lastByte, obj.Size)) + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, len(obj.Content))) } w.Header().Set("Accept-Ranges", "bytes") - w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10)) - w.Header().Set("X-Goog-Generation", strconv.FormatInt(obj.Generation, 10)) - w.Header().Set("X-Goog-Hash", fmt.Sprintf("crc32c=%s,md5=%s", obj.Crc32c, obj.Md5Hash)) - w.Header().Set("Last-Modified", obj.Updated.Format(http.TimeFormat)) - w.Header().Set("ETag", obj.Etag) - for name, value := range obj.Metadata { - w.Header().Set("X-Goog-Meta-"+name, value) - } - w.Header().Set("Access-Control-Allow-Origin", "*") - - if ranged && !satisfiable { - status = http.StatusRequestedRangeNotSatisfiable - content = bytes.NewReader([]byte(fmt.Sprintf(`<?xml version='1.0' encoding='UTF-8'?>`+ - `<Error><Code>InvalidRange</Code>`+ - `<Message>The requested range cannot be satisfied.</Message>`+ - `<Details>%s</Details></Error>`, r.Header.Get("Range"))))
- w.Header().Set(contentTypeHeader, "application/xml; charset=UTF-8") - } else { - if obj.ContentType != "" { - w.Header().Set(contentTypeHeader, obj.ContentType) - } - // If content was transcoded, the underlying encoding was removed so we shouldn't report it. - if obj.ContentEncoding != "" && !transcoded { - w.Header().Set("Content-Encoding", obj.ContentEncoding) - } - // X-Goog-Stored-Content-Encoding must be set to the original encoding, - // defaulting to "identity" if no encoding was set. - storedContentEncoding := "identity" - if obj.ContentEncoding != "" { - storedContentEncoding = obj.ContentEncoding - } - w.Header().Set("X-Goog-Stored-Content-Encoding", storedContentEncoding) - } - + w.Header().Set("Content-Length", strconv.Itoa(len(content))) w.WriteHeader(status) if r.Method == http.MethodGet { - io.Copy(w, content) + w.Write(content) } } -func (s *Server) handleRange(obj StreamingObject, r *http.Request) (ranged bool, start int64, lastByte int64, satisfiable bool) { - start, end, err := parseRange(r.Header.Get("Range"), obj.Size) - if err != nil { - // If the range isn't valid, GCS returns all content. - return false, 0, obj.Size - 1, false - } - // GCS is pretty flexible when it comes to invalid ranges. A 416 http - // response is only returned when the range start is beyond the length of - // the content. Otherwise, the range is ignored. - switch { - // Invalid start. Return 416 and NO content. - // Examples: - // Length: 40, Range: bytes=50-60 - // Length: 40, Range: bytes=50- - case start >= obj.Size: - // This IS a ranged request, but it ISN'T satisfiable. - return true, 0, 0, false - // Negative range, ignore range and return all content. - // Examples: - // Length: 40, Range: bytes=30-20 - case end < start: - return false, 0, obj.Size - 1, false - // Return range. Clamp start and end. - // Examples: - // Length: 40, Range: bytes=-100 - // Length: 40, Range: bytes=0-100 - default: - if start < 0 { - start = 0 - } - if end >= obj.Size { - end = obj.Size - 1 - } - return true, start, end, true - } -} - -// parseRange parses the range header and returns the corresponding start and -// end indices in the content. The end index is inclusive. This function -// doesn't validate that the start and end indices fall within the content -// bounds. The content length is only used to handle "suffix length" and -// range-to-end ranges. -func parseRange(rangeHeaderValue string, contentLength int64) (start int64, end int64, err error) { - // For information about the range header, see: - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range - // https://httpwg.org/specs/rfc7233.html#header.range - // https://httpwg.org/specs/rfc7233.html#byte.ranges - // https://httpwg.org/specs/rfc7233.html#status.416 - // - // <unit>=<range spec> - // - // The following ranges are parsed: - // "bytes=40-50" (range with given start and end) - // "bytes=40-" (range to end of content) - // "bytes=-40" (suffix length, offset from end of string) - // - // The unit MUST be "bytes".
- parts := strings.SplitN(rangeHeaderValue, "=", 2) - if len(parts) != 2 { - return 0, 0, fmt.Errorf("expecting `=` in range header, got: %s", rangeHeaderValue) - } - if parts[0] != "bytes" { - return 0, 0, fmt.Errorf("invalid range unit, expecting `bytes`, got: %s", parts[0]) - } - rangeSpec := parts[1] - if len(rangeSpec) == 0 { - return 0, 0, errors.New("empty range") - } - if rangeSpec[0] == '-' { - offsetFromEnd, err := strconv.ParseInt(rangeSpec, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("invalid suffix length, got: %s", rangeSpec) - } - start = contentLength + offsetFromEnd - end = contentLength - 1 - } else { - rangeParts := strings.SplitN(rangeSpec, "-", 2) - if len(rangeParts) != 2 { - return 0, 0, fmt.Errorf("only one range supported, got: %s", rangeSpec) - } - start, err = strconv.ParseInt(rangeParts[0], 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("invalid range start, got: %s", rangeParts[0]) - } - if rangeParts[1] == "" { - end = contentLength - 1 - } else { - end, err = strconv.ParseInt(rangeParts[1], 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("invalid range end, got: %s", rangeParts[1]) +func (s *Server) handleRange(obj Object, r *http.Request) (start, end int, content []byte) { + if reqRange := r.Header.Get("Range"); reqRange != "" { + parts := strings.SplitN(reqRange, "=", 2) + if len(parts) == 2 && parts[0] == "bytes" { + rangeParts := strings.SplitN(parts[1], "-", 2) + if len(rangeParts) == 2 { + start, _ = strconv.Atoi(rangeParts[0]) + end, _ = strconv.Atoi(rangeParts[1]) + if end < 1 { + end = len(obj.Content) + } + return start, end, obj.Content[start:end] } } } - return start, end, nil -} - -func (s *Server) patchObject(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - bucketName := vars["bucketName"] - objectName := vars["objectName"] - - type acls struct { - Entity string - Role string - } - - var payload struct { - ContentType string - ContentEncoding string - Metadata map[string]string `json:"metadata"` - CustomTime string - Acl []acls - } - err := json.NewDecoder(r.Body).Decode(&payload) - if err != nil { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: "Metadata in the request couldn't decode", - } - } - - var attrsToUpdate backend.ObjectAttrs - - attrsToUpdate.ContentType = payload.ContentType - attrsToUpdate.ContentEncoding = payload.ContentEncoding - attrsToUpdate.Metadata = payload.Metadata - attrsToUpdate.CustomTime = payload.CustomTime - - if len(payload.Acl) > 0 { - attrsToUpdate.ACL = []storage.ACLRule{} - for _, aclData := range payload.Acl { - newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)} - attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl) - } - } - - backendObj, err := s.backend.PatchObject(bucketName, objectName, attrsToUpdate) - if err != nil { - return jsonResponse{ - status: http.StatusNotFound, - errorMessage: "Object not found to be PATCHed", - } - } - defer backendObj.Close() - - s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil) - return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]} -} - -func (s *Server) updateObject(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - bucketName := vars["bucketName"] - objectName := vars["objectName"] - - type acls struct { - Entity string - Role string - } - - var payload struct { - Metadata map[string]string `json:"metadata"` - ContentType string `json:"contentType"` - CustomTime string - Acl 
[]acls - } - err := json.NewDecoder(r.Body).Decode(&payload) - if err != nil { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: "Metadata in the request couldn't decode", - } - } - - var attrsToUpdate backend.ObjectAttrs - - attrsToUpdate.Metadata = payload.Metadata - attrsToUpdate.CustomTime = payload.CustomTime - attrsToUpdate.ContentType = payload.ContentType - if len(payload.Acl) > 0 { - attrsToUpdate.ACL = []storage.ACLRule{} - for _, aclData := range payload.Acl { - newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)} - attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl) - } - } - backendObj, err := s.backend.UpdateObject(bucketName, objectName, attrsToUpdate) - if err != nil { - return jsonResponse{ - status: http.StatusNotFound, - errorMessage: "Object not found to be updated", - } - } - defer backendObj.Close() - - s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil) - return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]} -} - -func (s *Server) composeObject(r *http.Request) jsonResponse { - vars := unescapeMuxVars(mux.Vars(r)) - bucketName := vars["bucketName"] - destinationObject := vars["destinationObject"] - - var composeRequest struct { - SourceObjects []struct { - Name string - } - Destination struct { - Bucket string - ContentType string - Metadata map[string]string - } - } - - decoder := json.NewDecoder(r.Body) - err := decoder.Decode(&composeRequest) - if err != nil { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: "Error parsing request body", - } - } - - const maxComposeObjects = 32 - if len(composeRequest.SourceObjects) > maxComposeObjects { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: fmt.Sprintf("The number of source components provided (%d) exceeds the maximum (%d)", len(composeRequest.SourceObjects), maxComposeObjects), - } - } - - sourceNames := make([]string, 0, len(composeRequest.SourceObjects)) - for _, n := range composeRequest.SourceObjects { - sourceNames = append(sourceNames, n.Name) - } - - backendObj, err := s.backend.ComposeObject(bucketName, sourceNames, destinationObject, composeRequest.Destination.Metadata, composeRequest.Destination.ContentType) - if err != nil { - return jsonResponse{ - status: http.StatusInternalServerError, - errorMessage: "Error running compose", - } - } - defer backendObj.Close() - - obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0] - - s.eventManager.Trigger(&backendObj, notification.EventFinalize, nil) - - return jsonResponse{data: newObjectResponse(obj.ObjectAttrs)} + return 0, 0, obj.Content } diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go index e28b84eeb73a..92164cafb105 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go @@ -4,72 +4,44 @@ package fakestorage -import ( - "time" - - "github.com/fsouza/fake-gcs-server/internal/backend" -) - -const timestampFormat = "2006-01-02T15:04:05.999999Z07:00" - -func formatTime(t time.Time) string { - if t.IsZero() { - return "" - } - return t.Format(timestampFormat) -} +import "sort" type listResponse struct { - Kind string `json:"kind"` - Items []any `json:"items,omitempty"` - Prefixes []string `json:"prefixes,omitempty"` + Kind string `json:"kind"` + Items []interface{} `json:"items"` + Prefixes 
[]string `json:"prefixes"` } -func newListBucketsResponse(buckets []backend.Bucket, location string) listResponse { +func newListBucketsResponse(bucketNames []string) listResponse { resp := listResponse{ Kind: "storage#buckets", - Items: make([]any, len(buckets)), + Items: make([]interface{}, len(bucketNames)), } - for i, bucket := range buckets { - resp.Items[i] = newBucketResponse(bucket, location) + sort.Strings(bucketNames) + for i, name := range bucketNames { + resp.Items[i] = newBucketResponse(name) } return resp } type bucketResponse struct { - Kind string `json:"kind"` - ID string `json:"id"` - DefaultEventBasedHold bool `json:"defaultEventBasedHold"` - Name string `json:"name"` - Versioning *bucketVersioning `json:"versioning,omitempty"` - TimeCreated string `json:"timeCreated,omitempty"` - Updated string `json:"updated,omitempty"` - Location string `json:"location,omitempty"` - StorageClass string `json:"storageClass,omitempty"` + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` } -type bucketVersioning struct { - Enabled bool `json:"enabled,omitempty"` -} - -func newBucketResponse(bucket backend.Bucket, location string) bucketResponse { +func newBucketResponse(bucketName string) bucketResponse { return bucketResponse{ - Kind: "storage#bucket", - ID: bucket.Name, - Name: bucket.Name, - DefaultEventBasedHold: bucket.DefaultEventBasedHold, - Versioning: &bucketVersioning{bucket.VersioningEnabled}, - TimeCreated: formatTime(bucket.TimeCreated), - Updated: formatTime(bucket.TimeCreated), // not tracking update times yet, reporting `updated` = `timeCreated` - Location: location, - StorageClass: "STANDARD", + Kind: "storage#bucket", + ID: bucketName, + Name: bucketName, } } -func newListObjectsResponse(objs []ObjectAttrs, prefixes []string) listResponse { +func newListObjectsResponse(objs []Object, prefixes []string) listResponse { resp := listResponse{ Kind: "storage#objects", - Items: make([]any, len(objs)), + Items: make([]interface{}, len(objs)), Prefixes: prefixes, } for i, obj := range objs { @@ -78,93 +50,27 @@ func newListObjectsResponse(objs []ObjectAttrs, prefixes []string) listResponse return resp } -// objectAccessControl is copied from the Google SDK to avoid direct -// dependency. 
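// Illustrative sketch (editor-added example, not part of the vendored code):
// with the trimmed-down v1.7.0 response structs above, a bucket listing
// serializes to bare kind/id/name fields. The bucket name "chunks" is a
// hypothetical value; field names and tags follow the structs shown above.
package main

import (
	"encoding/json"
	"fmt"
)

type bucketResponse struct {
	Kind string `json:"kind"`
	ID   string `json:"id"`
	Name string `json:"name"`
}

type listResponse struct {
	Kind  string        `json:"kind"`
	Items []interface{} `json:"items"`
}

func main() {
	resp := listResponse{
		Kind:  "storage#buckets",
		Items: []interface{}{bucketResponse{Kind: "storage#bucket", ID: "chunks", Name: "chunks"}},
	}
	out, _ := json.Marshal(resp)
	fmt.Println(string(out))
	// {"kind":"storage#buckets","items":[{"kind":"storage#bucket","id":"chunks","name":"chunks"}]}
}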
-type objectAccessControl struct { - Bucket string `json:"bucket,omitempty"` - Domain string `json:"domain,omitempty"` - Email string `json:"email,omitempty"` - Entity string `json:"entity,omitempty"` - EntityID string `json:"entityId,omitempty"` - Etag string `json:"etag,omitempty"` - Generation int64 `json:"generation,omitempty,string"` - ID string `json:"id,omitempty"` - Kind string `json:"kind,omitempty"` - Object string `json:"object,omitempty"` - ProjectTeam struct { - ProjectNumber string `json:"projectNumber,omitempty"` - Team string `json:"team,omitempty"` - } `json:"projectTeam,omitempty"` - Role string `json:"role,omitempty"` - SelfLink string `json:"selfLink,omitempty"` -} - type objectResponse struct { - Kind string `json:"kind"` - Name string `json:"name"` - ID string `json:"id"` - Bucket string `json:"bucket"` - Size int64 `json:"size,string"` - ContentType string `json:"contentType,omitempty"` - ContentEncoding string `json:"contentEncoding,omitempty"` - Crc32c string `json:"crc32c,omitempty"` - ACL []*objectAccessControl `json:"acl,omitempty"` - Md5Hash string `json:"md5Hash,omitempty"` - Etag string `json:"etag,omitempty"` - TimeCreated string `json:"timeCreated,omitempty"` - TimeDeleted string `json:"timeDeleted,omitempty"` - Updated string `json:"updated,omitempty"` - Generation int64 `json:"generation,string"` - CustomTime string `json:"customTime,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` + Kind string `json:"kind"` + Name string `json:"name"` + ID string `json:"id"` + Bucket string `json:"bucket"` + Size int64 `json:"size,string"` + // Crc32c: CRC32c checksum, same as in google storage client code + Crc32c string `json:"crc32c,omitempty"` + Md5Hash string `json:"md5hash,omitempty"` } -func newObjectResponse(obj ObjectAttrs) objectResponse { - acl := getAccessControlsListFromObject(obj) - +func newObjectResponse(obj Object) objectResponse { return objectResponse{ - Kind: "storage#object", - ID: obj.id(), - Bucket: obj.BucketName, - Name: obj.Name, - Size: obj.Size, - ContentType: obj.ContentType, - ContentEncoding: obj.ContentEncoding, - Crc32c: obj.Crc32c, - Md5Hash: obj.Md5Hash, - Etag: obj.Etag, - ACL: acl, - Metadata: obj.Metadata, - TimeCreated: formatTime(obj.Created), - TimeDeleted: formatTime(obj.Deleted), - Updated: formatTime(obj.Updated), - CustomTime: formatTime(obj.CustomTime), - Generation: obj.Generation, - } -} - -type aclListResponse struct { - Items []*objectAccessControl `json:"items"` -} - -func newACLListResponse(obj ObjectAttrs) aclListResponse { - if len(obj.ACL) == 0 { - return aclListResponse{} - } - return aclListResponse{Items: getAccessControlsListFromObject(obj)} -} - -func getAccessControlsListFromObject(obj ObjectAttrs) []*objectAccessControl { - aclItems := make([]*objectAccessControl, len(obj.ACL)) - for idx, aclRule := range obj.ACL { - aclItems[idx] = &objectAccessControl{ - Bucket: obj.BucketName, - Entity: string(aclRule.Entity), - Object: obj.Name, - Role: string(aclRule.Role), - } + Kind: "storage#object", + ID: obj.id(), + Bucket: obj.BucketName, + Name: obj.Name, + Size: int64(len(obj.Content)), + Crc32c: obj.Crc32c, + Md5Hash: obj.Md5Hash, } - return aclItems } type rewriteResponse struct { @@ -176,11 +82,11 @@ type rewriteResponse struct { Resource objectResponse `json:"resource"` } -func newObjectRewriteResponse(obj ObjectAttrs) rewriteResponse { +func newObjectRewriteResponse(obj Object) rewriteResponse { return rewriteResponse{ Kind: "storage#rewriteResponse", - TotalBytesRewritten: obj.Size, - 
ObjectSize: obj.Size, + TotalBytesRewritten: int64(len(obj.Content)), + ObjectSize: int64(len(obj.Content)), Done: true, RewriteToken: "", Resource: newObjectResponse(obj), diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go index 7d5f1da33aa2..165d9d7ec2ed 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go @@ -5,52 +5,30 @@ package fakestorage import ( - "bufio" - "bytes" - "compress/gzip" "context" "crypto/tls" - "errors" "fmt" - "io" - "mime" - "mime/multipart" "net" "net/http" "net/http/httptest" - "net/http/httputil" - "net/textproto" - "net/url" - "os" - "path/filepath" - "strings" "sync" "cloud.google.com/go/storage" "github.com/fsouza/fake-gcs-server/internal/backend" - "github.com/fsouza/fake-gcs-server/internal/checksum" - "github.com/fsouza/fake-gcs-server/internal/notification" - "github.com/gorilla/handlers" "github.com/gorilla/mux" - "golang.org/x/oauth2/google" "google.golang.org/api/option" ) -const defaultPublicHost = "storage.googleapis.com" - // Server is the fake server. // // It provides a fake implementation of the Google Cloud Storage API. type Server struct { - backend backend.Storage - uploads sync.Map - transport http.RoundTripper - ts *httptest.Server - handler http.Handler - options Options - externalURL string - publicHost string - eventManager notification.EventManager + backend backend.Storage + uploads map[string]Object + transport http.RoundTripper + ts *httptest.Server + mux *mux.Router + mtx sync.RWMutex } // NewServer creates a new instance of the server, pre-loaded with the given @@ -63,8 +41,6 @@ func NewServer(objects []Object) *Server { } // NewServerWithHostPort creates a new server that listens on a custom host and port -// -// Deprecated: use NewServerWithOptions. func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server, error) { return NewServerWithOptions(Options{ InitialObjects: objects, @@ -73,102 +49,30 @@ func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server, }) } -// Options are used to configure the server on creation. +// Options are used to configure the server on creation type Options struct { InitialObjects []Object StorageRoot string - Seed string - Scheme string Host string Port uint16 // when set to true, the server will not actually start a TCP listener, // client requests will get processed by an internal mocked transport. NoListener bool - - // Optional external URL, such as https://gcs.127.0.0.1.nip.io:4443 - // Returned in the Location header for resumable uploads - // The "real" value is https://www.googleapis.com, the JSON API - // The default is whatever the server is bound to, such as https://0.0.0.0:4443 - ExternalURL string - - // Optional URL for public access - // An example is "storage.gcs.127.0.0.1.nip.io:4443", which will configure - // the server to serve objects at: - // https://storage.gcs.127.0.0.1.nip.io:4443// - // https://.storage.gcs.127.0.0.1.nip.io:4443>/ - // If unset, the default is "storage.googleapis.com", the XML API - PublicHost string - - // Optional list of headers to add to the CORS header allowlist - // An example is "X-Goog-Meta-Uploader", which will allow a - // custom metadata header named "X-Goog-Meta-Uploader" to be - // sent through the browser - AllowedCORSHeaders []string - - // Destination for writing log. 
- Writer io.Writer - - // EventOptions contains the events that should be published and the URL - // of the Google cloud function such events should be published to. - EventOptions notification.EventManagerOptions - - // Location used for buckets in the server. - BucketsLocation string - - CertificateLocation string - - PrivateKeyLocation string } -// NewServerWithOptions creates a new server configured according to the -// provided options. +// NewServerWithOptions creates a new server with custom options func NewServerWithOptions(options Options) (*Server, error) { - s, err := newServer(options) + s, err := newServer(options.InitialObjects, options.StorageRoot) if err != nil { return nil, err } - - allowedHeaders := []string{"Content-Type", "Content-Encoding", "Range", "Content-Range"} - allowedHeaders = append(allowedHeaders, options.AllowedCORSHeaders...) - - cors := handlers.CORS( - handlers.AllowedMethods([]string{ - http.MethodHead, - http.MethodGet, - http.MethodPost, - http.MethodPut, - http.MethodPatch, - http.MethodDelete, - }), - handlers.AllowedHeaders(allowedHeaders), - handlers.AllowedOrigins([]string{"*"}), - handlers.AllowCredentials(), - handlers.ExposedHeaders([]string{"Location"}), - ) - - s.handler = cors(s.handler) - if options.Writer != nil { - s.handler = handlers.LoggingHandler(options.Writer, s.handler) - } - s.handler = requestCompressHandler(s.handler) - s.transport = &muxTransport{handler: s.handler} - - s.eventManager, err = notification.NewPubsubEventManager(options.EventOptions, options.Writer) - if err != nil { - return nil, err - } - if options.NoListener { + s.setTransportToMux() return s, nil } - s.ts = httptest.NewUnstartedServer(s.handler) - startFunc := s.ts.StartTLS - if options.Scheme == "http" { - startFunc = s.ts.Start - } - + s.ts = httptest.NewUnstartedServer(s.mux) if options.Port != 0 { addr := fmt.Sprintf("%s:%d", options.Host, options.Port) l, err := net.Listen("tcp", addr) @@ -177,254 +81,64 @@ func NewServerWithOptions(options Options) (*Server, error) { } s.ts.Listener.Close() s.ts.Listener = l + s.ts.StartTLS() + } else { + s.ts.StartTLS() } - if options.CertificateLocation != "" && options.PrivateKeyLocation != "" { - cert, err := tls.LoadX509KeyPair(options.CertificateLocation, options.PrivateKeyLocation) - if err != nil { - return nil, err - } - s.ts.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} - } - startFunc() - + s.setTransportToAddr(s.ts.Listener.Addr().String()) return s, nil } -func newServer(options Options) (*Server, error) { - if len(options.InitialObjects) > 0 && options.Seed != "" { - return nil, errors.New("please provide either a seed directory or a list of initial objects") - } - - var backendObjects []backend.StreamingObject - if len(options.InitialObjects) > 0 { - backendObjects = bufferedObjectsToBackendObjects(options.InitialObjects) - } - +func newServer(objects []Object, storageRoot string) (*Server, error) { + backendObjects := toBackendObjects(objects) var backendStorage backend.Storage var err error - if options.StorageRoot != "" { - backendStorage, err = backend.NewStorageFS(backendObjects, options.StorageRoot) + if storageRoot != "" { + backendStorage, err = backend.NewStorageFS(backendObjects, storageRoot) } else { - backendStorage, err = backend.NewStorageMemory(backendObjects) + backendStorage = backend.NewStorageMemory(backendObjects) } if err != nil { return nil, err } - publicHost := options.PublicHost - if publicHost == "" { - publicHost = defaultPublicHost - } - s := Server{ - backend: 
backendStorage, - uploads: sync.Map{}, - externalURL: options.ExternalURL, - publicHost: publicHost, - options: options, - eventManager: ¬ification.PubsubEventManager{}, + backend: backendStorage, + uploads: make(map[string]Object), } s.buildMuxer() - _, err = s.seed() - if err != nil { - return nil, err - } return &s, nil } -func unescapeMuxVars(vars map[string]string) map[string]string { - m := make(map[string]string) - for k, v := range vars { - r, err := url.PathUnescape(v) - if err == nil { - m[k] = r - } else { - m[k] = v - } +func (s *Server) setTransportToAddr(addr string) { + // #nosec + tlsConfig := tls.Config{InsecureSkipVerify: true} + s.transport = &http.Transport{ + TLSClientConfig: &tlsConfig, + DialTLS: func(string, string) (net.Conn, error) { + return tls.Dial("tcp", addr, &tlsConfig) + }, } - return m } -func (s *Server) buildMuxer() { - const apiPrefix = "/storage/v1" - handler := mux.NewRouter().SkipClean(true).UseEncodedPath() - - // healthcheck - handler.Path("/_internal/healthcheck").Methods(http.MethodGet).HandlerFunc(s.healthcheck) - - routers := []*mux.Router{ - handler.PathPrefix(apiPrefix).Subrouter(), - handler.MatcherFunc(s.publicHostMatcher).PathPrefix(apiPrefix).Subrouter(), - } - - for _, r := range routers { - r.Path("/b").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets)) - r.Path("/b/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets)) - r.Path("/b").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost)) - r.Path("/b/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost)) - r.Path("/b/{bucketName}").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.getBucket)) - r.Path("/b/{bucketName}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.updateBucket)) - r.Path("/b/{bucketName}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteBucket)) - r.Path("/b/{bucketName}/o").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects)) - r.Path("/b/{bucketName}/o/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects)) - r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.patchObject)) - r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjectACL)) - r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.setObjectACL)) - r.Path("/b/{bucketName}/o/{objectName:.+}/acl/{entity}").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.setObjectACL)) - r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject) - r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteObject)) - r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/{copyType:rewriteTo|copyTo}/b/{destinationBucket}/o/{destinationObject:.+}").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.rewriteObject)) - r.Path("/b/{bucketName}/o/{destinationObject:.+}/compose").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.composeObject)) - r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.updateObject)) - } - - // Internal / update server configuration - handler.Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig)) - 
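// Illustrative sketch (editor-added, not part of the diff): the muxTransport
// referenced in this version of the package is defined elsewhere; this
// standalone approximation shows the NoListener idea — an http.RoundTripper
// that feeds requests straight into an in-process handler, so no TCP socket
// or TLS handshake is ever needed. All names below are hypothetical.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type inProcessTransport struct {
	handler http.Handler
}

// RoundTrip serves the request with an in-memory recorder instead of a network call.
func (t inProcessTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	rec := httptest.NewRecorder()
	t.handler.ServeHTTP(rec, r)
	return rec.Result(), nil
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/storage/v1/b", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"kind":"storage#buckets","items":[]}`)
	})
	client := &http.Client{Transport: inProcessTransport{handler: mux}}
	resp, err := client.Get("https://storage.googleapis.com/storage/v1/b")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK, served without any listener
}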
handler.MatcherFunc(s.publicHostMatcher).Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig)) - handler.Path("/_internal/reseed").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.reseedServer)) - // Internal - end - - // XML API - xmlApiRouters := []*mux.Router{ - handler.Host(fmt.Sprintf("{bucketName}.%s", s.publicHost)).Subrouter(), - handler.MatcherFunc(s.publicHostMatcher).PathPrefix(`/{bucketName}`).Subrouter(), - } - for _, r := range xmlApiRouters { - r.Path("/").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects)) - r.Path("").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects)) - } - - bucketHost := fmt.Sprintf("{bucketName}.%s", s.publicHost) - handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject) - handler.Path("/download/storage/v1/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject) - handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject)) - handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject)) - handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent)) - handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent)) - handler.Path("/upload/resumable/{uploadId}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent)) - - // Batch endpoint - handler.MatcherFunc(s.publicHostMatcher).Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall) - handler.Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall) - - handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject) - handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject) - - // Form Uploads - handler.Host(s.publicHost).Path("/{bucketName}").MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject)) - handler.Host(bucketHost).MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject)) - - // Signed URLs (upload and download) - handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject)) - handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject) - handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject)) - handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject)) - - s.handler = handler +func (s *Server) setTransportToMux() { + s.transport = &muxTransport{router: s.mux} } -func (s *Server) seed() ([]backend.StreamingObject, error) { - if s.options.Seed == "" { - return nil, nil - } - - initialObjects, emptyBuckets := generateObjectsFromFiles(s.options.Seed) - - backendObjects := 
bufferedObjectsToBackendObjects(initialObjects) - - var err error - if s.options.StorageRoot != "" { - s.backend, err = backend.NewStorageFS(backendObjects, s.options.StorageRoot) - } else { - s.backend, err = backend.NewStorageMemory(backendObjects) - } - if err != nil { - return nil, err - } - - for _, bucketName := range emptyBuckets { - s.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName}) - } - return backendObjects, nil -} - -func (s *Server) reseedServer(r *http.Request) jsonResponse { - backendObjects, err := s.seed() - if err != nil { - return errToJsonResponse(err) - } - - return jsonResponse{data: fromBackendObjects(backendObjects)} -} - -func generateObjectsFromFiles(folder string) ([]Object, []string) { - var objects []Object - var emptyBuckets []string - if files, err := os.ReadDir(folder); err == nil { - for _, f := range files { - if !f.IsDir() { - continue - } - bucketName := f.Name() - localBucketPath := filepath.Join(folder, bucketName) - - bucketObjects, err := objectsFromBucket(localBucketPath, bucketName) - if err != nil { - continue - } - - if len(bucketObjects) < 1 { - emptyBuckets = append(emptyBuckets, bucketName) - } - objects = append(objects, bucketObjects...) - } - } - return objects, emptyBuckets -} - -func objectsFromBucket(localBucketPath, bucketName string) ([]Object, error) { - var objects []Object - err := filepath.Walk(localBucketPath, func(path string, info os.FileInfo, _ error) error { - if info.Mode().IsRegular() { - // Rel() should never return error since path always descend from localBucketPath - relPath, _ := filepath.Rel(localBucketPath, path) - objectKey := filepath.ToSlash(relPath) - fileContent, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("could not read file %q: %w", path, err) - } - objects = append(objects, Object{ - ObjectAttrs: ObjectAttrs{ - ACL: []storage.ACLRule{ - { - Entity: "projectOwner-test-project", - Role: "OWNER", - }, - }, - BucketName: bucketName, - Name: objectKey, - ContentType: mime.TypeByExtension(filepath.Ext(path)), - Crc32c: checksum.EncodedCrc32cChecksum(fileContent), - Md5Hash: checksum.EncodedMd5Hash(fileContent), - }, - Content: fileContent, - }) - } - return nil - }) - return objects, err -} - -func (s *Server) healthcheck(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) -} - -// publicHostMatcher matches incoming requests against the currently specified server publicHost. 
-func (s *Server) publicHostMatcher(r *http.Request, rm *mux.RouteMatch) bool { - if strings.Contains(s.publicHost, ":") || !strings.Contains(r.Host, ":") { - return r.Host == s.publicHost - } - idx := strings.IndexByte(r.Host, ':') - return r.Host[:idx] == s.publicHost +func (s *Server) buildMuxer() { + s.mux = mux.NewRouter() + s.mux.Host("storage.googleapis.com").Path("/{bucketName}/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject) + s.mux.Host("{bucketName}.storage.googleapis.com").Path("/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject) + r := s.mux.PathPrefix("/storage/v1").Subrouter() + r.Path("/b").Methods("GET").HandlerFunc(s.listBuckets) + r.Path("/b/{bucketName}").Methods("GET").HandlerFunc(s.getBucket) + r.Path("/b/{bucketName}/o").Methods("GET").HandlerFunc(s.listObjects) + r.Path("/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject) + r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("GET").HandlerFunc(s.getObject) + r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("DELETE").HandlerFunc(s.deleteObject) + r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/rewriteTo/b/{destinationBucket}/o/{destinationObject:.+}").HandlerFunc(s.rewriteObject) + s.mux.Path("/download/storage/v1/b/{bucketName}/o/{objectName}").Methods("GET").HandlerFunc(s.downloadObject) + s.mux.Path("/upload/storage/v1/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject) + s.mux.Path("/upload/resumable/{uploadId}").Methods("PUT", "POST").HandlerFunc(s.uploadFileContent) } // Stop stops the server, closing all connections. @@ -439,136 +153,20 @@ func (s *Server) Stop() { // URL returns the server URL. func (s *Server) URL() string { - if s.externalURL != "" { - return s.externalURL - } if s.ts != nil { return s.ts.URL } return "" } -// PublicURL returns the server's public download URL. -func (s *Server) PublicURL() string { - return fmt.Sprintf("%s://%s", s.scheme(), s.publicHost) -} - -func (s *Server) Backend() backend.Storage { - return s.backend -} - -func (s *Server) scheme() string { - if s.options.Scheme == "http" { - return "http" - } - return "https" -} - // HTTPClient returns an HTTP client configured to talk to the server. func (s *Server) HTTPClient() *http.Client { return &http.Client{Transport: s.transport} } -// HTTPHandler returns an HTTP handler that behaves like GCS. -func (s *Server) HTTPHandler() http.Handler { - return s.handler -} - // Client returns a GCS client configured to talk to the server. 
func (s *Server) Client() *storage.Client { - client, err := storage.NewClient(context.Background(), option.WithHTTPClient(s.HTTPClient()), option.WithCredentials(&google.Credentials{})) - if err != nil { - panic(err) - } + opt := option.WithHTTPClient(s.HTTPClient()) + client, _ := storage.NewClient(context.Background(), opt) return client } - -func (s *Server) handleBatchCall(w http.ResponseWriter, r *http.Request) { - reader, err := r.MultipartReader() - if err != nil { - http.Error(w, "invalid Content-Type header", http.StatusBadRequest) - return - } - - var b bytes.Buffer - mw := multipart.NewWriter(&b) - defer mw.Close() - w.Header().Set("Content-Type", "multipart/mixed; boundary="+mw.Boundary()) - - w.WriteHeader(http.StatusOK) - part, err := reader.NextPart() - for ; err == nil; part, err = reader.NextPart() { - contentID := part.Header.Get("Content-ID") - if contentID == "" { - // missing content ID, skip - continue - } - - partHeaders := textproto.MIMEHeader{} - partHeaders.Set("Content-Type", "application/http") - partHeaders.Set("Content-ID", strings.Replace(contentID, "<", "@,;:\"/[]?= -// (including space). gsutil likes to use `=` in the boundary, but incorrectly -// quotes it using single quotes. -// -// We do exclude \ and " from the regexp because those are not supported by the -// mime package. -// -// This has been reported to gsutil -// (https://github.com/GoogleCloudPlatform/gsutil/issues/1466). If that issue -// ever gets closed, we should be able to get rid of this hack. -var gsutilBoundary = regexp.MustCompile(`boundary='([^']*[()<>@,;:"/\[\]?= ]+[^']*)'`) - type multipartMetadata struct { - ContentType string `json:"contentType"` - ContentEncoding string `json:"contentEncoding"` - CustomTime time.Time `json:"customTime,omitempty"` - Name string `json:"name"` - Metadata map[string]string `json:"metadata"` -} - -type contentRange struct { - KnownRange bool // Is the range known, or "*"? - KnownTotal bool // Is the total known, or "*"? 
- Start int // Start of the range, -1 if unknown - End int // End of the range, -1 if unknown - Total int // Total bytes expected, -1 if unknown + Name string `json:"name"` } -type generationCondition struct { - ifGenerationMatch *int64 - ifGenerationNotMatch *int64 -} - -func (c generationCondition) ConditionsMet(activeGeneration int64) bool { - if c.ifGenerationMatch != nil && *c.ifGenerationMatch != activeGeneration { - return false - } - if c.ifGenerationNotMatch != nil && *c.ifGenerationNotMatch == activeGeneration { - return false - } - return true -} - -func (s *Server) insertObject(r *http.Request) jsonResponse { - bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] - - if _, err := s.backend.GetBucket(bucketName); err != nil { - return jsonResponse{status: http.StatusNotFound} +func (s *Server) insertObject(w http.ResponseWriter, r *http.Request) { + s.mtx.Lock() + defer s.mtx.Unlock() + bucketName := mux.Vars(r)["bucketName"] + if err := s.backend.GetBucket(bucketName); err != nil { + w.WriteHeader(http.StatusNotFound) + err := newErrorResponse(http.StatusNotFound, "Not found", nil) + json.NewEncoder(w).Encode(err) + return } uploadType := r.URL.Query().Get("uploadType") - if uploadType == "" && r.Header.Get("X-Goog-Upload-Protocol") == uploadTypeResumable { - uploadType = uploadTypeResumable - } - switch uploadType { - case uploadTypeMedia: - return s.simpleUpload(bucketName, r) - case uploadTypeMultipart: - return s.multipartUpload(bucketName, r) - case uploadTypeResumable: - return s.resumableUpload(bucketName, r) + case "media": + s.simpleUpload(bucketName, w, r) + case "multipart": + s.multipartUpload(bucketName, w, r) + case "resumable": + s.resumableUpload(bucketName, w, r) default: - // Support Signed URL Uploads - if r.URL.Query().Get("X-Goog-Algorithm") != "" { - switch r.Method { - case http.MethodPost: - return s.resumableUpload(bucketName, r) - case http.MethodPut: - return s.signedUpload(bucketName, r) - } - } - return jsonResponse{errorMessage: "invalid uploadType", status: http.StatusBadRequest} + http.Error(w, "invalid uploadType", http.StatusBadRequest) } } -func (s *Server) insertFormObject(r *http.Request) xmlResponse { - bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] - - if err := r.ParseMultipartForm(32 << 20); nil != err { - return xmlResponse{errorMessage: "invalid form", status: http.StatusBadRequest} - } - - // Load metadata - var name string - if keys, ok := r.MultipartForm.Value["key"]; ok { - name = keys[0] - } +func (s *Server) simpleUpload(bucketName string, w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + name := r.URL.Query().Get("name") if name == "" { - return xmlResponse{errorMessage: "missing key", status: http.StatusBadRequest} - } - var predefinedACL string - if acls, ok := r.MultipartForm.Value["acl"]; ok { - predefinedACL = acls[0] - } - var contentEncoding string - if contentEncodings, ok := r.MultipartForm.Value["Content-Encoding"]; ok { - contentEncoding = contentEncodings[0] - } - var contentType string - if contentTypes, ok := r.MultipartForm.Value["Content-Type"]; ok { - contentType = contentTypes[0] - } - successActionStatus := http.StatusNoContent - if successActionStatuses, ok := r.MultipartForm.Value["success_action_status"]; ok { - successInt, err := strconv.Atoi(successActionStatuses[0]) - if err != nil { - return xmlResponse{errorMessage: err.Error(), status: http.StatusBadRequest} - } - if successInt != http.StatusOK && successInt != http.StatusCreated && successInt != http.StatusNoContent { - 
return xmlResponse{errorMessage: "invalid success action status", status: http.StatusBadRequest} - } - successActionStatus = successInt + http.Error(w, "name is required for simple uploads", http.StatusBadRequest) + return } - metaData := make(map[string]string) - for key := range r.MultipartForm.Value { - lowerKey := strings.ToLower(key) - if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey { - metaData[metaDataKey] = r.MultipartForm.Value[key][0] - } - } - - // Load file - var file *multipart.FileHeader - if files, ok := r.MultipartForm.File["file"]; ok { - file = files[0] - } - if file == nil { - return xmlResponse{errorMessage: "missing file", status: http.StatusBadRequest} - } - infile, err := file.Open() + data, err := ioutil.ReadAll(r.Body) if err != nil { - return xmlResponse{errorMessage: err.Error()} - } - obj := StreamingObject{ - ObjectAttrs: ObjectAttrs{ - BucketName: bucketName, - Name: name, - ContentType: contentType, - ContentEncoding: contentEncoding, - ACL: getObjectACL(predefinedACL), - Metadata: metaData, - }, - Content: infile, + http.Error(w, err.Error(), http.StatusInternalServerError) + return } - obj, err = s.createObject(obj, backend.NoConditions{}) + obj := Object{BucketName: bucketName, Name: name, Content: data, Crc32c: encodedCrc32cChecksum(data), Md5Hash: encodedMd5Hash(data)} + err = s.createObject(obj) if err != nil { - return xmlResponse{errorMessage: err.Error()} - } - defer obj.Close() - - if successActionStatus == 201 { - objectURI := fmt.Sprintf("%s/%s%s", s.URL(), bucketName, name) - xmlBody := createXmlResponseBody(bucketName, obj.Etag, strings.TrimPrefix(name, "/"), objectURI) - return xmlResponse{status: successActionStatus, data: xmlBody} + http.Error(w, err.Error(), http.StatusInternalServerError) + return } - return xmlResponse{status: successActionStatus} + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(obj) } -func (s *Server) wrapUploadPreconditions(r *http.Request, bucketName string, objectName string) (generationCondition, error) { - result := generationCondition{ - ifGenerationMatch: nil, - ifGenerationNotMatch: nil, - } - ifGenerationMatch := r.URL.Query().Get("ifGenerationMatch") - - if ifGenerationMatch != "" { - gen, err := strconv.ParseInt(ifGenerationMatch, 10, 64) - if err != nil { - return generationCondition{}, err - } - result.ifGenerationMatch = &gen - } - - ifGenerationNotMatch := r.URL.Query().Get("ifGenerationNotMatch") - - if ifGenerationNotMatch != "" { - gen, err := strconv.ParseInt(ifGenerationNotMatch, 10, 64) - if err != nil { - return generationCondition{}, err - } - result.ifGenerationNotMatch = &gen - } +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) - return result, nil +func crc32cChecksum(content []byte) []byte { + checksummer := crc32.New(crc32cTable) + checksummer.Write(content) + return checksummer.Sum(make([]byte, 0, 4)) } -func (s *Server) simpleUpload(bucketName string, r *http.Request) jsonResponse { - defer r.Body.Close() - name := r.URL.Query().Get("name") - predefinedACL := r.URL.Query().Get("predefinedAcl") - contentEncoding := r.URL.Query().Get("contentEncoding") - customTime := r.URL.Query().Get("customTime") - if name == "" { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: "name is required for simple uploads", - } - } - obj := StreamingObject{ - ObjectAttrs: ObjectAttrs{ - BucketName: bucketName, - Name: name, - ContentType: r.Header.Get(contentTypeHeader), - ContentEncoding: contentEncoding, - CustomTime: 
convertTimeWithoutError(customTime), - ACL: getObjectACL(predefinedACL), - }, - Content: notImplementedSeeker{r.Body}, - } - obj, err := s.createObject(obj, backend.NoConditions{}) - if err != nil { - return errToJsonResponse(err) - } - obj.Close() - return jsonResponse{data: newObjectResponse(obj.ObjectAttrs)} +func encodedChecksum(checksum []byte) string { + return base64.StdEncoding.EncodeToString(checksum) } -type notImplementedSeeker struct { - io.ReadCloser +func encodedCrc32cChecksum(content []byte) string { + return encodedChecksum(crc32cChecksum(content)) } -func (s notImplementedSeeker) Seek(offset int64, whence int) (int64, error) { - return 0, errors.New("not implemented") +func md5Hash(b []byte) []byte { + /* #nosec G401 */ + h := md5.New() + h.Write(b) + return h.Sum(nil) } -func (s *Server) signedUpload(bucketName string, r *http.Request) jsonResponse { - defer r.Body.Close() - name := unescapeMuxVars(mux.Vars(r))["objectName"] - predefinedACL := r.URL.Query().Get("predefinedAcl") - contentEncoding := r.URL.Query().Get("contentEncoding") - customTime := r.URL.Query().Get("customTime") - - // Load data from HTTP Headers - if contentEncoding == "" { - contentEncoding = r.Header.Get("Content-Encoding") - } - - metaData := make(map[string]string) - for key := range r.Header { - lowerKey := strings.ToLower(key) - if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey { - metaData[metaDataKey] = r.Header.Get(key) - } - } - - obj := StreamingObject{ - ObjectAttrs: ObjectAttrs{ - BucketName: bucketName, - Name: name, - ContentType: r.Header.Get(contentTypeHeader), - ContentEncoding: contentEncoding, - CustomTime: convertTimeWithoutError(customTime), - ACL: getObjectACL(predefinedACL), - Metadata: metaData, - }, - Content: notImplementedSeeker{r.Body}, - } - obj, err := s.createObject(obj, backend.NoConditions{}) - if err != nil { - return errToJsonResponse(err) - } - obj.Close() - return jsonResponse{data: newObjectResponse(obj.ObjectAttrs)} +func encodedHash(hash []byte) string { + return base64.StdEncoding.EncodeToString(hash) } -func getObjectACL(predefinedACL string) []storage.ACLRule { - if predefinedACL == "publicRead" { - return []storage.ACLRule{ - { - Entity: "allUsers", - Role: "READER", - }, - } - } - - return []storage.ACLRule{ - { - Entity: "projectOwner-test-project", - Role: "OWNER", - }, - } +func encodedMd5Hash(content []byte) string { + return encodedHash(md5Hash(content)) } -func (s *Server) multipartUpload(bucketName string, r *http.Request) jsonResponse { +func (s *Server) multipartUpload(bucketName string, w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - params, err := parseContentTypeParams(r.Header.Get(contentTypeHeader)) + _, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) if err != nil { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: "invalid Content-Type header", - } + http.Error(w, "invalid Content-Type header", http.StatusBadRequest) + return } var ( metadata *multipartMetadata content []byte ) - var contentType string reader := multipart.NewReader(r.Body, params["boundary"]) - - var partReaders []io.Reader - part, err := reader.NextPart() for ; err == nil; part, err = reader.NextPart() { if metadata == nil { metadata, err = loadMetadata(part) - contentType = metadata.ContentType } else { - contentType = part.Header.Get(contentTypeHeader) content, err = loadContent(part) - partReaders = append(partReaders, bytes.NewReader(content)) } if err != nil { break } } if 
err != io.EOF { - return jsonResponse{errorMessage: err.Error()} - } - - objName := r.URL.Query().Get("name") - predefinedACL := r.URL.Query().Get("predefinedAcl") - if objName == "" { - objName = metadata.Name + http.Error(w, err.Error(), http.StatusInternalServerError) + return } - - conditions, err := s.wrapUploadPreconditions(r, bucketName, objName) + obj := Object{BucketName: bucketName, Name: metadata.Name, Content: content, Crc32c: encodedCrc32cChecksum(content), Md5Hash: encodedMd5Hash(content)} + err = s.createObject(obj) if err != nil { - return jsonResponse{ - status: http.StatusBadRequest, - errorMessage: err.Error(), - } - } - - obj := StreamingObject{ - ObjectAttrs: ObjectAttrs{ - BucketName: bucketName, - Name: objName, - ContentType: contentType, - ContentEncoding: metadata.ContentEncoding, - CustomTime: metadata.CustomTime, - ACL: getObjectACL(predefinedACL), - Metadata: metadata.Metadata, - }, - Content: notImplementedSeeker{io.NopCloser(io.MultiReader(partReaders...))}, + http.Error(w, err.Error(), http.StatusInternalServerError) + return } - - obj, err = s.createObject(obj, conditions) - if err != nil { - return errToJsonResponse(err) - } - defer obj.Close() - return jsonResponse{data: newObjectResponse(obj.ObjectAttrs)} -} - -func parseContentTypeParams(requestContentType string) (map[string]string, error) { - requestContentType = gsutilBoundary.ReplaceAllString(requestContentType, `boundary="$1"`) - _, params, err := mime.ParseMediaType(requestContentType) - return params, err + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(obj) } -func (s *Server) resumableUpload(bucketName string, r *http.Request) jsonResponse { - if r.URL.Query().Has("upload_id") { - return s.uploadFileContent(r) - } - predefinedACL := r.URL.Query().Get("predefinedAcl") - contentEncoding := r.URL.Query().Get("contentEncoding") - metadata := new(multipartMetadata) - if r.Body != http.NoBody { - var err error - metadata, err = loadMetadata(r.Body) - if err != nil { - return jsonResponse{errorMessage: err.Error()} - } - } +func (s *Server) resumableUpload(bucketName string, w http.ResponseWriter, r *http.Request) { objName := r.URL.Query().Get("name") if objName == "" { + metadata, err := loadMetadata(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } objName = metadata.Name } - if contentEncoding == "" { - contentEncoding = metadata.ContentEncoding - } - obj := Object{ - ObjectAttrs: ObjectAttrs{ - BucketName: bucketName, - Name: objName, - ContentType: metadata.ContentType, - ContentEncoding: contentEncoding, - CustomTime: metadata.CustomTime, - ACL: getObjectACL(predefinedACL), - Metadata: metadata.Metadata, - }, - } + obj := Object{BucketName: bucketName, Name: objName} uploadID, err := generateUploadID() if err != nil { - return jsonResponse{errorMessage: err.Error()} - } - s.uploads.Store(uploadID, obj) - header := make(http.Header) - location := fmt.Sprintf( - "%s/upload/storage/v1/b/%s/o?uploadType=resumable&name=%s&upload_id=%s", - s.URL(), - bucketName, - url.PathEscape(objName), - uploadID, - ) - header.Set("Location", location) - if r.Header.Get("X-Goog-Upload-Command") == "start" { - header.Set("X-Goog-Upload-URL", location) - header.Set("X-Goog-Upload-Status", "active") - } - return jsonResponse{ - data: newObjectResponse(obj.ObjectAttrs), - header: header, + http.Error(w, err.Error(), http.StatusInternalServerError) + return } + s.uploads[uploadID] = obj + w.Header().Set("Location", s.URL()+"/upload/resumable/"+uploadID) + 
w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(obj) } -// uploadFileContent accepts a chunk of a resumable upload -// -// A resumable upload is sent in one or more chunks. The request's -// "Content-Range" header is used to determine if more data is expected. -// -// When sending streaming content, the total size is unknown until the stream -// is exhausted. The Go client always sends streaming content. The sequence of -// "Content-Range" headers for 2600-byte content sent in 1000-byte chunks are: -// -// Content-Range: bytes 0-999/* -// Content-Range: bytes 1000-1999/* -// Content-Range: bytes 2000-2599/* -// Content-Range: bytes */2600 -// -// When sending chunked content of a known size, the total size is sent as -// well. The Python client uses this method to upload files and in-memory -// content. The sequence of "Content-Range" headers for the 2600-byte content -// sent in 1000-byte chunks are: -// -// Content-Range: bytes 0-999/2600 -// Content-Range: bytes 1000-1999/2600 -// Content-Range: bytes 2000-2599/2600 -// -// The server collects the content, analyzes the "Content-Range", and returns a -// "308 Permanent Redirect" response if more chunks are expected, and a -// "200 OK" response if the upload is complete (the Go client also accepts a -// "201 Created" response). The "Range" header in the response should be set to -// the size of the content received so far, such as: -// -// Range: bytes 0-2000 -// -// The client (such as the Go client) can send a header "X-Guploader-No-308" if -// it can't process a native "308 Permanent Redirect". The in-process response -// then has a status of "200 OK", with a header "X-Http-Status-Code-Override" -// set to "308". -func (s *Server) uploadFileContent(r *http.Request) jsonResponse { - uploadID := r.URL.Query().Get("upload_id") - rawObj, ok := s.uploads.Load(uploadID) +func (s *Server) uploadFileContent(w http.ResponseWriter, r *http.Request) { + uploadID := mux.Vars(r)["uploadId"] + s.mtx.Lock() + defer s.mtx.Unlock() + obj, ok := s.uploads[uploadID] if !ok { - return jsonResponse{status: http.StatusNotFound} + http.Error(w, "upload not found", http.StatusNotFound) + return } - obj := rawObj.(Object) - // TODO: stream upload file content to and from disk (when using the FS - // backend, at least) instead of loading the entire content into memory. content, err := loadContent(r.Body) if err != nil { - return jsonResponse{errorMessage: err.Error()} + http.Error(w, err.Error(), http.StatusInternalServerError) + return } commit := true - status := http.StatusOK + status := http.StatusCreated + objLength := len(obj.Content) obj.Content = append(obj.Content, content...) 
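// Illustrative sketch (editor-added, not part of the diff): the removed
// v1.47.7 doc comment above spells out the Content-Range sequence a client
// sends during a resumable upload. For a 2600-byte object pushed in
// 1000-byte chunks with a known total size, the headers come out as below;
// note that Content-Range end offsets are inclusive, hence end-1.
package main

import "fmt"

func main() {
	const total, chunkSize = 2600, 1000
	for start := 0; start < total; start += chunkSize {
		end := start + chunkSize
		if end > total {
			end = total
		}
		fmt.Printf("Content-Range: bytes %d-%d/%d\n", start, end-1, total)
	}
	// Output:
	// Content-Range: bytes 0-999/2600
	// Content-Range: bytes 1000-1999/2600
	// Content-Range: bytes 2000-2599/2600
}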
- obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content) - obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content) - obj.Etag = fmt.Sprintf("%q", obj.Md5Hash) - contentTypeHeader := r.Header.Get(contentTypeHeader) - if contentTypeHeader != "" { - obj.ContentType = contentTypeHeader - } else { - obj.ContentType = "application/octet-stream" - } - responseHeader := make(http.Header) + obj.Crc32c = encodedCrc32cChecksum(obj.Content) + obj.Md5Hash = encodedMd5Hash(obj.Content) if contentRange := r.Header.Get("Content-Range"); contentRange != "" { - parsed, err := parseContentRange(contentRange) + commit, err = parseRange(contentRange, objLength, len(content), w) if err != nil { - return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest} - } - if parsed.KnownRange { - // Middle of streaming request, or any part of chunked request - responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", parsed.End)) - // Complete if the range covers the known total - commit = parsed.KnownTotal && (parsed.End+1 >= parsed.Total) - } else { - // End of a streaming request - responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", len(obj.Content))) + http.Error(w, err.Error(), http.StatusBadRequest) + return } } if commit { - s.uploads.Delete(uploadID) - streamingObject, err := s.createObject(obj.StreamingObject(), backend.NoConditions{}) + delete(s.uploads, uploadID) + err = s.createObject(obj) if err != nil { - return errToJsonResponse(err) - } - defer streamingObject.Close() - obj, err = streamingObject.BufferedObject() - if err != nil { - return errToJsonResponse(err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return } } else { - if _, no308 := r.Header["X-Guploader-No-308"]; no308 { - // Go client - responseHeader.Set("X-Http-Status-Code-Override", "308") - } else { - // Python client - status = http.StatusPermanentRedirect - } - s.uploads.Store(uploadID, obj) - } - if r.Header.Get("X-Goog-Upload-Command") == "upload, finalize" { - responseHeader.Set("X-Goog-Upload-Status", "final") - } - return jsonResponse{ - status: status, - data: newObjectResponse(obj.ObjectAttrs), - header: responseHeader, - } + status = http.StatusOK + w.Header().Set("X-Http-Status-Code-Override", "308") + s.uploads[uploadID] = obj + } + data, _ := json.Marshal(obj) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(len(data))) + w.WriteHeader(status) + w.Write(data) } -// Parse a Content-Range header -// Some possible valid header values: -// -// bytes 0-1023/4096 (first 1024 bytes of a 4096-byte document) -// bytes 1024-2047/* (second 1024 bytes of a streaming document) -// bytes */4096 (The end of 4096 byte streaming document) -// bytes 0-*/* (start and end of a streaming document as sent by nodeJS client lib) -// bytes */* (start and end of a streaming document as sent by the C++ SDK) -func parseContentRange(r string) (parsed contentRange, err error) { +func parseRange(r string, objLength, bodyLength int, w http.ResponseWriter) (finished bool, err error) { invalidErr := fmt.Errorf("invalid Content-Range: %v", r) - - // Require that units == "bytes" const bytesPrefix = "bytes " + var contentLength int if !strings.HasPrefix(r, bytesPrefix) { - return parsed, invalidErr + return false, invalidErr } - - // Split range from total length parts := strings.SplitN(r[len(bytesPrefix):], "/", 2) if len(parts) != 2 { - return parsed, invalidErr + return false, invalidErr } + var rangeStart, rangeEnd int - // Process range if parts[0] == "*" { - parsed.Start = -1 - 
parsed.End = -1 + rangeStart = objLength + rangeEnd = objLength + bodyLength } else { rangeParts := strings.SplitN(parts[0], "-", 2) if len(rangeParts) != 2 { - return parsed, invalidErr + return false, invalidErr } - - parsed.Start, err = strconv.Atoi(rangeParts[0]) + rangeStart, err = strconv.Atoi(rangeParts[0]) if err != nil { - return parsed, invalidErr + return false, invalidErr } - - if rangeParts[1] == "*" { - parsed.End = -1 - } else { - parsed.KnownRange = true - parsed.End, err = strconv.Atoi(rangeParts[1]) - if err != nil { - return parsed, invalidErr - } - } - } - - // Process total length - if parts[1] == "*" { - parsed.Total = -1 - } else { - parsed.KnownTotal = true - parsed.Total, err = strconv.Atoi(parts[1]) + rangeEnd, err = strconv.Atoi(rangeParts[1]) if err != nil { - return parsed, invalidErr + return false, invalidErr } } - return parsed, nil + contentLength = objLength + bodyLength + finished = rangeEnd == contentLength + w.Header().Set("Range", fmt.Sprintf("bytes=%d-%d", rangeStart, rangeEnd)) + + return finished, nil } func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) { @@ -630,7 +255,7 @@ func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) { func loadContent(rc io.ReadCloser) ([]byte, error) { defer rc.Close() - return io.ReadAll(rc) + return ioutil.ReadAll(rc) } func generateUploadID() (string, error) { diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go deleted file mode 100644 index 50d9661df84c..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go +++ /dev/null @@ -1,123 +0,0 @@ -package fakestorage - -import ( - "encoding/xml" - "net/http" - "strings" -) - -type xmlResponse struct { - status int - header http.Header - data any - errorMessage string -} - -type xmlResponseBody struct { - XMLName xml.Name `xml:"PostResponse"` - Bucket string - Etag struct { - Value string `xml:",innerxml"` - } - Key string - Location string -} - -type ListBucketResult struct { - XMLName xml.Name `xml:"ListBucketResult"` - Name string `xml:"Name"` - CommonPrefixes []CommonPrefix `xml:"CommonPrefixes,omitempty"` - Delimiter string `xml:"Delimiter"` - Prefix string `xml:"Prefix"` - KeyCount int `xml:"KeyCount"` - Contents []Contents `xml:"Contents"` -} - -type Contents struct { - XMLName xml.Name `xml:"Contents"` - Key string `xml:"Key"` - Generation int64 `xml:"Generation"` - LastModified string `xml:"LastModified"` - ETag ETag - Size int64 `xml:"Size"` -} - -type CommonPrefix struct { - Prefix string `xml:"Prefix"` -} - -type ETag struct { - Value string `xml:",innerxml"` -} - -func (e *ETag) Equals(etag string) bool { - trim := func(s string) string { - return strings.TrimPrefix(strings.TrimSuffix(s, "\""), "\"") - } - return trim(e.Value) == trim(etag) -} - -type xmlHandler = func(r *http.Request) xmlResponse - -func xmlToHTTPHandler(h xmlHandler) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - resp := h(r) - w.Header().Set("Content-Type", "application/xml") - for name, values := range resp.header { - for _, value := range values { - w.Header().Add(name, value) - } - } - - status := resp.getStatus() - var data any - if status > 399 { - data = newErrorResponse(status, resp.getErrorMessage(status), nil) - } else { - data = resp.data - } - - w.WriteHeader(status) - - dataBytes, ok := data.([]byte) - if ok { - w.Write(dataBytes) - } else { - xml.NewEncoder(w).Encode(data) - } - } -} - -func 
createXmlResponseBody(bucketName, etag, key, location string) []byte { - responseBody := xmlResponseBody{ - Bucket: bucketName, - Etag: struct { - Value string `xml:",innerxml"` - }{etag}, - Location: location, - Key: key, - } - x, err := xml.Marshal(responseBody) - if err != nil { - return nil - } - - return []byte(xml.Header + string(x)) -} - -func (r *xmlResponse) getStatus() int { - if r.status > 0 { - return r.status - } - if r.errorMessage != "" { - return http.StatusInternalServerError - } - return http.StatusOK -} - -func (r *xmlResponse) getErrorMessage(status int) string { - if r.errorMessage != "" { - return r.errorMessage - } - return http.StatusText(status) -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go deleted file mode 100644 index e56a7aa7950a..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package backend - -import "time" - -// Bucket represents the bucket that is stored within the fake server. -type Bucket struct { - Name string - VersioningEnabled bool - TimeCreated time.Time - DefaultEventBasedHold bool -} - -const bucketMetadataSuffix = ".bucketMetadata" - -type BucketAttrs struct { - DefaultEventBasedHold bool - VersioningEnabled bool -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go index 64f110682001..24b1b2cb9437 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go @@ -5,465 +5,129 @@ package backend import ( - "bytes" "encoding/json" - "errors" "fmt" - "io" - "io/fs" + "io/ioutil" "net/url" "os" + "path" "path/filepath" "strings" - "sync" - "syscall" - "time" - - "github.com/fsouza/fake-gcs-server/internal/checksum" - "github.com/pkg/xattr" ) -// storageFS is an implementation of the backend storage that stores data on disk -// +// StorageFS is an implementation of the backend storage that stores data on disk // The layout is the following: -// // - rootDir -// -// |- bucket1 -// \- bucket2 -// |- object1 -// \- object2 -// +// |- bucket1 +// \- bucket2 +// |- object1 +// \- object2 // Bucket and object names are url path escaped, so there's no special meaning of forward slashes. -type storageFS struct { +type StorageFS struct { rootDir string - mtx sync.RWMutex - mh metadataHandler } -// NewStorageFS creates an instance of the filesystem-backed storage backend. -func NewStorageFS(objects []StreamingObject, rootDir string) (Storage, error) { +// NewStorageFS creates an instance of StorageMemory +func NewStorageFS(objects []Object, rootDir string) (Storage, error) { if !strings.HasSuffix(rootDir, "/") { rootDir += "/" } - err := os.MkdirAll(rootDir, 0o700) - if err != nil { - return nil, err + s := &StorageFS{ + rootDir: rootDir, } - - var mh metadataHandler = metadataFile{} - // Use xattr for metadata if rootDir supports it. 
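// Illustrative sketch (editor-added, not part of the vendored code): the
// StorageFS layout documented above keeps one directory per bucket and one
// file per object, with both names URL path-escaped so a "/" inside an
// object key carries no filesystem meaning. The rootDir and object key
// below are hypothetical values.
package main

import (
	"fmt"
	"net/url"
	"path/filepath"
)

// objectPath mirrors the escaping scheme the filesystem backend uses.
func objectPath(rootDir, bucket, object string) string {
	return filepath.Join(rootDir, url.PathEscape(bucket), url.PathEscape(object))
}

func main() {
	fmt.Println(objectPath("/tmp/fake-gcs", "chunks", "fake/chunk-001"))
	// /tmp/fake-gcs/chunks/fake%2Fchunk-001
}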
- if xattr.XATTR_SUPPORTED { - xattrHandler := metadataXattr{} - var xerr *xattr.Error - _, err = xattrHandler.read(rootDir) - if err == nil || (errors.As(err, &xerr) && xerr.Err == xattr.ENOATTR) { - mh = xattrHandler - } - } - - s := &storageFS{rootDir: rootDir, mh: mh} for _, o := range objects { - obj, err := s.CreateObject(o, NoConditions{}) + err := s.CreateObject(o) if err != nil { return nil, err } - obj.Close() } return s, nil } -// CreateBucket creates a bucket in the fs backend. A bucket is a folder in the -// root directory. -func (s *storageFS) CreateBucket(name string, bucketAttrs BucketAttrs) error { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.createBucket(name, bucketAttrs) -} - -func (s *storageFS) createBucket(name string, bucketAttrs BucketAttrs) error { - if bucketAttrs.VersioningEnabled { - return errors.New("not implemented: fs storage type does not support versioning yet") - } - path := filepath.Join(s.rootDir, url.PathEscape(name)) - err := os.MkdirAll(path, 0o700) - if err != nil { - return err - } - encoded, err := json.Marshal(bucketAttrs) - if err != nil { - return err - } - return writeFile(path+bucketMetadataSuffix, encoded, 0o600) +// CreateBucket creates a bucket +func (s *StorageFS) CreateBucket(name string) error { + return os.MkdirAll(filepath.Join(s.rootDir, url.PathEscape(name)), 0700) } -// ListBuckets returns a list of buckets from the list of directories in the -// root directory. -func (s *storageFS) ListBuckets() ([]Bucket, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - infos, err := os.ReadDir(s.rootDir) +// ListBuckets lists buckets +func (s *StorageFS) ListBuckets() ([]string, error) { + infos, err := ioutil.ReadDir(s.rootDir) if err != nil { return nil, err } - buckets := []Bucket{} + buckets := []string{} for _, info := range infos { if info.IsDir() { unescaped, err := url.PathUnescape(info.Name()) if err != nil { - return nil, fmt.Errorf("failed to unescape object name %s: %w", info.Name(), err) - } - fileInfo, err := info.Info() - if err != nil { - return nil, fmt.Errorf("failed to get file info for %s: %w", info.Name(), err) + return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err) } - buckets = append(buckets, Bucket{Name: unescaped, TimeCreated: timespecToTime(createTimeFromFileInfo(fileInfo))}) + buckets = append(buckets, unescaped) } } return buckets, nil } -func timespecToTime(ts syscall.Timespec) time.Time { - return time.Unix(int64(ts.Sec), int64(ts.Nsec)) +// GetBucket checks if a bucket exists +func (s *StorageFS) GetBucket(name string) error { + _, err := os.Stat(filepath.Join(s.rootDir, url.PathEscape(name))) + return err } -func (s *storageFS) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error { - if attrsToUpdate.VersioningEnabled { - return errors.New("not implemented: fs storage type does not support versioning yet") - } - encoded, err := json.Marshal(attrsToUpdate) +// CreateObject stores an object +func (s *StorageFS) CreateObject(obj Object) error { + err := s.CreateBucket(obj.BucketName) if err != nil { return err } - path := filepath.Join(s.rootDir, url.PathEscape(bucketName)) - return writeFile(path+bucketMetadataSuffix, encoded, 0o600) -} - -// GetBucket returns information about the given bucket, or an error if it -// doesn't exist. 
-func (s *storageFS) GetBucket(name string) (Bucket, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - path := filepath.Join(s.rootDir, url.PathEscape(name)) - dirInfo, err := os.Stat(path) - if err != nil { - return Bucket{}, err - } - attrs, err := getBucketAttributes(path) - if err != nil { - return Bucket{}, err - } - return Bucket{Name: name, VersioningEnabled: false, TimeCreated: timespecToTime(createTimeFromFileInfo(dirInfo)), DefaultEventBasedHold: attrs.DefaultEventBasedHold}, err -} - -func getBucketAttributes(path string) (BucketAttrs, error) { - content, err := os.ReadFile(path + bucketMetadataSuffix) + encoded, err := json.Marshal(obj) if err != nil { - if os.IsNotExist(err) { - return BucketAttrs{}, nil - } - return BucketAttrs{}, err - } - var attrs BucketAttrs - err = json.Unmarshal(content, &attrs) - if err != nil { - return BucketAttrs{}, err - } - return attrs, nil -} - -// DeleteBucket removes the bucket from the backend. -func (s *storageFS) DeleteBucket(name string) error { - objs, err := s.ListObjects(name, "", false) - if err != nil { - return BucketNotFound - } - if len(objs) > 0 { - return BucketNotEmpty + return err } - - s.mtx.Lock() - defer s.mtx.Unlock() - return os.RemoveAll(filepath.Join(s.rootDir, url.PathEscape(name))) + return ioutil.WriteFile(filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)), encoded, 0664) } -// CreateObject stores an object as a regular file on disk. The backing content -// for the object may be in the same file that's being updated, so a temporary -// file is first created and then moved into place. This also makes it so any -// object content readers currently open continue reading from the original -// file instead of the newly created file. -// -// The crc32c checksum and md5 hash of the object content is calculated when -// reading the object content. Any checksum or hash in the passed-in object -// metadata is overwritten. -func (s *storageFS) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) { - if obj.Generation > 0 { - return StreamingObject{}, errors.New("not implemented: fs storage type does not support objects generation yet") - } - - // Note: this was a quick fix for issue #701. Now that we have a way to - // persist object attributes, we should implement versioning in the - // filesystem backend and handle generations outside of the backends. 
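The deleted code above persists per-bucket attributes in a `<bucket>.bucketMetadata` JSON sidecar next to the bucket directory. A minimal round-trip of that scheme; `bucketAttrs` here is a local stand-in for the backend type, since `internal/backend` is not importable from other modules:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// bucketAttrs mirrors the sidecar schema used by the deleted backend.
type bucketAttrs struct {
	DefaultEventBasedHold bool
	VersioningEnabled     bool
}

func main() {
	// Sidecar lives next to the bucket directory, with a fixed suffix.
	sidecar := filepath.Join(os.TempDir(), "chunks") + ".bucketMetadata"

	encoded, err := json.Marshal(bucketAttrs{DefaultEventBasedHold: true})
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile(sidecar, encoded, 0o600); err != nil {
		panic(err)
	}

	raw, err := os.ReadFile(sidecar)
	if err != nil {
		panic(err)
	}
	var attrs bucketAttrs
	if err := json.Unmarshal(raw, &attrs); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", attrs) // {DefaultEventBasedHold:true VersioningEnabled:false}
}
```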
- obj.Generation = time.Now().UnixNano() / 1000 - - s.mtx.Lock() - defer s.mtx.Unlock() - err := s.createBucket(obj.BucketName, BucketAttrs{VersioningEnabled: false}) - if err != nil { - return StreamingObject{}, err - } - - var activeGeneration int64 - existingObj, err := s.getObject(obj.BucketName, obj.Name) - if err != nil { - activeGeneration = 0 - } else { - activeGeneration = existingObj.Generation - } - - if !conditions.ConditionsMet(activeGeneration) { - return StreamingObject{}, PreConditionFailed - } - - path := filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), obj.Name) - if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil { - return StreamingObject{}, err - } - - // Nothing to do if this operation only creates directories - if strings.HasSuffix(obj.Name, "/") { - // TODO: populate Crc32c, Md5Hash, and Etag - return StreamingObject{obj.ObjectAttrs, noopSeekCloser{bytes.NewReader([]byte{})}}, nil - } - - var buf bytes.Buffer - hasher := checksum.NewStreamingHasher() - objectContent := io.TeeReader(obj.Content, hasher) - - if _, err = io.Copy(&buf, objectContent); err != nil { - return StreamingObject{}, err - } - - if obj.Crc32c == "" { - obj.Crc32c = hasher.EncodedCrc32cChecksum() - } - if obj.Md5Hash == "" { - obj.Md5Hash = hasher.EncodedMd5Hash() - } - if obj.Etag == "" { - obj.Etag = fmt.Sprintf("%q", obj.Md5Hash) - } - - // TODO: Handle if metadata is not present more gracefully? - encoded, err := json.Marshal(obj.ObjectAttrs) +// ListObjects lists the objects in a given bucket with a given prefix and delimeter +func (s *StorageFS) ListObjects(bucketName string) ([]Object, error) { + infos, err := ioutil.ReadDir(path.Join(s.rootDir, url.PathEscape(bucketName))) if err != nil { - return StreamingObject{}, err - } - - if err := writeFile(path, buf.Bytes(), 0o600); err != nil { - return StreamingObject{}, err - } - - if err = s.mh.write(path, encoded); err != nil { - return StreamingObject{}, err + return nil, err } - - err = openObjectAndSetSize(&obj, path) - - return obj, err -} - -// ListObjects lists the objects in a given bucket with a given prefix and -// delimeter. -func (s *storageFS) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - - objects := []ObjectAttrs{} - bucketPath := filepath.Join(s.rootDir, url.PathEscape(bucketName)) - if err := filepath.Walk(bucketPath, func(path string, info fs.FileInfo, err error) error { + objects := []Object{} + for _, info := range infos { + unescaped, err := url.PathUnescape(info.Name()) if err != nil { - return err - } - - objName, _ := filepath.Rel(bucketPath, path) - if s.mh.isSpecialFile(info.Name()) { - return nil - } - if info.IsDir() { - return nil + return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err) } - if prefix != "" && !strings.HasPrefix(objName, prefix) { - return nil - } - objAttrs, err := s.getObjectAttrs(bucketName, objName) + object, err := s.GetObject(bucketName, unescaped) if err != nil { - return err + return nil, err } - objects = append(objects, objAttrs) - return nil - }); err != nil { - return nil, err + objects = append(objects, object) } return objects, nil } -// GetObject get an object by bucket and name. -func (s *storageFS) GetObject(bucketName, objectName string) (StreamingObject, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - return s.getObject(bucketName, objectName) -} - -// GetObjectWithGeneration retrieves an specific version of the object. 
Not -// implemented for this backend. -func (s *storageFS) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) { - obj, err := s.GetObject(bucketName, objectName) - if err != nil { - return obj, err - } - if obj.Generation != generation { - return obj, fmt.Errorf("generation mismatch, object generation is %v, requested generation is %v (note: filesystem backend does not support versioning)", obj.Generation, generation) - } - return obj, nil -} - -func (s *storageFS) getObject(bucketName, objectName string) (StreamingObject, error) { - attrs, err := s.getObjectAttrs(bucketName, objectName) - if err != nil { - return StreamingObject{}, err - } - - obj := StreamingObject{ObjectAttrs: attrs} - path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName) - err = openObjectAndSetSize(&obj, path) - - return obj, err -} - -func openObjectAndSetSize(obj *StreamingObject, path string) error { - info, err := os.Stat(path) +// GetObject get an object by bucket and name +func (s *StorageFS) GetObject(bucketName, objectName string) (Object, error) { + encoded, err := ioutil.ReadFile(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName))) if err != nil { - return err + return Object{}, err } - - obj.Content = newLazyReader(path) - obj.Size = info.Size() - - return nil -} - -func (s *storageFS) getObjectAttrs(bucketName, objectName string) (ObjectAttrs, error) { - path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName) - encoded, err := s.mh.read(path) - if err != nil { - return ObjectAttrs{}, err - } - - var attrs ObjectAttrs - if err = json.Unmarshal(encoded, &attrs); err != nil { - return ObjectAttrs{}, err - } - - info, err := os.Stat(path) + var obj Object + err = json.Unmarshal(encoded, &obj) if err != nil { - return ObjectAttrs{}, fmt.Errorf("failed to stat: %w", err) + return Object{}, err } - - attrs.Name = filepath.ToSlash(objectName) - attrs.BucketName = bucketName - attrs.Size = info.Size() - return attrs, nil + obj.Name = objectName + obj.BucketName = bucketName + return obj, nil } -// DeleteObject deletes an object by bucket and name. 
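The restored `CreateObject` earlier in this hunk persists each object as a single JSON document, content included. Because `Content` is a `[]byte`, encoding/json stores it base64-encoded; a small demonstration with a trimmed stand-in type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// object is a trimmed stand-in for the backend's Object type.
type object struct {
	Name    string `json:"-"` // skipped, recovered from the file path instead
	Content []byte
}

func main() {
	b, err := json.Marshal(object{Name: "object1", Content: []byte("hello")})
	if err != nil {
		panic(err)
	}
	// Name is omitted (json:"-"); Content comes out base64-encoded.
	fmt.Println(string(b)) // {"Content":"aGVsbG8="}
}
```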
-func (s *storageFS) DeleteObject(bucketName, objectName string) error { - s.mtx.Lock() - defer s.mtx.Unlock() +// DeleteObject deletes an object by bucket and name +func (s *StorageFS) DeleteObject(bucketName, objectName string) error { if objectName == "" { - return errors.New("can't delete object with empty name") + return fmt.Errorf("can't delete object with empty name") } - path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName) - if err := s.mh.remove(path); err != nil { - return err - } - return os.Remove(path) -} - -func (s *storageFS) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { - obj, err := s.GetObject(bucketName, objectName) - if err != nil { - return StreamingObject{}, err - } - defer obj.Close() - - obj.patch(attrsToUpdate) - obj.Generation = 0 // reset generation id - return s.CreateObject(obj, NoConditions{}) -} - -func (s *storageFS) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { - obj, err := s.GetObject(bucketName, objectName) - if err != nil { - return StreamingObject{}, err - } - defer obj.Close() - - if attrsToUpdate.Metadata != nil { - obj.Metadata = map[string]string{} - } - obj.patch(attrsToUpdate) - obj.Generation = 0 // reset generation id - return s.CreateObject(obj, NoConditions{}) -} - -type concatenatedContent struct { - io.Reader -} - -func (c concatenatedContent) Close() error { - return errors.New("not implemented") -} - -func (c concatenatedContent) Seek(offset int64, whence int) (int64, error) { - return 0, errors.New("not implemented") -} - -func concatObjectReaders(objects []StreamingObject) io.ReadSeekCloser { - readers := make([]io.Reader, len(objects)) - for i := range objects { - readers[i] = objects[i].Content - } - return concatenatedContent{io.MultiReader(readers...)} -} - -func (s *storageFS) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) { - var sourceObjects []StreamingObject - for _, n := range objectNames { - obj, err := s.GetObject(bucketName, n) - if err != nil { - return StreamingObject{}, err - } - defer obj.Close() - sourceObjects = append(sourceObjects, obj) - } - - dest := StreamingObject{ - ObjectAttrs: ObjectAttrs{ - BucketName: bucketName, - Name: destinationName, - ContentType: contentType, - Created: time.Now().String(), - }, - } - - dest.Content = concatObjectReaders(sourceObjects) - dest.Metadata = metadata - - result, err := s.CreateObject(dest, NoConditions{}) - if err != nil { - return result, err - } - - return result, nil + return os.Remove(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName))) } diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go deleted file mode 100644 index 8c30a3149213..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package backend - -import ( - "io" - "os" - "sync" -) - -type lazyReader struct { - filename string - once *sync.Once - f *os.File - err error -} - -func newLazyReader(filename string) io.ReadSeekCloser { - return &lazyReader{ - filename: filename, - once: &sync.Once{}, - } -} - -func (r *lazyReader) open() { - r.f, r.err = os.Open(r.filename) -} - -func (r *lazyReader) Read(p []byte) (int, error) { - r.once.Do(r.open) - if r.err != nil { - return 0, r.err - } - return r.f.Read(p) -} - -func (r *lazyReader) Seek(offset int64, whence int) (int64, error) { - r.once.Do(r.open) - if r.err != nil { - return 0, r.err - } - return r.f.Seek(offset, whence) -} - -func (r *lazyReader) Close() error { - r.once.Do(r.open) - if r.err != nil { - return r.err - } - return r.f.Close() -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go index 5075bf19653a..257843ad3630 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go @@ -7,386 +7,118 @@ package backend import ( "errors" "fmt" - "io" - "strings" "sync" - "time" - - "github.com/fsouza/fake-gcs-server/internal/checksum" ) -const timestampFormat = "2006-01-02T15:04:05.999999Z07:00" - -// storageMemory is an implementation of the backend storage that stores data -// in memory. -type storageMemory struct { - buckets map[string]bucketInMemory +// StorageMemory is an implementation of the backend storage that stores data in memory +type StorageMemory struct { + buckets map[string][]Object mtx sync.RWMutex } -type bucketInMemory struct { - Bucket - // maybe we can refactor how the memory backend works? no need to store - // Object instances. 
- activeObjects []Object - archivedObjects []Object -} - -func newBucketInMemory(name string, versioningEnabled bool, bucketAttrs BucketAttrs) bucketInMemory { - return bucketInMemory{Bucket{name, versioningEnabled, time.Now(), bucketAttrs.DefaultEventBasedHold}, []Object{}, []Object{}} -} - -func (bm *bucketInMemory) addObject(obj Object) Object { - if obj.Crc32c == "" { - obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content) - } - if obj.Md5Hash == "" { - obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content) - } - if obj.Etag == "" { - obj.Etag = fmt.Sprintf("%q", obj.Md5Hash) - } - if obj.Size == 0 { - obj.Size = int64(len(obj.Content)) - } - obj.Generation = getNewGenerationIfZero(obj.Generation) - index := findObject(obj, bm.activeObjects, false) - if index >= 0 { - if bm.VersioningEnabled { - bm.activeObjects[index].Deleted = time.Now().Format(timestampFormat) - bm.cpToArchive(bm.activeObjects[index]) - } - bm.activeObjects[index] = obj - } else { - bm.activeObjects = append(bm.activeObjects, obj) - } - - return obj -} - -func getNewGenerationIfZero(generation int64) int64 { - if generation == 0 { - return time.Now().UnixNano() / 1000 - } - return generation -} - -func (bm *bucketInMemory) deleteObject(obj Object, matchGeneration bool) { - index := findObject(obj, bm.activeObjects, matchGeneration) - if index < 0 { - return - } - if bm.VersioningEnabled { - obj.Deleted = time.Now().Format(timestampFormat) - bm.mvToArchive(obj) - } else { - bm.deleteFromObjectList(obj, true) - } -} - -func (bm *bucketInMemory) cpToArchive(obj Object) { - bm.archivedObjects = append(bm.archivedObjects, obj) -} - -func (bm *bucketInMemory) mvToArchive(obj Object) { - bm.cpToArchive(obj) - bm.deleteFromObjectList(obj, true) -} - -func (bm *bucketInMemory) deleteFromObjectList(obj Object, active bool) { - objects := bm.activeObjects - if !active { - objects = bm.archivedObjects - } - index := findObject(obj, objects, !active) - objects[index] = objects[len(objects)-1] - if active { - bm.activeObjects = objects[:len(objects)-1] - } else { - bm.archivedObjects = objects[:len(objects)-1] - } -} - -// findObject looks for an object in the given list and return the index where it -// was found, or -1 if the object doesn't exist. -func findObject(obj Object, objectList []Object, matchGeneration bool) int { - for i, o := range objectList { - if matchGeneration && obj.ID() == o.ID() { - return i - } - if !matchGeneration && obj.IDNoGen() == o.IDNoGen() { - return i - } - } - return -1 -} - -// findLastObjectGeneration looks for an object in the given list and return the index where it -// was found, or -1 if the object doesn't exist. -func findLastObjectGeneration(obj Object, objectList []Object) int64 { - highScore := int64(0) - for _, o := range objectList { - if obj.IDNoGen() == o.IDNoGen() && o.Generation > highScore { - highScore = o.Generation - } - } - return highScore -} - -// NewStorageMemory creates an instance of StorageMemory. 
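In the deleted memory backend, `addObject` fills in a missing ETag by %q-quoting the base64 MD5 hash of the content (HTTP ETags are conventionally quoted strings). The same derivation in isolation:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	content := []byte("hello world")

	sum := md5.Sum(content)
	md5Hash := base64.StdEncoding.EncodeToString(sum[:])

	// %q wraps the value in literal double quotes.
	etag := fmt.Sprintf("%q", md5Hash)
	fmt.Println(md5Hash)
	fmt.Println(etag)
}
```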
-func NewStorageMemory(objects []StreamingObject) (Storage, error) { - s := &storageMemory{ - buckets: make(map[string]bucketInMemory), +// NewStorageMemory creates an instance of StorageMemory +func NewStorageMemory(objects []Object) Storage { + s := &StorageMemory{ + buckets: make(map[string][]Object), } for _, o := range objects { - bufferedObject, err := o.BufferedObject() - if err != nil { - return nil, err - } - s.CreateBucket(o.BucketName, BucketAttrs{false, false}) - bucket := s.buckets[o.BucketName] - bucket.addObject(bufferedObject) - s.buckets[o.BucketName] = bucket - } - return s, nil -} - -func (s *storageMemory) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error { - bucketInMemory, err := s.getBucketInMemory(bucketName) - if err != nil { - return err + s.buckets[o.BucketName] = append(s.buckets[o.BucketName], o) } - bucketInMemory.DefaultEventBasedHold = attrsToUpdate.DefaultEventBasedHold - bucketInMemory.VersioningEnabled = attrsToUpdate.VersioningEnabled - s.buckets[bucketName] = bucketInMemory - return nil + return s } -// CreateBucket creates a bucket. -func (s *storageMemory) CreateBucket(name string, bucketAttrs BucketAttrs) error { +// CreateBucket creates a bucket +func (s *StorageMemory) CreateBucket(name string) error { s.mtx.Lock() defer s.mtx.Unlock() - bucket, err := s.getBucketInMemory(name) - if err == nil { - if bucket.VersioningEnabled != bucketAttrs.VersioningEnabled { - return fmt.Errorf("a bucket named %s already exists, but with different properties", name) - } - return nil + if _, ok := s.buckets[name]; !ok { + s.buckets[name] = nil } - s.buckets[name] = newBucketInMemory(name, bucketAttrs.VersioningEnabled, bucketAttrs) return nil } -// ListBuckets lists buckets currently registered in the backend. -func (s *storageMemory) ListBuckets() ([]Bucket, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - buckets := []Bucket{} - for _, bucketInMemory := range s.buckets { - buckets = append(buckets, Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, false}) +// ListBuckets lists buckets +func (s *StorageMemory) ListBuckets() ([]string, error) { + s.mtx.Lock() + defer s.mtx.Unlock() + buckets := []string{} + for bucket := range s.buckets { + buckets = append(buckets, bucket) } return buckets, nil } -// GetBucket retrieves the bucket information from the backend. -func (s *storageMemory) GetBucket(name string) (Bucket, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - bucketInMemory, err := s.getBucketInMemory(name) - return Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, bucketInMemory.DefaultEventBasedHold}, err -} +// GetBucket checks if a bucket exists +func (s *StorageMemory) GetBucket(name string) error { + s.mtx.Lock() + defer s.mtx.Unlock() -func (s *storageMemory) getBucketInMemory(name string) (bucketInMemory, error) { - if bucketInMemory, found := s.buckets[name]; found { - return bucketInMemory, nil + if _, ok := s.buckets[name]; !ok { + return fmt.Errorf("no bucket named %s", name) } - return bucketInMemory{}, fmt.Errorf("no bucket named %s", name) + return nil } -// DeleteBucket removes the bucket from the backend. 
-func (s *storageMemory) DeleteBucket(name string) error { - objs, err := s.ListObjects(name, "", false) - if err != nil { - return BucketNotFound - } - if len(objs) > 0 { - return BucketNotEmpty - } - +// CreateObject stores an object +func (s *StorageMemory) CreateObject(obj Object) error { s.mtx.Lock() defer s.mtx.Unlock() - delete(s.buckets, name) + + index := s.findObject(obj) + if index < 0 { + s.buckets[obj.BucketName] = append(s.buckets[obj.BucketName], obj) + } else { + s.buckets[obj.BucketName][index] = obj + } return nil } -// CreateObject stores an object in the backend. -func (s *storageMemory) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) { - s.mtx.Lock() - defer s.mtx.Unlock() - bucketInMemory, err := s.getBucketInMemory(obj.BucketName) - if err != nil { - bucketInMemory = newBucketInMemory(obj.BucketName, false, BucketAttrs{}) - } - bufferedObj, err := obj.BufferedObject() - currentGeneration := findLastObjectGeneration(bufferedObj, bucketInMemory.activeObjects) - if !conditions.ConditionsMet(currentGeneration) { - return StreamingObject{}, PreConditionFailed - } - if err != nil { - return StreamingObject{}, err +// findObject looks for an object in its bucket and return the index where it +// was found, or -1 if the object doesn't exist. +// +// It doesn't lock the mutex, callers must lock the mutex before calling this +// method. +func (s *StorageMemory) findObject(obj Object) int { + for i, o := range s.buckets[obj.BucketName] { + if obj.ID() == o.ID() { + return i + } } - newObj := bucketInMemory.addObject(bufferedObj) - s.buckets[obj.BucketName] = bucketInMemory - return newObj.StreamingObject(), nil + return -1 } -// ListObjects lists the objects in a given bucket with a given prefix and -// delimiter. -func (s *storageMemory) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) { +// ListObjects lists the objects in a given bucket with a given prefix and delimeter +func (s *StorageMemory) ListObjects(bucketName string) ([]Object, error) { s.mtx.RLock() defer s.mtx.RUnlock() - bucketInMemory, err := s.getBucketInMemory(bucketName) - if err != nil { - return []ObjectAttrs{}, err - } - objAttrs := make([]ObjectAttrs, 0, len(bucketInMemory.activeObjects)) - for _, obj := range bucketInMemory.activeObjects { - if prefix != "" && !strings.HasPrefix(obj.Name, prefix) { - continue - } - objAttrs = append(objAttrs, obj.ObjectAttrs) - } - if !versions { - return objAttrs, nil + objects, ok := s.buckets[bucketName] + if !ok { + return nil, errors.New("bucket not found") } - - archvObjs := make([]ObjectAttrs, 0, len(bucketInMemory.archivedObjects)) - for _, obj := range bucketInMemory.archivedObjects { - if prefix != "" && !strings.HasPrefix(obj.Name, prefix) { - continue - } - archvObjs = append(archvObjs, obj.ObjectAttrs) - } - return append(objAttrs, archvObjs...), nil + return objects, nil } -func (s *storageMemory) GetObject(bucketName, objectName string) (StreamingObject, error) { - return s.GetObjectWithGeneration(bucketName, objectName, 0) -} - -// GetObjectWithGeneration retrieves a specific version of the object. 
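Both versions of the memory backend drop an object from its bucket slice with the swap-with-last idiom (the restored `DeleteObject` further below does exactly this): constant time, but listing order is not preserved. A standalone sketch:

```go
package main

import "fmt"

// removeAt deletes index i from a slice in O(1) by moving the last
// element into the hole; element order is not preserved.
func removeAt(objects []string, i int) []string {
	objects[i] = objects[len(objects)-1]
	return objects[:len(objects)-1]
}

func main() {
	bucket := []string{"a", "b", "c", "d"}
	fmt.Println(removeAt(bucket, 1)) // [a d c]
}
```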
-func (s *storageMemory) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) { +// GetObject get an object by bucket and name +func (s *StorageMemory) GetObject(bucketName, objectName string) (Object, error) { + obj := Object{BucketName: bucketName, Name: objectName} s.mtx.RLock() defer s.mtx.RUnlock() - bucketInMemory, err := s.getBucketInMemory(bucketName) - if err != nil { - return StreamingObject{}, err - } - matchGeneration := false - obj := Object{ObjectAttrs: ObjectAttrs{BucketName: bucketName, Name: objectName}} - listToConsider := bucketInMemory.activeObjects - if generation != 0 { - matchGeneration = true - obj.Generation = generation - listToConsider = append(listToConsider, bucketInMemory.archivedObjects...) - } - index := findObject(obj, listToConsider, matchGeneration) + index := s.findObject(obj) if index < 0 { - return obj.StreamingObject(), errors.New("object not found") + return obj, errors.New("object not found") } - - return listToConsider[index].StreamingObject(), nil + return s.buckets[bucketName][index], nil } -func (s *storageMemory) DeleteObject(bucketName, objectName string) error { - obj, err := s.GetObject(bucketName, objectName) - if err != nil { - return err - } - s.mtx.Lock() - defer s.mtx.Unlock() - bucketInMemory, err := s.getBucketInMemory(bucketName) - if err != nil { - return err - } - bufferedObject, err := obj.BufferedObject() - if err != nil { - return err +// DeleteObject deletes an object by bucket and name +func (s *StorageMemory) DeleteObject(bucketName, objectName string) error { + obj := Object{BucketName: bucketName, Name: objectName} + index := s.findObject(obj) + if index < 0 { + return fmt.Errorf("no such object in bucket %s: %s", bucketName, objectName) } - bucketInMemory.deleteObject(bufferedObject, true) - s.buckets[bucketName] = bucketInMemory + bucket := s.buckets[obj.BucketName] + bucket[index] = bucket[len(bucket)-1] + s.buckets[obj.BucketName] = bucket[:len(bucket)-1] return nil } - -func (s *storageMemory) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { - obj, err := s.GetObject(bucketName, objectName) - if err != nil { - return StreamingObject{}, err - } - - obj.patch(attrsToUpdate) - s.CreateObject(obj, NoConditions{}) - return obj, nil -} - -// UpdateObject replaces an object metadata, custom time, and acl. -func (s *storageMemory) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { - obj, err := s.GetObject(bucketName, objectName) - if err != nil { - return StreamingObject{}, err - } - - if attrsToUpdate.Metadata != nil { - obj.Metadata = map[string]string{} - } - obj.patch(attrsToUpdate) - s.CreateObject(obj, NoConditions{}) - return obj, nil -} - -func (s *storageMemory) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) { - var data []byte - for _, n := range objectNames { - obj, err := s.GetObject(bucketName, n) - if err != nil { - return StreamingObject{}, err - } - objectContent, err := io.ReadAll(obj.Content) - if err != nil { - return StreamingObject{}, err - } - data = append(data, objectContent...) 
- } - - var dest Object - streamingDest, err := s.GetObject(bucketName, destinationName) - if err != nil { - dest = Object{ - ObjectAttrs: ObjectAttrs{ - BucketName: bucketName, - Name: destinationName, - ContentType: contentType, - Created: time.Now().String(), - }, - } - } else { - dest, err = streamingDest.BufferedObject() - if err != nil { - return StreamingObject{}, err - } - } - - dest.Content = data - dest.Crc32c = "" - dest.Md5Hash = "" - dest.Etag = "" - dest.Size = 0 - dest.Metadata = metadata - - result, err := s.CreateObject(dest.StreamingObject(), NoConditions{}) - if err != nil { - return result, err - } - - return result, nil -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go deleted file mode 100644 index 6d9d2313d27d..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package backend - -type metadataHandler interface { - write(path string, encoded []byte) error - read(path string) ([]byte, error) - remove(path string) error - isSpecialFile(path string) bool - rename(pathSrc, pathDst string) error -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go deleted file mode 100644 index 94cce654a8c6..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package backend - -import ( - "os" - "strings" -) - -const metadataSuffix = ".metadata" - -type metadataFile struct{} - -func (m metadataFile) write(path string, encoded []byte) error { - return writeFile(path+metadataSuffix, encoded, 0o600) -} - -func (m metadataFile) read(path string) ([]byte, error) { - return os.ReadFile(path + metadataSuffix) -} - -func (m metadataFile) isSpecialFile(path string) bool { - return strings.HasSuffix(path, metadataSuffix) -} - -func (m metadataFile) remove(path string) error { - return os.Remove(path + metadataSuffix) -} - -func (m metadataFile) rename(pathSrc, pathDst string) error { - return os.Rename(pathSrc+metadataSuffix, pathDst+metadataSuffix) -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go deleted file mode 100644 index 9d40580120be..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package backend - -import ( - "github.com/pkg/xattr" -) - -const xattrKey = "user.metadata" - -type metadataXattr struct{} - -func (m metadataXattr) write(path string, encoded []byte) error { - return xattr.Set(path, xattrKey, encoded) -} - -func (m metadataXattr) read(path string) ([]byte, error) { - return xattr.Get(path, xattrKey) -} - -func (m metadataXattr) isSpecialFile(path string) bool { - return false -} - -func (m metadataXattr) remove(path string) error { - return nil -} - -func (m metadataXattr) rename(pathSrc, pathDst string) error { - return nil -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go index 7dc742e9d1d3..e0ca2b12ec57 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go @@ -4,102 +4,16 @@ package backend -import ( - "bytes" - "fmt" - "io" - "reflect" - - "cloud.google.com/go/storage" -) - -// ObjectAttrs represents the meta-data without its contents. -type ObjectAttrs struct { - BucketName string `json:"-"` - Name string `json:"-"` - Size int64 `json:"-"` - ContentType string - ContentEncoding string - Crc32c string - Md5Hash string - Etag string - ACL []storage.ACLRule - Metadata map[string]string - Created string - Deleted string - Updated string - CustomTime string - Generation int64 -} - -// ID is used for comparing objects. -func (o *ObjectAttrs) ID() string { - return fmt.Sprintf("%s#%d", o.IDNoGen(), o.Generation) -} - -// IDNoGen does not consider the generation field. -func (o *ObjectAttrs) IDNoGen() string { - return fmt.Sprintf("%s/%s", o.BucketName, o.Name) -} - // Object represents the object that is stored within the fake server. type Object struct { - ObjectAttrs - Content []byte -} - -type noopSeekCloser struct { - io.ReadSeeker -} - -func (n noopSeekCloser) Close() error { - return nil -} - -func (o Object) StreamingObject() StreamingObject { - return StreamingObject{ - ObjectAttrs: o.ObjectAttrs, - Content: noopSeekCloser{bytes.NewReader(o.Content)}, - } -} - -type StreamingObject struct { - ObjectAttrs - Content io.ReadSeekCloser -} - -func (o *StreamingObject) Close() error { - if o != nil && o.Content != nil { - return o.Content.Close() - } - return nil -} - -// Convert this StreamingObject to a (buffered) Object. 
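The deleted streaming API passes content around as `io.ReadSeekCloser`; for in-memory data it wraps a `bytes.Reader` in the no-op closer shown above. The adapter pattern on its own, standard library only:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// noopSeekCloser adds a no-op Close to any io.ReadSeeker, turning a
// bytes.Reader into an io.ReadSeekCloser.
type noopSeekCloser struct {
	io.ReadSeeker
}

func (noopSeekCloser) Close() error { return nil }

func main() {
	var rsc io.ReadSeekCloser = noopSeekCloser{bytes.NewReader([]byte("hello"))}
	defer rsc.Close()

	data, err := io.ReadAll(rsc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello
}
```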
-func (o *StreamingObject) BufferedObject() (Object, error) { - data, err := io.ReadAll(o.Content) - return Object{ - ObjectAttrs: o.ObjectAttrs, - Content: data, - }, err + BucketName string `json:"-"` + Name string `json:"-"` + Content []byte + Crc32c string + Md5Hash string } -func (o *StreamingObject) patch(attrsToUpdate ObjectAttrs) { - currObjValues := reflect.ValueOf(&(o.ObjectAttrs)).Elem() - currObjType := currObjValues.Type() - newObjValues := reflect.ValueOf(attrsToUpdate) - for i := 0; i < newObjValues.NumField(); i++ { - if reflect.Value.IsZero(newObjValues.Field(i)) { - continue - } else if currObjType.Field(i).Name == "Metadata" { - if o.Metadata == nil { - o.Metadata = map[string]string{} - } - for k, v := range attrsToUpdate.Metadata { - o.Metadata[k] = v - } - } else { - currObjValues.Field(i).Set(newObjValues.Field(i)) - } - } +// ID is useful for comparing objects +func (o *Object) ID() string { + return o.BucketName + "/" + o.Name } diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go index da8e8e51e212..c77583462fdb 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go @@ -2,43 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package backend proides the backends used by fake-gcs-server. package backend -type Conditions interface { - ConditionsMet(activeGeneration int64) bool -} - -type NoConditions struct{} - -func (NoConditions) ConditionsMet(int64) bool { - return true -} - -// Storage is the generic interface for implementing the backend storage of the -// server. 
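The restored v1.7.0 `Storage` interface (below) is much smaller: no preconditions, generations, patching, or compose. A usage sketch against the memory backend; illustrative only, since `internal/backend` is not importable from outside the fake-gcs-server module:

```go
package main

import (
	"fmt"

	// Internal package: this import only compiles inside the
	// fake-gcs-server module itself.
	"github.com/fsouza/fake-gcs-server/internal/backend"
)

func main() {
	s := backend.NewStorageMemory(nil) // v1.7.0: returns Storage, no error

	if err := s.CreateBucket("chunks"); err != nil {
		panic(err)
	}
	err := s.CreateObject(backend.Object{
		BucketName: "chunks",
		Name:       "object1",
		Content:    []byte("payload"),
	})
	if err != nil {
		panic(err)
	}

	objs, _ := s.ListObjects("chunks") // v1.7.0 takes no prefix/versions args
	fmt.Println(len(objs))             // 1
}
```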
+// Storage is the generic interface for implementing the backend storage of the server type Storage interface { - CreateBucket(name string, bucketAttrs BucketAttrs) error - ListBuckets() ([]Bucket, error) - GetBucket(name string) (Bucket, error) - UpdateBucket(name string, attrsToUpdate BucketAttrs) error - DeleteBucket(name string) error - CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) - ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) - GetObject(bucketName, objectName string) (StreamingObject, error) - GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) + CreateBucket(name string) error + ListBuckets() ([]string, error) + GetBucket(name string) error + CreateObject(obj Object) error + ListObjects(bucketName string) ([]Object, error) + GetObject(bucketName, objectName string) (Object, error) DeleteObject(bucketName, objectName string) error - PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) - UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) - ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) } - -type Error string - -func (e Error) Error() string { return string(e) } - -const ( - BucketNotFound = Error("bucket not found") - BucketNotEmpty = Error("bucket must be empty prior to deletion") - PreConditionFailed = Error("Precondition failed") -) diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_darwin.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_darwin.go deleted file mode 100644 index cb3998a95ccc..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_darwin.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2019 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package backend - -import ( - "os" - "syscall" -) - -func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec { - if statT, ok := input.Sys().(*syscall.Stat_t); ok { - return statT.Ctimespec - } - return syscall.Timespec{} -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go deleted file mode 100644 index 0f959e9b74c6..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package backend - -import ( - "os" - "syscall" -) - -func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec { - if statT, ok := input.Sys().(*syscall.Stat_t); ok { - // not true: Ctime is not created time, but not creating a file to persist this metadata, yet... - return statT.Ctim - } - return syscall.Timespec{} -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go deleted file mode 100644 index 54c7bc9b0bad..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 Francisco Souza. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package backend - -import ( - "os" - "syscall" -) - -func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec { - if statT, ok := input.Sys().(*syscall.Win32FileAttributeData); ok { - nsec := statT.CreationTime.Nanoseconds() - return syscall.NsecToTimespec(nsec) - } - return syscall.Timespec{} -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go deleted file mode 100644 index 2e5e510fbc3d..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !windows - -package backend - -import ( - "os" - - "github.com/google/renameio/v2" -) - -func writeFile(filename string, data []byte, perm os.FileMode) error { - return renameio.WriteFile(filename, data, perm) -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go deleted file mode 100644 index 2d6600c80302..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package backend - -import ( - "os" -) - -func writeFile(filename string, data []byte, perm os.FileMode) error { - return os.WriteFile(filename, data, perm) -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go b/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go deleted file mode 100644 index c247336d8e65..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2021 Francisco Souza. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
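The deleted checksum helpers compute GCS-style checksums: CRC32C over the Castagnoli polynomial, serialized as the 4-byte big-endian digest (which is what `hash.Hash32`'s `Sum` appends) and then base64-encoded. A standard-library equivalent:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

var crc32cTable = crc32.MakeTable(crc32.Castagnoli)

// encodedCrc32c returns the base64 of the 4-byte big-endian CRC32C
// digest, the representation GCS uses for the crc32c object attribute.
func encodedCrc32c(content []byte) string {
	h := crc32.New(crc32cTable)
	h.Write(content)
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(encodedCrc32c([]byte("hello world")))
}
```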
- -package checksum - -import ( - "crypto/md5" - "encoding/base64" - "hash" - "hash/crc32" -) - -var crc32cTable = crc32.MakeTable(crc32.Castagnoli) - -func crc32cChecksum(content []byte) []byte { - checksummer := crc32.New(crc32cTable) - checksummer.Write(content) - return checksummer.Sum(make([]byte, 0, 4)) -} - -func EncodedChecksum(checksum []byte) string { - return base64.StdEncoding.EncodeToString(checksum) -} - -func EncodedCrc32cChecksum(content []byte) string { - return EncodedChecksum(crc32cChecksum(content)) -} - -func MD5Hash(b []byte) []byte { - h := md5.New() - h.Write(b) - return h.Sum(nil) -} - -func EncodedHash(hash []byte) string { - return base64.StdEncoding.EncodeToString(hash) -} - -func EncodedMd5Hash(content []byte) string { - return EncodedHash(MD5Hash(content)) -} - -type StreamingHasher struct { - crc32 hash.Hash32 - md5 hash.Hash -} - -func NewStreamingHasher() *StreamingHasher { - return &StreamingHasher{ - crc32: crc32.New(crc32cTable), - md5: md5.New(), - } -} - -func (s *StreamingHasher) Write(p []byte) (n int, err error) { - n, err = s.crc32.Write(p) - if err != nil { - return n, err - } - return s.md5.Write(p) -} - -func (s *StreamingHasher) EncodedCrc32cChecksum() string { - return EncodedChecksum(s.crc32.Sum(nil)) -} - -func (s *StreamingHasher) EncodedMd5Hash() string { - return EncodedHash(s.md5.Sum(nil)) -} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go b/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go deleted file mode 100644 index f20ac8c87a40..000000000000 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go +++ /dev/null @@ -1,222 +0,0 @@ -package notification - -import ( - "context" - "encoding/json" - "fmt" - "io" - "strconv" - "strings" - "time" - - "cloud.google.com/go/pubsub" - "github.com/fsouza/fake-gcs-server/internal/backend" -) - -// EventType is the type of event to trigger. The descriptions of the events -// can be found here: -// https://cloud.google.com/storage/docs/pubsub-notifications#events. -type EventType string - -const ( - // EventFinalize is triggered when an object is added. - EventFinalize EventType = "OBJECT_FINALIZE" - // EventDelete is triggered when an object is deleted. - EventDelete = "OBJECT_DELETE" - // EventMetadata is triggered when an object's metadata is changed. - EventMetadata = "OBJECT_METADATA_UPDATE" - // EventArchive bucket versioning must be enabled. is triggered when an object becomes the non current version - EventArchive = "OBJECT_ARCHIVE" -) - -// EventNotificationOptions contains flags for events, that if true, will create -// trigger notifications when they occur. -type EventNotificationOptions struct { - Finalize bool - Delete bool - MetadataUpdate bool - Archive bool -} - -// EventManagerOptions determines what events are triggered and where. -type EventManagerOptions struct { - // ProjectID is the project ID containing the pubsub topic. - ProjectID string - // TopicName is the pubsub topic name to publish events on. - TopicName string - // Bucket is the name of the bucket to publish events from. - Bucket string - // ObjectPrefix, if not empty, only objects having this prefix will generate - // trigger events. - ObjectPrefix string - // NotifyOn determines what events to trigger. - NotifyOn EventNotificationOptions -} - -type EventManager interface { - Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string) -} - -// PubsubEventManager checks if an event should be published. 
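The deleted event manager publishes through the real `cloud.google.com/go/pubsub` client: `Topic.Publish` is asynchronous and returns a `PublishResult` whose `Get` blocks until the server acknowledges the message. The core pattern, assuming a hypothetical project and topic (or the Pub/Sub emulator via `PUBSUB_EMULATOR_HOST`):

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()

	// Assumes credentials and a reachable Pub/Sub endpoint; with the
	// emulator, set PUBSUB_EMULATOR_HOST instead.
	client, err := pubsub.NewClient(ctx, "my-project")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	topic := client.Topic("gcs-events")
	res := topic.Publish(ctx, &pubsub.Message{
		Data:       []byte(`{"kind":"storage#object"}`),
		Attributes: map[string]string{"eventType": "OBJECT_FINALIZE"},
	})

	// Publish is async; Get blocks until the message is acknowledged.
	id, err := res.Get(ctx)
	fmt.Println(id, err)
}
```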
-type PubsubEventManager struct { - // publishSynchronously is a flag that if true, events will be published - // synchronously and not in a goroutine. It is used during tests to prevent - // race conditions. - publishSynchronously bool - // notifyOn determines what events are triggered. - notifyOn EventNotificationOptions - // writer is where logs are written to. - writer io.Writer - // bucket, if not empty, only objects from this bucker will generate trigger events. - bucket string - // objectPrefix, if not empty, only objects having this prefix will generate - // trigger events. - objectPrefix string - // publisher is used to publish events on. - publisher eventPublisher -} - -func NewPubsubEventManager(options EventManagerOptions, w io.Writer) (*PubsubEventManager, error) { - manager := &PubsubEventManager{ - writer: w, - notifyOn: options.NotifyOn, - bucket: options.Bucket, - objectPrefix: options.ObjectPrefix, - } - if options.ProjectID != "" && options.TopicName != "" { - ctx := context.Background() - client, err := pubsub.NewClient(ctx, options.ProjectID) - if err != nil { - return nil, fmt.Errorf("error creating pubsub client: %v", err) - } - manager.publisher = client.Topic(options.TopicName) - } - return manager, nil -} - -// eventPublisher is the interface to publish triggered events. -type eventPublisher interface { - Publish(ctx context.Context, msg *pubsub.Message) *pubsub.PublishResult -} - -// Trigger checks if an event should be triggered. If so, it publishes the -// event to a pubsub queue. -func (m *PubsubEventManager) Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string) { - if m.publisher == nil { - return - } - if m.bucket != "" && o.BucketName != m.bucket { - return - } - if m.objectPrefix != "" && !strings.HasPrefix(o.Name, m.objectPrefix) { - return - } - switch eventType { - case EventFinalize: - if !m.notifyOn.Finalize { - return - } - case EventDelete: - if !m.notifyOn.Delete { - return - } - case EventMetadata: - if !m.notifyOn.MetadataUpdate { - return - } - case EventArchive: - if !m.notifyOn.Archive { - return - } - } - eventTime := time.Now().Format(time.RFC3339) - publishFunc := func() { - err := m.publish(o, eventType, eventTime, extraEventAttr) - if m.writer != nil { - if err != nil { - fmt.Fprintf(m.writer, "error publishing event: %v", err) - } else { - fmt.Fprintf(m.writer, "sent event %s for object %s\n", string(eventType), o.ID()) - } - } - } - if m.publishSynchronously { - publishFunc() - } else { - go publishFunc() - } -} - -func (m *PubsubEventManager) publish(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) error { - ctx := context.Background() - data, attributes, err := generateEvent(o, eventType, eventTime, extraEventAttr) - if err != nil { - return err - } - if r := m.publisher.Publish(ctx, &pubsub.Message{ - Data: data, - Attributes: attributes, - }); r != nil { - _, err = r.Get(ctx) - return err - } - return nil -} - -// gcsEvent is the payload of a GCS event. Note that all properties are string-quoted. -// The description of the full object can be found here: -// https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations. 
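The `gcsEvent` payload just below quotes its numeric fields with the `,string` JSON tag option, since the GCS notification format represents generation and size as strings. What that option does:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type evt struct {
	Generation int64 `json:"generation,string,omitempty"`
	Size       int64 `json:"size,string"`
}

func main() {
	b, err := json.Marshal(evt{Generation: 1725888000000000, Size: 42})
	if err != nil {
		panic(err)
	}
	// The int64 values come out string-quoted:
	fmt.Println(string(b)) // {"generation":"1725888000000000","size":"42"}
}
```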
-type gcsEvent struct { - Kind string `json:"kind"` - ID string `json:"id"` - Name string `json:"name"` - Bucket string `json:"bucket"` - Generation int64 `json:"generation,string,omitempty"` - ContentType string `json:"contentType"` - ContentEncoding string `json:"contentEncoding,omitempty"` - Created string `json:"timeCreated,omitempty"` - Updated string `json:"updated,omitempty"` - StorageClass string `json:"storageClass"` - Size int64 `json:"size,string"` - MD5Hash string `json:"md5Hash,omitempty"` - CRC32c string `json:"crc32c,omitempty"` - MetaData map[string]string `json:"metadata,omitempty"` -} - -func generateEvent(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) ([]byte, map[string]string, error) { - payload := gcsEvent{ - Kind: "storage#object", - ID: o.ID(), - Name: o.Name, - Bucket: o.BucketName, - Generation: o.Generation, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - Created: o.Created, - Updated: o.Updated, - StorageClass: "STANDARD", - Size: o.Size, - MD5Hash: o.Md5Hash, - CRC32c: o.Crc32c, - MetaData: o.Metadata, - } - attributes := map[string]string{ - "bucketId": o.BucketName, - "eventTime": eventTime, - "eventType": string(eventType), - "objectGeneration": strconv.FormatInt(o.Generation, 10), - "objectId": o.Name, - "payloadFormat": "JSON_API_V1", - } - for k, v := range extraEventAttr { - if _, exists := attributes[k]; exists { - return nil, nil, fmt.Errorf("cannot overwrite duplicate event attribute %s", k) - } - attributes[k] = v - } - data, err := json.Marshal(&payload) - if err != nil { - return nil, nil, err - } - return data, attributes, nil -} diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig deleted file mode 100644 index c6b74c3e0d0c..000000000000 --- a/vendor/github.com/gorilla/handlers/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore deleted file mode 100644 index 577a89e81383..000000000000 --- a/vendor/github.com/gorilla/handlers/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Output of the go test coverage tool -coverage.coverprofile diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE deleted file mode 100644 index bb9d80bc9b6b..000000000000 --- a/vendor/github.com/gorilla/handlers/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile deleted file mode 100644 index 003b784f7edb..000000000000 --- a/vendor/github.com/gorilla/handlers/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: verify -verify: sec govulncheck lint test - -.PHONY: lint -lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint #####" - golangci-lint run -v - -.PHONY: sec -sec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec #####" - gosec ./... - -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck #####" - govulncheck ./... - -.PHONY: test -test: - @echo "##### Running tests #####" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md deleted file mode 100644 index 02555b2642c5..000000000000 --- a/vendor/github.com/gorilla/handlers/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# gorilla/handlers - -![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg) -[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers) -[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge) - -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's `net/http` package (or any framework supporting `http.Handler`), including: - -* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log - Format](http://httpd.apache.org/docs/2.2/logs.html#common). -* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log - Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by - both Apache and nginx. 
-* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses. -* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted - content types. -* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a - `map[string]http.Handler` -* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the - `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` - headers when running a Go server behind a HTTP reverse proxy. -* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple - domains (i.e. multiple CNAME aliases). -* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics. - -Other handlers are documented [on the Gorilla -website](https://www.gorillatoolkit.org/pkg/handlers). - -## Example - -A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: - -```go -import ( - "net/http" - "github.com/gorilla/handlers" -) - -func main() { - r := http.NewServeMux() - - // Only log requests to our admin dashboard to stdout - r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) - r.HandleFunc("/", ShowIndex) - - // Wrap our server with our gzip handler to gzip compress all responses. - http.ListenAndServe(":8000", handlers.CompressHandler(r)) -} -``` - -## License - -BSD licensed. See the included LICENSE file for details. - diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go deleted file mode 100644 index 7121f5307bec..000000000000 --- a/vendor/github.com/gorilla/handlers/canonical.go +++ /dev/null @@ -1,73 +0,0 @@ -package handlers - -import ( - "net/http" - "net/url" - "strings" -) - -type canonical struct { - h http.Handler - domain string - code int -} - -// CanonicalHost is HTTP middleware that re-directs requests to the canonical -// domain. It accepts a domain and a status code (e.g. 301 or 302) and -// re-directs clients to this domain. The existing request path is maintained. -// -// Note: If the provided domain is considered invalid by url.Parse or otherwise -// returns an empty scheme or host, clients are not re-directed. -// -// Example: -// -// r := mux.NewRouter() -// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) -// r.HandleFunc("/route", YourHandler) -// -// log.Fatal(http.ListenAndServe(":7000", canonical(r))) -func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { - fn := func(h http.Handler) http.Handler { - return canonical{h, domain, code} - } - - return fn -} - -func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) { - dest, err := url.Parse(c.domain) - if err != nil { - // Call the next handler if the provided domain fails to parse. - c.h.ServeHTTP(w, r) - return - } - - if dest.Scheme == "" || dest.Host == "" { - // Call the next handler if the scheme or host are empty. - // Note that url.Parse won't fail on in this case. - c.h.ServeHTTP(w, r) - return - } - - if !strings.EqualFold(cleanHost(r.Host), dest.Host) { - // Re-build the destination URL - dest := dest.Scheme + "://" + dest.Host + r.URL.Path - if r.URL.RawQuery != "" { - dest += "?" 
+ r.URL.RawQuery - } - http.Redirect(w, r, dest, c.code) - return - } - - c.h.ServeHTTP(w, r) -} - -// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '. -// This is backported from Go 1.5 (in response to issue #11206) and attempts to -// mitigate malformed Host headers that do not match the format in RFC7230. -func cleanHost(in string) string { - if i := strings.IndexAny(in, " /"); i != -1 { - return in[:i] - } - return in -} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go deleted file mode 100644 index d6f589503b5e..000000000000 --- a/vendor/github.com/gorilla/handlers/compress.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "compress/flate" - "compress/gzip" - "io" - "net/http" - "strings" - - "github.com/felixge/httpsnoop" -) - -const acceptEncoding string = "Accept-Encoding" - -type compressResponseWriter struct { - compressor io.Writer - w http.ResponseWriter -} - -func (cw *compressResponseWriter) WriteHeader(c int) { - cw.w.Header().Del("Content-Length") - cw.w.WriteHeader(c) -} - -func (cw *compressResponseWriter) Write(b []byte) (int, error) { - h := cw.w.Header() - if h.Get("Content-Type") == "" { - h.Set("Content-Type", http.DetectContentType(b)) - } - h.Del("Content-Length") - - return cw.compressor.Write(b) -} - -func (cw *compressResponseWriter) ReadFrom(r io.Reader) (int64, error) { - return io.Copy(cw.compressor, r) -} - -type flusher interface { - Flush() error -} - -func (cw *compressResponseWriter) Flush() { - // Flush compressed data if compressor supports it. - if f, ok := cw.compressor.(flusher); ok { - _ = f.Flush() - } - // Flush HTTP response. - if f, ok := cw.w.(http.Flusher); ok { - f.Flush() - } -} - -// CompressHandler gzip compresses HTTP responses for clients that support it -// via the 'Accept-Encoding' header. -// -// Compressing TLS traffic may leak the page contents to an attacker if the -// page contains user input: http://security.stackexchange.com/a/102015/12208 -func CompressHandler(h http.Handler) http.Handler { - return CompressHandlerLevel(h, gzip.DefaultCompression) -} - -// CompressHandlerLevel gzip compresses HTTP responses with specified compression level -// for clients that support it via the 'Accept-Encoding' header. -// -// The compression level should be gzip.DefaultCompression, gzip.NoCompression, -// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive. -// gzip.DefaultCompression is used in case of invalid compression level. 
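gorilla/handlers, also dropped from the vendor tree by this downgrade, clamps out-of-range compression levels to `gzip.DefaultCompression`, as the comment above notes. Typical usage, for reference:

```go
package main

import (
	"compress/gzip"
	"io"
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello\n")
	})

	// Levels outside [gzip.DefaultCompression, gzip.BestCompression]
	// silently fall back to gzip.DefaultCompression.
	h := handlers.CompressHandlerLevel(mux, gzip.BestSpeed)
	log.Fatal(http.ListenAndServe(":8000", h))
}
```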
-func CompressHandlerLevel(h http.Handler, level int) http.Handler { - if level < gzip.DefaultCompression || level > gzip.BestCompression { - level = gzip.DefaultCompression - } - - const ( - gzipEncoding = "gzip" - flateEncoding = "deflate" - ) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // detect what encoding to use - var encoding string - for _, curEnc := range strings.Split(r.Header.Get(acceptEncoding), ",") { - curEnc = strings.TrimSpace(curEnc) - if curEnc == gzipEncoding || curEnc == flateEncoding { - encoding = curEnc - break - } - } - - // always add Accept-Encoding to Vary to prevent intermediate caches corruption - w.Header().Add("Vary", acceptEncoding) - - // if we weren't able to identify an encoding we're familiar with, pass on the - // request to the handler and return - if encoding == "" { - h.ServeHTTP(w, r) - return - } - - if r.Header.Get("Upgrade") != "" { - h.ServeHTTP(w, r) - return - } - - // wrap the ResponseWriter with the writer for the chosen encoding - var encWriter io.WriteCloser - if encoding == gzipEncoding { - encWriter, _ = gzip.NewWriterLevel(w, level) - } else if encoding == flateEncoding { - encWriter, _ = flate.NewWriter(w, level) - } - defer encWriter.Close() - - w.Header().Set("Content-Encoding", encoding) - r.Header.Del(acceptEncoding) - - cw := &compressResponseWriter{ - w: w, - compressor: encWriter, - } - - w = httpsnoop.Wrap(w, httpsnoop.Hooks{ - Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { - return cw.Write - }, - WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { - return cw.WriteHeader - }, - Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { - return cw.Flush - }, - ReadFrom: func(rff httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc { - return cw.ReadFrom - }, - }) - - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go deleted file mode 100644 index 8af9c096e5e4..000000000000 --- a/vendor/github.com/gorilla/handlers/cors.go +++ /dev/null @@ -1,352 +0,0 @@ -package handlers - -import ( - "net/http" - "strconv" - "strings" -) - -// CORSOption represents a functional option for configuring the CORS middleware. -type CORSOption func(*cors) error - -type cors struct { - h http.Handler - allowedHeaders []string - allowedMethods []string - allowedOrigins []string - allowedOriginValidator OriginValidator - exposedHeaders []string - maxAge int - ignoreOptions bool - allowCredentials bool - optionStatusCode int -} - -// OriginValidator takes an origin string and returns whether or not that origin is allowed. -type OriginValidator func(string) bool - -var ( - defaultCorsOptionStatusCode = http.StatusOK - defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost} - defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} - // (WebKit/Safari v9 sends the Origin header by default in AJAX requests). 
-) - -const ( - corsOptionMethod string = http.MethodOptions - corsAllowOriginHeader string = "Access-Control-Allow-Origin" - corsExposeHeadersHeader string = "Access-Control-Expose-Headers" - corsMaxAgeHeader string = "Access-Control-Max-Age" - corsAllowMethodsHeader string = "Access-Control-Allow-Methods" - corsAllowHeadersHeader string = "Access-Control-Allow-Headers" - corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials" - corsRequestMethodHeader string = "Access-Control-Request-Method" - corsRequestHeadersHeader string = "Access-Control-Request-Headers" - corsOriginHeader string = "Origin" - corsVaryHeader string = "Vary" - corsOriginMatchAll string = "*" -) - -func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { - origin := r.Header.Get(corsOriginHeader) - if !ch.isOriginAllowed(origin) { - if r.Method != corsOptionMethod || ch.ignoreOptions { - ch.h.ServeHTTP(w, r) - } - - return - } - - if r.Method == corsOptionMethod { - if ch.ignoreOptions { - ch.h.ServeHTTP(w, r) - return - } - - if _, ok := r.Header[corsRequestMethodHeader]; !ok { - w.WriteHeader(http.StatusBadRequest) - return - } - - method := r.Header.Get(corsRequestMethodHeader) - if !ch.isMatch(method, ch.allowedMethods) { - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",") - allowedHeaders := []string{} - for _, v := range requestHeaders { - canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) - if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) { - continue - } - - if !ch.isMatch(canonicalHeader, ch.allowedHeaders) { - w.WriteHeader(http.StatusForbidden) - return - } - - allowedHeaders = append(allowedHeaders, canonicalHeader) - } - - if len(allowedHeaders) > 0 { - w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ",")) - } - - if ch.maxAge > 0 { - w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge)) - } - - if !ch.isMatch(method, defaultCorsMethods) { - w.Header().Set(corsAllowMethodsHeader, method) - } - } else if len(ch.exposedHeaders) > 0 { - w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) - } - - if ch.allowCredentials { - w.Header().Set(corsAllowCredentialsHeader, "true") - } - - if len(ch.allowedOrigins) > 1 { - w.Header().Set(corsVaryHeader, corsOriginHeader) - } - - returnOrigin := origin - if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 { - returnOrigin = "*" - } else { - for _, o := range ch.allowedOrigins { - // A configuration of * is different than explicitly setting an allowed - // origin. Returning arbitrary origin headers in an access control allow - // origin header is unsafe and is not required by any use case. - if o == corsOriginMatchAll { - returnOrigin = "*" - break - } - } - } - w.Header().Set(corsAllowOriginHeader, returnOrigin) - - if r.Method == corsOptionMethod { - w.WriteHeader(ch.optionStatusCode) - return - } - ch.h.ServeHTTP(w, r) -} - -// CORS provides Cross-Origin Resource Sharing middleware. -// Example: -// -// import ( -// "net/http" -// -// "github.com/gorilla/handlers" -// "github.com/gorilla/mux" -// ) -// -// func main() { -// r := mux.NewRouter() -// r.HandleFunc("/users", UserEndpoint) -// r.HandleFunc("/projects", ProjectEndpoint) -// -// // Apply the CORS middleware to our top-level router, with the defaults. 
-// http.ListenAndServe(":8000", handlers.CORS()(r)) -// } -func CORS(opts ...CORSOption) func(http.Handler) http.Handler { - return func(h http.Handler) http.Handler { - ch := parseCORSOptions(opts...) - ch.h = h - return ch - } -} - -func parseCORSOptions(opts ...CORSOption) *cors { - ch := &cors{ - allowedMethods: defaultCorsMethods, - allowedHeaders: defaultCorsHeaders, - allowedOrigins: []string{}, - optionStatusCode: defaultCorsOptionStatusCode, - } - - for _, option := range opts { - _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil? - } - - return ch -} - -// -// Functional options for configuring CORS. -// - -// AllowedHeaders adds the provided headers to the list of allowed headers in a -// CORS request. -// This is an append operation so the headers Accept, Accept-Language, -// and Content-Language are always allowed. -// Content-Type must be explicitly declared if accepting Content-Types other than -// application/x-www-form-urlencoded, multipart/form-data, or text/plain. -func AllowedHeaders(headers []string) CORSOption { - return func(ch *cors) error { - for _, v := range headers { - normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) - if normalizedHeader == "" { - continue - } - - if !ch.isMatch(normalizedHeader, ch.allowedHeaders) { - ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader) - } - } - - return nil - } -} - -// AllowedMethods can be used to explicitly allow methods in the -// Access-Control-Allow-Methods header. -// This is a replacement operation so you must also -// pass GET, HEAD, and POST if you wish to support those methods. -func AllowedMethods(methods []string) CORSOption { - return func(ch *cors) error { - ch.allowedMethods = []string{} - for _, v := range methods { - normalizedMethod := strings.ToUpper(strings.TrimSpace(v)) - if normalizedMethod == "" { - continue - } - - if !ch.isMatch(normalizedMethod, ch.allowedMethods) { - ch.allowedMethods = append(ch.allowedMethods, normalizedMethod) - } - } - - return nil - } -} - -// AllowedOrigins sets the allowed origins for CORS requests, as used in the -// 'Allow-Access-Control-Origin' HTTP header. -// Note: Passing in a []string{"*"} will allow any domain. -func AllowedOrigins(origins []string) CORSOption { - return func(ch *cors) error { - for _, v := range origins { - if v == corsOriginMatchAll { - ch.allowedOrigins = []string{corsOriginMatchAll} - return nil - } - } - - ch.allowedOrigins = origins - return nil - } -} - -// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the -// 'Allow-Access-Control-Origin' HTTP header. -func AllowedOriginValidator(fn OriginValidator) CORSOption { - return func(ch *cors) error { - ch.allowedOriginValidator = fn - return nil - } -} - -// OptionStatusCode sets a custom status code on the OPTIONS requests. -// Default behaviour sets it to 200 to reflect best practices. This is option is not mandatory -// and can be used if you need a custom status code (i.e 204). -// -// More informations on the spec: -// https://fetch.spec.whatwg.org/#cors-preflight-fetch -func OptionStatusCode(code int) CORSOption { - return func(ch *cors) error { - ch.optionStatusCode = code - return nil - } -} - -// ExposedHeaders can be used to specify headers that are available -// and will not be stripped out by the user-agent. 
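The functional options removed in this file compose as plain functions over the middleware. A hedged usage sketch (origin, route, and port are illustrative; `gorilla/mux` remains vendored in this repo):

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/users", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("users"))
	})

	corsMW := handlers.CORS(
		handlers.AllowedOrigins([]string{"https://app.example.com"}),                       // exact matches; "*" would allow any origin
		handlers.AllowedMethods([]string{http.MethodGet, http.MethodPost, http.MethodPut}), // replaces the GET/HEAD/POST defaults
		handlers.AllowedHeaders([]string{"Content-Type", "Authorization"}),                 // Content-Type must be listed explicitly, per the doc above
		handlers.OptionStatusCode(http.StatusNoContent),                                    // preflight responses answer 204
	)
	log.Fatal(http.ListenAndServe(":8000", corsMW(r)))
}
```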
-func ExposedHeaders(headers []string) CORSOption { - return func(ch *cors) error { - ch.exposedHeaders = []string{} - for _, v := range headers { - normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) - if normalizedHeader == "" { - continue - } - - if !ch.isMatch(normalizedHeader, ch.exposedHeaders) { - ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader) - } - } - - return nil - } -} - -// MaxAge determines the maximum age (in seconds) between preflight requests. A -// maximum of 10 minutes is allowed. An age above this value will default to 10 -// minutes. -func MaxAge(age int) CORSOption { - return func(ch *cors) error { - // Maximum of 10 minutes. - if age > 600 { - age = 600 - } - - ch.maxAge = age - return nil - } -} - -// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead -// passing them through to the next handler. This is useful when your application -// or framework has a pre-existing mechanism for responding to OPTIONS requests. -func IgnoreOptions() CORSOption { - return func(ch *cors) error { - ch.ignoreOptions = true - return nil - } -} - -// AllowCredentials can be used to specify that the user agent may pass -// authentication details along with the request. -func AllowCredentials() CORSOption { - return func(ch *cors) error { - ch.allowCredentials = true - return nil - } -} - -func (ch *cors) isOriginAllowed(origin string) bool { - if origin == "" { - return false - } - - if ch.allowedOriginValidator != nil { - return ch.allowedOriginValidator(origin) - } - - if len(ch.allowedOrigins) == 0 { - return true - } - - for _, allowedOrigin := range ch.allowedOrigins { - if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll { - return true - } - } - - return false -} - -func (ch *cors) isMatch(needle string, haystack []string) bool { - for _, v := range haystack { - if v == needle { - return true - } - } - - return false -} diff --git a/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/gorilla/handlers/doc.go deleted file mode 100644 index 944e5a8ae998..000000000000 --- a/vendor/github.com/gorilla/handlers/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's net/http package (or any framework supporting http.Handler). - -The package includes handlers for logging in standardised formats, compressing -HTTP responses, validating content types and other useful tools for manipulating -requests and responses. -*/ -package handlers diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go deleted file mode 100644 index 9b92fce3333e..000000000000 --- a/vendor/github.com/gorilla/handlers/handlers.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "bufio" - "fmt" - "net" - "net/http" - "sort" - "strings" -) - -// MethodHandler is an http.Handler that dispatches to a handler whose key in the -// MethodHandler's map matches the name of the HTTP request's method, eg: GET -// -// If the request's method is OPTIONS and OPTIONS is not a key in the map then -// the handler responds with a status of 200 and sets the Allow header to a -// comma-separated list of available methods. 
-// -// If the request's method doesn't match any of its keys the handler responds -// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a -// comma-separated list of available methods. -type MethodHandler map[string]http.Handler - -func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if handler, ok := h[req.Method]; ok { - handler.ServeHTTP(w, req) - } else { - allow := []string{} - for k := range h { - allow = append(allow, k) - } - sort.Strings(allow) - w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == http.MethodOptions { - w.WriteHeader(http.StatusOK) - } else { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } - } -} - -// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP -// status code and body size. -type responseLogger struct { - w http.ResponseWriter - status int - size int -} - -func (l *responseLogger) Write(b []byte) (int, error) { - size, err := l.w.Write(b) - l.size += size - return size, err -} - -func (l *responseLogger) WriteHeader(s int) { - l.w.WriteHeader(s) - l.status = s -} - -func (l *responseLogger) Status() int { - return l.status -} - -func (l *responseLogger) Size() int { - return l.size -} - -func (l *responseLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { - conn, rw, err := l.w.(http.Hijacker).Hijack() - if err == nil && l.status == 0 { - // The status will be StatusSwitchingProtocols if there was no error and - // WriteHeader has not been called yet - l.status = http.StatusSwitchingProtocols - } - return conn, rw, err -} - -// isContentType validates the Content-Type header matches the supplied -// contentType. That is, its type and subtype match. -func isContentType(h http.Header, contentType string) bool { - ct := h.Get("Content-Type") - if i := strings.IndexRune(ct, ';'); i != -1 { - ct = ct[0:i] - } - return ct == contentType -} - -// ContentTypeHandler wraps and returns a http.Handler, validating the request -// content type is compatible with the contentTypes list. It writes a HTTP 415 -// error if that fails. -// -// Only PUT, POST, and PATCH requests are considered. -func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) { - h.ServeHTTP(w, r) - return - } - - for _, ct := range contentTypes { - if isContentType(r.Header, ct) { - h.ServeHTTP(w, r) - return - } - } - http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", - r.Header.Get("Content-Type"), - contentTypes), - http.StatusUnsupportedMediaType) - }) -} - -const ( - // HTTPMethodOverrideHeader is a commonly used - // http header to override a request method. - HTTPMethodOverrideHeader = "X-HTTP-Method-Override" - // HTTPMethodOverrideFormKey is a commonly used - // HTML form key to override a request method. - HTTPMethodOverrideFormKey = "_method" -) - -// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for -// the X-HTTP-Method-Override header or the _method form key, and overrides (if -// valid) request.Method with its value. -// -// This is especially useful for HTTP clients that don't support many http verbs. -// It isn't secure to override e.g a GET to a POST, so only POST requests are -// considered. Likewise, the override method can only be a "write" method: PUT, -// PATCH or DELETE. 
-// -// Form method takes precedence over header method. -func HTTPMethodOverrideHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodPost { - om := r.FormValue(HTTPMethodOverrideFormKey) - if om == "" { - om = r.Header.Get(HTTPMethodOverrideHeader) - } - if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete { - r.Method = om - } - } - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go deleted file mode 100644 index 2badb6fbff84..000000000000 --- a/vendor/github.com/gorilla/handlers/logging.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "io" - "net" - "net/http" - "net/url" - "strconv" - "time" - "unicode/utf8" - - "github.com/felixge/httpsnoop" -) - -// Logging - -// LogFormatterParams is the structure any formatter will be handed when time to log comes. -type LogFormatterParams struct { - Request *http.Request - URL url.URL - TimeStamp time.Time - StatusCode int - Size int -} - -// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler. -type LogFormatter func(writer io.Writer, params LogFormatterParams) - -// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its -// friends - -type loggingHandler struct { - writer io.Writer - handler http.Handler - formatter LogFormatter -} - -func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - t := time.Now() - logger, w := makeLogger(w) - url := *req.URL - - h.handler.ServeHTTP(w, req) - if req.MultipartForm != nil { - err := req.MultipartForm.RemoveAll() - if err != nil { - return - } - } - - params := LogFormatterParams{ - Request: req, - URL: url, - TimeStamp: t, - StatusCode: logger.Status(), - Size: logger.Size(), - } - - h.formatter(h.writer, params) -} - -func makeLogger(w http.ResponseWriter) (*responseLogger, http.ResponseWriter) { - logger := &responseLogger{w: w, status: http.StatusOK} - return logger, httpsnoop.Wrap(w, httpsnoop.Hooks{ - Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { - return logger.Write - }, - WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { - return logger.WriteHeader - }, - }) -} - -const lowerhex = "0123456789abcdef" - -func appendQuoted(buf []byte, s string) []byte { - var runeTmp [utf8.UTFMax]byte - for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0and reassigned as 1 - r := rune(s[0]) - width = 1 - if r >= utf8.RuneSelf { - r, width = utf8.DecodeRuneInString(s) - } - if width == 1 && r == utf8.RuneError { - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - continue - } - if r == rune('"') || r == '\\' { // always backslashed - buf = append(buf, '\\') - buf = append(buf, byte(r)) - continue - } - if strconv.IsPrint(r) { - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - continue - } - switch r { - case '\a': - buf = append(buf, `\a`...) - case '\b': - buf = append(buf, `\b`...) - case '\f': - buf = append(buf, `\f`...) - case '\n': - buf = append(buf, `\n`...) - case '\r': - buf = append(buf, `\r`...) - case '\t': - buf = append(buf, `\t`...) - case '\v': - buf = append(buf, `\v`...) 
- default: - switch { - case r < ' ': - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - case r > utf8.MaxRune: - r = 0xFFFD - fallthrough - case r < 0x10000: - buf = append(buf, `\u`...) - for s := 12; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - default: - buf = append(buf, `\U`...) - for s := 28; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - } - } - } - return buf -} - -// buildCommonLogLine builds a log entry for req in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { - username := "-" - if url.User != nil { - if name := url.User.Username(); name != "" { - username = name - } - } - - host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - - uri := req.RequestURI - - // Requests using the CONNECT method over HTTP/2.0 must use - // the authority field (aka r.Host) to identify the target. - // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT - if req.ProtoMajor == 2 && req.Method == "CONNECT" { - uri = req.Host - } - if uri == "" { - uri = url.RequestURI() - } - - buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) - buf = append(buf, host...) - buf = append(buf, " - "...) - buf = append(buf, username...) - buf = append(buf, " ["...) - buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) - buf = append(buf, `] "`...) - buf = append(buf, req.Method...) - buf = append(buf, " "...) - buf = appendQuoted(buf, uri) - buf = append(buf, " "...) - buf = append(buf, req.Proto...) - buf = append(buf, `" `...) - buf = append(buf, strconv.Itoa(status)...) - buf = append(buf, " "...) - buf = append(buf, strconv.Itoa(size)...) - return buf -} - -// writeLog writes a log entry for req to w in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func writeLog(writer io.Writer, params LogFormatterParams) { - buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) - buf = append(buf, '\n') - _, _ = writer.Write(buf) -} - -// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func writeCombinedLog(writer io.Writer, params LogFormatterParams) { - buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) - buf = append(buf, ` "`...) - buf = appendQuoted(buf, params.Request.Referer()) - buf = append(buf, `" "`...) - buf = appendQuoted(buf, params.Request.UserAgent()) - buf = append(buf, '"', '\n') - _, _ = writer.Write(buf) -} - -// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Combined Log Format. -// -// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. -// -// LoggingHandler always sets the ident field of the log to -. 
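All of the Apache-format writers above funnel through the same `LogFormatter` hook, so a custom format drops in via `CustomLoggingHandler` (defined further down in this file). A sketch, assuming a hand-rolled JSON-ish formatter:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"time"

	"github.com/gorilla/handlers"
)

// jsonFormatter is illustrative, not part of the package. It uses only
// the LogFormatterParams fields shown earlier in this file.
func jsonFormatter(w io.Writer, p handlers.LogFormatterParams) {
	fmt.Fprintf(w, `{"ts":%q,"method":%q,"uri":%q,"status":%d,"size":%d}`+"\n",
		p.TimeStamp.Format(time.RFC3339), p.Request.Method,
		p.URL.RequestURI(), p.StatusCode, p.Size)
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8000", handlers.CustomLoggingHandler(os.Stdout, h, jsonFormatter))
}
```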
-func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { - return loggingHandler{out, h, writeCombinedLog} -} - -// LoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Common Log Format (CLF). -// -// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. -// -// LoggingHandler always sets the ident field of the log to - -// -// Example: -// -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("This is a catch-all route")) -// }) -// loggedRouter := handlers.LoggingHandler(os.Stdout, r) -// http.ListenAndServe(":1123", loggedRouter) -func LoggingHandler(out io.Writer, h http.Handler) http.Handler { - return loggingHandler{out, h, writeLog} -} - -// CustomLoggingHandler provides a way to supply a custom log formatter -// while taking advantage of the mechanisms in this package. -func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler { - return loggingHandler{out, h, f} -} diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go deleted file mode 100644 index 281d753e95a2..000000000000 --- a/vendor/github.com/gorilla/handlers/proxy_headers.go +++ /dev/null @@ -1,120 +0,0 @@ -package handlers - -import ( - "net/http" - "regexp" - "strings" -) - -var ( - // De-facto standard header keys. - xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") - xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host") - xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto") - xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme") - xRealIP = http.CanonicalHeaderKey("X-Real-IP") -) - -var ( - // RFC7239 defines a new "Forwarded: " header designed to replace the - // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43. - forwarded = http.CanonicalHeaderKey("Forwarded") - // Allows for a sub-match of the first value after 'for=' to the next - // comma, semi-colon or space. The match is case-insensitive. - forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`) - // Allows for a sub-match for the first instance of scheme (http|https) - // prefixed by 'proto='. The match is case-insensitive. - protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`) -) - -// ProxyHeaders inspects common reverse proxy headers and sets the corresponding -// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP -// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme -// for the scheme (http|https), X-Forwarded-Host for the host and the RFC7239 -// Forwarded header, which may include both client IPs and schemes. -// -// NOTE: This middleware should only be used when behind a reverse -// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are -// configured not to) strip these headers from client requests, or where these -// headers are accepted "as is" from a remote client (e.g. when Go is not behind -// a proxy), can manifest as a vulnerability if your application uses these -// headers for validating the 'trustworthiness' of a request. -func ProxyHeaders(h http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - // Set the remote IP with the value passed from the proxy. - if fwd := getIP(r); fwd != "" { - r.RemoteAddr = fwd - } - - // Set the scheme (proto) with the value passed from the proxy. 
- if scheme := getScheme(r); scheme != "" { - r.URL.Scheme = scheme - } - // Set the host with the value passed by the proxy - if r.Header.Get(xForwardedHost) != "" { - r.Host = r.Header.Get(xForwardedHost) - } - // Call the next handler in the chain. - h.ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} - -// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 -// Forwarded headers (in that order). -func getIP(r *http.Request) string { - var addr string - - switch { - case r.Header.Get(xForwardedFor) != "": - fwd := r.Header.Get(xForwardedFor) - // Only grab the first (client) address. Note that '192.168.0.1, - // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after - // the first may represent forwarding proxies earlier in the chain. - s := strings.Index(fwd, ", ") - if s == -1 { - s = len(fwd) - } - addr = fwd[:s] - case r.Header.Get(xRealIP) != "": - addr = r.Header.Get(xRealIP) - case r.Header.Get(forwarded) != "": - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'for=' capture, which we ignore. In the case of multiple IP - // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only - // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 { - // IPv6 addresses in Forwarded headers are quoted-strings. We strip - // these quotes. - addr = strings.Trim(match[1], `"`) - } - } - - return addr -} - -// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 -// Forwarded headers (in that order). -func getScheme(r *http.Request) string { - var scheme string - - // Retrieve the scheme from X-Forwarded-Proto. - if proto := r.Header.Get(xForwardedProto); proto != "" { - scheme = strings.ToLower(proto) - } else if proto = r.Header.Get(xForwardedScheme); proto != "" { - scheme = strings.ToLower(proto) - } else if proto = r.Header.Get(forwarded); proto != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'proto=' capture, which we ignore. In the case of multiple proto - // parameters (invalid) we only extract the first. - if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 { - scheme = strings.ToLower(match[1]) - } - } - - return scheme -} diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go deleted file mode 100644 index 0d4f955ecbda..000000000000 --- a/vendor/github.com/gorilla/handlers/recovery.go +++ /dev/null @@ -1,98 +0,0 @@ -package handlers - -import ( - "log" - "net/http" - "runtime/debug" -) - -// RecoveryHandlerLogger is an interface used by the recovering handler to print logs. -type RecoveryHandlerLogger interface { - Println(...interface{}) -} - -type recoveryHandler struct { - handler http.Handler - logger RecoveryHandlerLogger - printStack bool -} - -// RecoveryOption provides a functional approach to define -// configuration for a handler; such as setting the logging -// whether or not to print stack traces on panic. -type RecoveryOption func(http.Handler) - -func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { - for _, option := range opts { - option(h) - } - - return h -} - -// RecoveryHandler is HTTP middleware that recovers from a panic, -// logs the panic, writes http.StatusInternalServerError, and -// continues to the next handler. 
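Because the precedence rules in `getIP` and `getScheme` above are easy to misread, a small demonstration (addresses and host are made up; `httptest` stands in for a real reverse proxy):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/handlers"
)

func main() {
	inner := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
		// Populated by ProxyHeaders: 203.0.113.7 https app.internal
		fmt.Println(r.RemoteAddr, r.URL.Scheme, r.Host)
	})

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1") // only the first (client) hop is kept
	req.Header.Set("X-Forwarded-Proto", "https")
	req.Header.Set("X-Forwarded-Host", "app.internal")

	handlers.ProxyHeaders(inner).ServeHTTP(httptest.NewRecorder(), req)
}
```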
-// -// Example: -// -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// panic("Unexpected error!") -// }) -// -// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) -func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { - return func(h http.Handler) http.Handler { - r := &recoveryHandler{handler: h} - return parseRecoveryOptions(r, opts...) - } -} - -// RecoveryLogger is a functional option to override -// the default logger. -func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption { - return func(h http.Handler) { - r := h.(*recoveryHandler) //nolint:errcheck //TODO: - // @bharat-rajani should return type-assertion error but would break the API? - r.logger = logger - } -} - -// PrintRecoveryStack is a functional option to enable -// or disable printing stack traces on panic. -func PrintRecoveryStack(shouldPrint bool) RecoveryOption { - return func(h http.Handler) { - r := h.(*recoveryHandler) //nolint:errcheck //TODO: - // @bharat-rajani should return type-assertion error but would break the API? - r.printStack = shouldPrint - } -} - -func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - defer func() { - if err := recover(); err != nil { - w.WriteHeader(http.StatusInternalServerError) - h.log(err) - } - }() - - h.handler.ServeHTTP(w, req) -} - -func (h recoveryHandler) log(v ...interface{}) { - if h.logger != nil { - h.logger.Println(v...) - } else { - log.Println(v...) - } - - if h.printStack { - stack := string(debug.Stack()) - if h.logger != nil { - h.logger.Println(stack) - } else { - log.Println(stack) - } - } -} diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore deleted file mode 100644 index d8b32652e5a9..000000000000 --- a/vendor/github.com/pkg/xattr/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.DS_Store - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.swp diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE deleted file mode 100644 index 99d2e9dc8ff2..000000000000 --- a/vendor/github.com/pkg/xattr/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2012 Dave Cheney. All rights reserved. -Copyright (c) 2014 Kuba Podgórski. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md deleted file mode 100644 index 0662c0208c57..000000000000 --- a/vendor/github.com/pkg/xattr/README.md +++ /dev/null @@ -1,45 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr) -[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr) -[![Build Status](https://github.com/pkg/xattr/workflows/build/badge.svg)](https://github.com/pkg/xattr/actions?query=workflow%3Abuild) -[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr) - -xattr -===== -Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris). - -"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes) - -`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored. - -The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they -do not reference a symlink that appears at the end of a path. See -[GoDoc](http://godoc.org/github.com/pkg/xattr) for details. - -### Example -```go - const path = "/tmp/myfile" - const prefix = "user." - - if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil { - log.Fatal(err) - } - - var list []string - if list, err = xattr.List(path); err != nil { - log.Fatal(err) - } - - var data []byte - if data, err = xattr.Get(path, prefix+"test"); err != nil { - log.Fatal(err) - } - - if err = xattr.Remove(path, prefix+"test"); err != nil { - log.Fatal(err) - } - - // One can also specify the flags parameter to be passed to the OS. - if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil { - log.Fatal(err) - } -``` diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go deleted file mode 100644 index e34e274d5137..000000000000 --- a/vendor/github.com/pkg/xattr/xattr.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Package xattr provides support for extended attributes on linux, darwin and freebsd. -Extended attributes are name:value pairs associated permanently with files and directories, -similar to the environment strings associated with a process. -An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty. -More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes . - -All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. 
The "L" -variant will not follow a symlink at the end of the path, and "F" variant accepts -a file descriptor instead of a path. - -Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are -symlinks: -Get will follow "symlink1" and "symlink2" and operate on the target of -"symlink2". LGet will follow "symlink1" but operate directly on "symlink2". -*/ -package xattr - -import ( - "os" - "syscall" -) - -// Error records an error and the operation, file path and attribute that caused it. -type Error struct { - Op string - Path string - Name string - Err error -} - -func (e *Error) Unwrap() error { return e.Err } - -func (e *Error) Error() (errstr string) { - if e.Op != "" { - errstr += e.Op - } - if e.Path != "" { - if errstr != "" { - errstr += " " - } - errstr += e.Path - } - if e.Name != "" { - if errstr != "" { - errstr += " " - } - errstr += e.Name - } - if e.Err != nil { - if errstr != "" { - errstr += ": " - } - errstr += e.Err.Error() - } - return -} - -// Get retrieves extended attribute data associated with path. It will follow -// all symlinks along the path. -func Get(path, name string) ([]byte, error) { - return get(path, name, func(name string, data []byte) (int, error) { - return getxattr(path, name, data) - }) -} - -// LGet is like Get but does not follow a symlink at the end of the path. -func LGet(path, name string) ([]byte, error) { - return get(path, name, func(name string, data []byte) (int, error) { - return lgetxattr(path, name, data) - }) -} - -// FGet is like Get but accepts a os.File instead of a file path. -func FGet(f *os.File, name string) ([]byte, error) { - return get(f.Name(), name, func(name string, data []byte) (int, error) { - return fgetxattr(f, name, data) - }) -} - -type getxattrFunc func(name string, data []byte) (int, error) - -// get contains the buffer allocation logic used by both Get and LGet. -func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) { - const ( - // Start with a 1 KB buffer for the xattr value - initialBufSize = 1024 - - // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's - // much smaller: documented at 64 KB. However, at least on TrueNAS SCALE, a - // Debian-based Linux distro, it can be larger. - maxBufSize = 64 * 1024 * 1024 - - // Function name as reported in error messages - myname = "xattr.get" - ) - - size := initialBufSize - for { - data := make([]byte, size) - read, err := getxattrFunc(name, data) - - // If the buffer was too small to fit the value, Linux and MacOS react - // differently: - // Linux: returns an ERANGE error and "-1" bytes. However, the TrueNAS - // SCALE distro sometimes returns E2BIG. - // MacOS: truncates the value and returns "size" bytes. If the value - // happens to be exactly as big as the buffer, we cannot know if it was - // truncated, and we retry with a bigger buffer. Contrary to documentation, - // MacOS never seems to return ERANGE! - // To keep the code simple, we always check both conditions, and sometimes - // double the buffer size without it being strictly necessary. - if err == syscall.ERANGE || err == syscall.E2BIG || read == size { - // The buffer was too small. Try again. - size <<= 1 - if size >= maxBufSize { - return nil, &Error{myname, path, name, syscall.EOVERFLOW} - } - continue - } - if err != nil { - return nil, &Error{myname, path, name, err} - } - return data[:read], nil - } -} - -// Set associates name and data together as an attribute of path. 
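Every wrapper below returns the `*Error` defined above, and `Unwrap` makes it cooperate with the standard `errors` package. A hedged sketch (path and attribute name are illustrative):

```go
package main

import (
	"errors"
	"log"

	"github.com/pkg/xattr"
)

func main() {
	_, err := xattr.Get("/tmp/myfile", "user.missing")

	var xerr *xattr.Error
	if errors.As(err, &xerr) {
		// Op, Path and Name pinpoint the failing call; Err is the raw errno.
		log.Printf("op=%s path=%s name=%s err=%v", xerr.Op, xerr.Path, xerr.Name, xerr.Err)
	}
	// Unwrap also lets errno comparisons see through the wrapper; ENOATTR
	// is the package's portable "attribute not set" constant.
	if errors.Is(err, xattr.ENOATTR) {
		log.Print("attribute not set")
	}
}
```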
-func Set(path, name string, data []byte) error { - if err := setxattr(path, name, data, 0); err != nil { - return &Error{"xattr.Set", path, name, err} - } - return nil -} - -// LSet is like Set but does not follow a symlink at -// the end of the path. -func LSet(path, name string, data []byte) error { - if err := lsetxattr(path, name, data, 0); err != nil { - return &Error{"xattr.LSet", path, name, err} - } - return nil -} - -// FSet is like Set but accepts a os.File instead of a file path. -func FSet(f *os.File, name string, data []byte) error { - if err := fsetxattr(f, name, data, 0); err != nil { - return &Error{"xattr.FSet", f.Name(), name, err} - } - return nil -} - -// SetWithFlags associates name and data together as an attribute of path. -// Forwards the flags parameter to the syscall layer. -func SetWithFlags(path, name string, data []byte, flags int) error { - if err := setxattr(path, name, data, flags); err != nil { - return &Error{"xattr.SetWithFlags", path, name, err} - } - return nil -} - -// LSetWithFlags is like SetWithFlags but does not follow a symlink at -// the end of the path. -func LSetWithFlags(path, name string, data []byte, flags int) error { - if err := lsetxattr(path, name, data, flags); err != nil { - return &Error{"xattr.LSetWithFlags", path, name, err} - } - return nil -} - -// FSetWithFlags is like SetWithFlags but accepts a os.File instead of a file path. -func FSetWithFlags(f *os.File, name string, data []byte, flags int) error { - if err := fsetxattr(f, name, data, flags); err != nil { - return &Error{"xattr.FSetWithFlags", f.Name(), name, err} - } - return nil -} - -// Remove removes the attribute associated with the given path. -func Remove(path, name string) error { - if err := removexattr(path, name); err != nil { - return &Error{"xattr.Remove", path, name, err} - } - return nil -} - -// LRemove is like Remove but does not follow a symlink at the end of the -// path. -func LRemove(path, name string) error { - if err := lremovexattr(path, name); err != nil { - return &Error{"xattr.LRemove", path, name, err} - } - return nil -} - -// FRemove is like Remove but accepts a os.File instead of a file path. -func FRemove(f *os.File, name string) error { - if err := fremovexattr(f, name); err != nil { - return &Error{"xattr.FRemove", f.Name(), name, err} - } - return nil -} - -// List retrieves a list of names of extended attributes associated -// with the given path in the file system. -func List(path string) ([]string, error) { - return list(path, func(data []byte) (int, error) { - return listxattr(path, data) - }) -} - -// LList is like List but does not follow a symlink at the end of the -// path. -func LList(path string) ([]string, error) { - return list(path, func(data []byte) (int, error) { - return llistxattr(path, data) - }) -} - -// FList is like List but accepts a os.File instead of a file path. -func FList(f *os.File) ([]string, error) { - return list(f.Name(), func(data []byte) (int, error) { - return flistxattr(f, data) - }) -} - -type listxattrFunc func(data []byte) (int, error) - -// list contains the buffer allocation logic used by both List and LList. -func list(path string, listxattrFunc listxattrFunc) ([]string, error) { - myname := "xattr.list" - // find size. - size, err := listxattrFunc(nil) - if err != nil { - return nil, &Error{myname, path, "", err} - } - if size > 0 { - // `size + 1` because of ERANGE error when reading - // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16). 
- buf := make([]byte, size+1) - // Read into buffer of that size. - read, err := listxattrFunc(buf) - if err != nil { - return nil, &Error{myname, path, "", err} - } - return stringsFromByteSlice(buf[:read]), nil - } - return []string{}, nil -} - -// bytePtrFromSlice returns a pointer to array of bytes and a size. -func bytePtrFromSlice(data []byte) (ptr *byte, size int) { - size = len(data) - if size > 0 { - ptr = &data[0] - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go deleted file mode 100644 index f4a3f9539049..000000000000 --- a/vendor/github.com/pkg/xattr/xattr_bsd.go +++ /dev/null @@ -1,201 +0,0 @@ -//go:build freebsd || netbsd -// +build freebsd netbsd - -package xattr - -import ( - "os" - "syscall" - "unsafe" -) - -const ( - // XATTR_SUPPORTED will be true if the current platform is supported - XATTR_SUPPORTED = true - - EXTATTR_NAMESPACE_USER = 1 - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. - ENOATTR = syscall.ENOATTR -) - -func getxattr(path string, name string, data []byte) (int, error) { - return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data) -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return getxattr(f.Name(), name, data) -} - -// sysGet is called by getxattr and lgetxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. -func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_get_file( - const char *path, - int attrnamespace, - const char *attrname, - void *data, - size_t nbytes); - - ssize_t extattr_get_link( - const char *path, - int attrnamespace, - const char *attrname, - void *data, - size_t nbytes); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) - if err != syscall.Errno(0) { - return int(r0), err - } - return int(r0), nil -} - -func setxattr(path string, name string, data []byte, flags int) error { - return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return setxattr(f.Name(), name, data, flags) -} - -// sysSet is called by setxattr and lsetxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. 
-func sysSet(syscallNum uintptr, path string, name string, data []byte) error { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_set_file( - const char *path, - int attrnamespace, - const char *attrname, - const void *data, - size_t nbytes - ); - - ssize_t extattr_set_link( - const char *path, - int attrnamespace, - const char *attrname, - const void *data, - size_t nbytes - ); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) - if err != syscall.Errno(0) { - return err - } - if int(r0) != nbytes { - return syscall.E2BIG - } - return nil -} - -func removexattr(path string, name string) error { - return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name) -} - -func lremovexattr(path string, name string) error { - return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name) -} - -func fremovexattr(f *os.File, name string) error { - return removexattr(f.Name(), name) -} - -// sysSet is called by removexattr and lremovexattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. -func sysRemove(syscallNum uintptr, path string, name string) error { - /* - int extattr_delete_file( - const char *path, - int attrnamespace, - const char *attrname - ); - - int extattr_delete_link( - const char *path, - int attrnamespace, - const char *attrname - ); - */ - _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - ) - if err != syscall.Errno(0) { - return err - } - return nil -} - -func listxattr(path string, data []byte) (int, error) { - return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data) -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return listxattr(f.Name(), data) -} - -// sysSet is called by listxattr and llistxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. -func sysList(syscallNum uintptr, path string, data []byte) (int, error) { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_list_file( - const char *path, - int attrnamespace, - void *data, - size_t nbytes - ); - - ssize_t extattr_list_link( - const char *path, - int attrnamespace, - void *data, - size_t nbytes - ); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0) - if err != syscall.Errno(0) { - return int(r0), err - } - return int(r0), nil -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On FreeBSD, each entry consists of a single byte containing the length -// of the attribute name, followed by the attribute name. -// The name is _not_ terminated by NULL. 
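A worked example of the length-prefixed layout just described (buffer contents hypothetical), fed through the decoder that follows:

```go
// Two attributes, "foo" and "name", as extattr_list_file(2) returns them:
// one length byte, then that many name bytes, with no NULL terminator.
buf := []byte{3, 'f', 'o', 'o', 4, 'n', 'a', 'm', 'e'}
fmt.Println(stringsFromByteSlice(buf)) // [foo name]
```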
-func stringsFromByteSlice(buf []byte) (result []string) { - index := 0 - for index < len(buf) { - next := index + 1 + int(buf[index]) - result = append(result, string(buf[index+1:next])) - index = next - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go deleted file mode 100644 index ee7a501dae5c..000000000000 --- a/vendor/github.com/pkg/xattr/xattr_darwin.go +++ /dev/null @@ -1,90 +0,0 @@ -//go:build darwin -// +build darwin - -package xattr - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html -const ( - // XATTR_SUPPORTED will be true if the current platform is supported - XATTR_SUPPORTED = true - - XATTR_NOFOLLOW = 0x0001 - XATTR_CREATE = 0x0002 - XATTR_REPLACE = 0x0004 - XATTR_NOSECURITY = 0x0008 - XATTR_NODEFAULT = 0x0010 - XATTR_SHOWCOMPRESSION = 0x0020 - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. - ENOATTR = syscall.ENOATTR -) - -func getxattr(path string, name string, data []byte) (int, error) { - return unix.Getxattr(path, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return unix.Lgetxattr(path, name, data) -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return getxattr(f.Name(), name, data) -} - -func setxattr(path string, name string, data []byte, flags int) error { - return unix.Setxattr(path, name, data, flags) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return unix.Lsetxattr(path, name, data, flags) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return setxattr(f.Name(), name, data, flags) -} - -func removexattr(path string, name string) error { - return unix.Removexattr(path, name) -} - -func lremovexattr(path string, name string) error { - return unix.Lremovexattr(path, name) -} - -func fremovexattr(f *os.File, name string) error { - return removexattr(f.Name(), name) -} - -func listxattr(path string, data []byte) (int, error) { - return unix.Listxattr(path, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return unix.Llistxattr(path, data) -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return listxattr(f.Name(), data) -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin and Linux, each entry is a NULL-terminated string. -func stringsFromByteSlice(buf []byte) (result []string) { - offset := 0 - for index, b := range buf { - if b == 0 { - result = append(result, string(buf[offset:index])) - offset = index + 1 - } - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go deleted file mode 100644 index 879085ee5d45..000000000000 --- a/vendor/github.com/pkg/xattr/xattr_linux.go +++ /dev/null @@ -1,142 +0,0 @@ -//go:build linux -// +build linux - -package xattr - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -const ( - // XATTR_SUPPORTED will be true if the current platform is supported - XATTR_SUPPORTED = true - - XATTR_CREATE = unix.XATTR_CREATE - XATTR_REPLACE = unix.XATTR_REPLACE - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. 
- ENOATTR = syscall.ENODATA -) - -// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system -// calls. This function works around this by retrying system calls until they -// stop returning EINTR. -// -// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce. -func ignoringEINTR(fn func() error) (err error) { - for { - err = fn() - if err != unix.EINTR { - break - } - } - return err -} - -func getxattr(path string, name string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Getxattr(path, name, data) - return err - }) - return r, err -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Lgetxattr(path, name, data) - return err - }) - return r, err -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Fgetxattr(int(f.Fd()), name, data) - return err - }) - return r, err -} - -func setxattr(path string, name string, data []byte, flags int) error { - return ignoringEINTR(func() (err error) { - return unix.Setxattr(path, name, data, flags) - }) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return ignoringEINTR(func() (err error) { - return unix.Lsetxattr(path, name, data, flags) - }) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return ignoringEINTR(func() (err error) { - return unix.Fsetxattr(int(f.Fd()), name, data, flags) - }) -} - -func removexattr(path string, name string) error { - return ignoringEINTR(func() (err error) { - return unix.Removexattr(path, name) - }) -} - -func lremovexattr(path string, name string) error { - return ignoringEINTR(func() (err error) { - return unix.Lremovexattr(path, name) - }) -} - -func fremovexattr(f *os.File, name string) error { - return ignoringEINTR(func() (err error) { - return unix.Fremovexattr(int(f.Fd()), name) - }) -} - -func listxattr(path string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Listxattr(path, data) - return err - }) - return r, err -} - -func llistxattr(path string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Llistxattr(path, data) - return err - }) - return r, err -} - -func flistxattr(f *os.File, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Flistxattr(int(f.Fd()), data) - return err - }) - return r, err -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin and Linux, each entry is a NULL-terminated string. 
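For contrast with the FreeBSD format earlier in this diff, the equivalent NULL-terminated layout this decoder expects (buffer contents hypothetical):

```go
buf := []byte("user.foo\x00user.bar\x00")
fmt.Println(stringsFromByteSlice(buf)) // [user.foo user.bar]
```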
-func stringsFromByteSlice(buf []byte) (result []string) {
-	offset := 0
-	for index, b := range buf {
-		if b == 0 {
-			result = append(result, string(buf[offset:index]))
-			offset = index + 1
-		}
-	}
-	return
-}
diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go
deleted file mode 100644
index 7c98b4afbac2..000000000000
--- a/vendor/github.com/pkg/xattr/xattr_solaris.go
+++ /dev/null
@@ -1,175 +0,0 @@
-//go:build solaris
-// +build solaris
-
-package xattr
-
-import (
-	"os"
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-const (
-	// XATTR_SUPPORTED will be true if the current platform is supported
-	XATTR_SUPPORTED = true
-
-	XATTR_CREATE  = 0x1
-	XATTR_REPLACE = 0x2
-
-	// ENOATTR is not exported by the syscall package on Linux, because it is
-	// an alias for ENODATA. We export it here so it is available on all
-	// our supported platforms.
-	ENOATTR = syscall.ENODATA
-)
-
-func getxattr(path string, name string, data []byte) (int, error) {
-	f, err := openNonblock(path)
-	if err != nil {
-		return 0, err
-	}
-	defer func() {
-		_ = f.Close()
-	}()
-	return fgetxattr(f, name, data)
-}
-
-func lgetxattr(path string, name string, data []byte) (int, error) {
-	return 0, unix.ENOTSUP
-}
-
-func fgetxattr(f *os.File, name string, data []byte) (int, error) {
-	fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0)
-	if err != nil {
-		return 0, err
-	}
-	defer func() {
-		_ = unix.Close(fd)
-	}()
-	return unix.Read(fd, data)
-}
-
-func setxattr(path string, name string, data []byte, flags int) error {
-	f, err := openNonblock(path)
-	if err != nil {
-		return err
-	}
-	err = fsetxattr(f, name, data, flags)
-	if err != nil {
-		_ = f.Close()
-		return err
-	}
-	return f.Close()
-}
-
-func lsetxattr(path string, name string, data []byte, flags int) error {
-	return unix.ENOTSUP
-}
-
-func fsetxattr(f *os.File, name string, data []byte, flags int) error {
-	mode := unix.O_WRONLY | unix.O_XATTR
-	if flags&XATTR_REPLACE != 0 {
-		mode |= unix.O_TRUNC
-	} else if flags&XATTR_CREATE != 0 {
-		mode |= unix.O_CREAT | unix.O_EXCL
-	} else {
-		mode |= unix.O_CREAT | unix.O_TRUNC
-	}
-	fd, err := unix.Openat(int(f.Fd()), name, mode, 0666)
-	if err != nil {
-		return err
-	}
-	if _, err = unix.Write(fd, data); err != nil {
-		_ = unix.Close(fd)
-		return err
-	}
-	return unix.Close(fd)
-}
-
-func removexattr(path string, name string) error {
-	mode := unix.O_RDONLY | unix.O_XATTR | unix.O_NONBLOCK | unix.O_CLOEXEC
-	fd, err := unix.Open(path, mode, 0)
-	if err != nil {
-		return err
-	}
-	f := os.NewFile(uintptr(fd), path)
-	defer func() {
-		_ = f.Close()
-	}()
-	return fremovexattr(f, name)
-}
-
-func lremovexattr(path string, name string) error {
-	return unix.ENOTSUP
-}
-
-func fremovexattr(f *os.File, name string) error {
-	fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		_ = unix.Close(fd)
-	}()
-	return unix.Unlinkat(fd, name, 0)
-}
-
-func listxattr(path string, data []byte) (int, error) {
-	f, err := openNonblock(path)
-	if err != nil {
-		return 0, err
-	}
-	defer func() {
-		_ = f.Close()
-	}()
-	return flistxattr(f, data)
-}
-
-func llistxattr(path string, data []byte) (int, error) {
-	return 0, unix.ENOTSUP
-}
-
-func flistxattr(f *os.File, data []byte) (int, error) {
-	fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0)
-	if err != nil {
-		return 0, unix.ENOTSUP
-	}
-	xf := os.NewFile(uintptr(fd), f.Name())
-	defer func() {
-		_ = xf.Close()
-	}()
-	names, err := xf.Readdirnames(-1)
-	if err != nil {
-		return 0, err
-	}
-	var buf []byte
-	for _, name := range names {
-		buf = append(buf, append([]byte(name), '\000')...)
-	}
-	if data == nil {
-		return len(buf), nil
-	}
-	return copy(data, buf), nil
-}
-
-// Like os.Open, but passes O_NONBLOCK to the open(2) syscall.
-func openNonblock(path string) (*os.File, error) {
-	fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC|unix.O_NONBLOCK, 0)
-	if err != nil {
-		return nil, err
-	}
-	return os.NewFile(uintptr(fd), path), err
-}
-
-// stringsFromByteSlice converts a sequence of attributes to a []string.
-// We simulate Linux/Darwin, where each entry is a NULL-terminated string.
-func stringsFromByteSlice(buf []byte) (result []string) {
-	offset := 0
-	for index, b := range buf {
-		if b == 0 {
-			result = append(result, string(buf[offset:index]))
-			offset = index + 1
-		}
-	}
-	return
-}
diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go
deleted file mode 100644
index 8886fbdc4216..000000000000
--- a/vendor/github.com/pkg/xattr/xattr_unsupported.go
+++ /dev/null
@@ -1,70 +0,0 @@
-//go:build !linux && !freebsd && !netbsd && !darwin && !solaris
-// +build !linux,!freebsd,!netbsd,!darwin,!solaris
-
-package xattr
-
-import (
-	"os"
-	"syscall"
-)
-
-const (
-	// We need to use the default for non supported operating systems
-	ENOATTR = syscall.Errno(0x59)
-)
-
-// XATTR_SUPPORTED will be true if the current platform is supported
-const XATTR_SUPPORTED = false
-
-func getxattr(path string, name string, data []byte) (int, error) {
-	return 0, nil
-}
-
-func lgetxattr(path string, name string, data []byte) (int, error) {
-	return 0, nil
-}
-
-func fgetxattr(f *os.File, name string, data []byte) (int, error) {
-	return 0, nil
-}
-
-func setxattr(path string, name string, data []byte, flags int) error {
-	return nil
-}
-
-func lsetxattr(path string, name string, data []byte, flags int) error {
-	return nil
-}
-
-func fsetxattr(f *os.File, name string, data []byte, flags int) error {
-	return nil
-}
-
-func removexattr(path string, name string) error {
-	return nil
-}
-
-func lremovexattr(path string, name string) error {
-	return nil
-}
-
-func fremovexattr(f *os.File, name string) error {
-	return nil
-}
-
-func listxattr(path string, data []byte) (int, error) {
-	return 0, nil
-}
-
-func llistxattr(path string, data []byte) (int, error) {
-	return 0, nil
-}
-
-func flistxattr(f *os.File, data []byte) (int, error) {
-	return 0, nil
-}
-
-// dummy
-func stringsFromByteSlice(buf []byte) (result []string) {
-	return []string{}
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 68bf8a0c86ce..899bf5c8383f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -742,12 +742,10 @@ github.com/fluent/fluent-bit-go/output
 # github.com/fsnotify/fsnotify v1.7.0
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
-# github.com/fsouza/fake-gcs-server v1.47.7
-## explicit; go 1.20
+# github.com/fsouza/fake-gcs-server v1.7.0
+## explicit
 github.com/fsouza/fake-gcs-server/fakestorage
 github.com/fsouza/fake-gcs-server/internal/backend
-github.com/fsouza/fake-gcs-server/internal/checksum
-github.com/fsouza/fake-gcs-server/internal/notification
 # github.com/gabriel-vasile/mimetype v1.4.3
 ## explicit; go 1.20
 github.com/gabriel-vasile/mimetype
@@ -973,9 +971,6 @@ github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1
 github.com/gophercloud/gophercloud/openstack/identity/v3/tokens
 github.com/gophercloud/gophercloud/openstack/utils
 github.com/gophercloud/gophercloud/pagination
-# github.com/gorilla/handlers v1.5.2
-## explicit; go 1.20
-github.com/gorilla/handlers
 # github.com/gorilla/mux v1.8.1
 ## explicit; go 1.20
 github.com/gorilla/mux
@@ -1382,9 +1377,6 @@ github.com/pkg/browser
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/pkg/xattr v0.4.10
-## explicit; go 1.14
-github.com/pkg/xattr
 # github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
 ## explicit; go 1.20
 github.com/planetscale/vtprotobuf/protohelpers