diff --git a/Dockerfile.release b/Dockerfile.release index 67c4cb773b581..2f7b052ad4ddc 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -5,8 +5,8 @@ ARG TARGETARCH LABEL name="MinIO" \ vendor="MinIO Inc " \ maintainer="MinIO Inc " \ - version="RELEASE.2021-02-14T04-01-33Z" \ - release="RELEASE.2021-02-14T04-01-33Z" \ + version="RELEASE.2021-02-11T08-23-43Z" \ + release="RELEASE.2021-02-11T08-23-43Z" \ summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \ description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads." diff --git a/cmd/admin-router.go b/cmd/admin-router.go index 0baaa6e2a2fbe..e8f0472e42ed0 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -110,9 +110,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) // -- IAM APIs -- // Add policy IAM - adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}") + adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}") // Add user IAM + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler)) adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}") diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 3aa46bae3dcb3..a30c704ba38fd 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -427,12 +427,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, deleteObjectsFn = api.CacheAPI().DeleteObjects } - // Return Malformed XML as S3 spec if the list of objects is empty - if len(deleteObjects.Objects) == 0 { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r)) - return - } - var objectsToDelete = map[ObjectToDelete]int{} getObjectInfoFn := objectAPI.GetObjectInfo if api.CacheAPI() != nil { diff --git a/cmd/bucket-listobjects-handlers.go b/cmd/bucket-listobjects-handlers.go index 80cd57d1b06a3..f6be47b8a74fe 100644 --- a/cmd/bucket-listobjects-handlers.go +++ b/cmd/bucket-listobjects-handlers.go @@ -26,6 +26,7 @@ import ( "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/handlers" "github.com/minio/minio/pkg/sync/errgroup" ) @@ -294,6 +295,10 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http return proxyRequest(ctx, w, r, ep) } +func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) { + return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints))) +} + // ListObjectsV1Handler - GET Bucket (List Objects) Version 1. 
// -------------------------- // This implementation of the GET operation returns some or all (up to 10000) @@ -332,6 +337,15 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http return } + // Forward the request using Source IP or bucket + forwardStr := handlers.GetSourceIPFromHeaders(r) + if forwardStr == "" { + forwardStr = bucket + } + if proxyRequestByStringHash(ctx, w, r, forwardStr) { + return + } + listObjects := objectAPI.ListObjects // Initiate a list objects operation based on the input params. diff --git a/cmd/config-current.go b/cmd/config-current.go index 211dfeee103ed..71b41253afa25 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -18,7 +18,6 @@ package cmd import ( "context" - "crypto/tls" "fmt" "strings" "sync" @@ -296,10 +295,7 @@ func validateConfig(s config.Config, setDriveCounts []int) error { } } { - kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2( - &tls.Config{ - RootCAs: globalRootCAs, - }, defaultDialTimeout)()) + kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport()) if err != nil { return err } @@ -475,10 +471,7 @@ func lookupConfigs(s config.Config, setDriveCounts []int) { } } - kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2( - &tls.Config{ - RootCAs: globalRootCAs, - }, defaultDialTimeout)()) + kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport()) if err != nil { logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err)) } diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go index 6886c791c44d7..ba396bec4098f 100644 --- a/cmd/disk-cache-backend.go +++ b/cmd/disk-cache-backend.go @@ -103,6 +103,7 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) { } // We set file info only if it's valid. + o.ModTime = m.Stat.ModTime o.Size = m.Stat.Size o.ETag = extractETag(m.Meta) o.ContentType = m.Meta["content-type"] @@ -121,12 +122,6 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) { o.Expires = t.UTC() } } - if mtime, ok := m.Meta["last-modified"]; ok { - if t, e = time.Parse(http.TimeFormat, mtime); e == nil { - o.ModTime = t.UTC() - } - } - // etag/md5Sum has already been extracted. We need to // remove to avoid it from appearing as part of user-defined metadata o.UserDefined = cleanMetadata(m.Meta) @@ -511,7 +506,9 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c } // get metadata of part.1 if full file has been cached.
partial = true - if _, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile)); err == nil { + fi, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile)) + if err == nil { + meta.Stat.ModTime = atime.Get(fi) partial = false } return meta, partial, meta.Hits, nil @@ -573,6 +570,7 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met } } m.Stat.Size = actualSize + m.Stat.ModTime = UTCNow() if !incHitsOnly { // reset meta m.Meta = meta diff --git a/cmd/disk-cache.go b/cmd/disk-cache.go index 049de51b15441..385583c3e0a0c 100644 --- a/cmd/disk-cache.go +++ b/cmd/disk-cache.go @@ -200,7 +200,6 @@ func getMetadata(objInfo ObjectInfo) map[string]string { if !objInfo.Expires.Equal(timeSentinel) { metadata["expires"] = objInfo.Expires.Format(http.TimeFormat) } - metadata["last-modified"] = objInfo.ModTime.Format(http.TimeFormat) for k, v := range objInfo.UserDefined { metadata[k] = v } diff --git a/cmd/disk-cache_test.go b/cmd/disk-cache_test.go index 161b4e39525d0..a6a1ccbdd8c39 100644 --- a/cmd/disk-cache_test.go +++ b/cmd/disk-cache_test.go @@ -18,7 +18,6 @@ package cmd import ( "testing" - "time" ) // Tests ToObjectInfo function. @@ -28,7 +27,7 @@ func TestCacheMetadataObjInfo(t *testing.T) { if objInfo.Size != 0 { t.Fatal("Unexpected object info value for Size", objInfo.Size) } - if !objInfo.ModTime.Equal(time.Time{}) { + if !objInfo.ModTime.Equal(timeSentinel) { t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime) } if objInfo.IsDir { diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go index c260e0633a2ad..9856f8e2bf1ba 100644 --- a/cmd/erasure-multipart.go +++ b/cmd/erasure-multipart.go @@ -24,7 +24,6 @@ import ( "sort" "strconv" "strings" - "sync" "time" "github.com/minio/minio-go/v7/pkg/set" @@ -92,47 +91,12 @@ func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir stri // Clean-up the old multipart uploads. Should be run in a goroutine. func (er erasureObjects) cleanupStaleUploads(ctx context.Context, expiry time.Duration) { // run multiple cleanups local to this server. - var wg sync.WaitGroup for _, disk := range er.getLoadBalancedLocalDisks() { if disk != nil { - wg.Add(1) - go func(disk StorageAPI) { - defer wg.Done() - er.cleanupStaleUploadsOnDisk(ctx, disk, expiry) - }(disk) + er.cleanupStaleUploadsOnDisk(ctx, disk, expiry) + return } } - wg.Wait() -} - -func (er erasureObjects) renameAll(ctx context.Context, bucket, prefix string) { - var wg sync.WaitGroup - for _, disk := range er.getDisks() { - if disk == nil { - continue - } - wg.Add(1) - go func(disk StorageAPI) { - defer wg.Done() - disk.RenameFile(ctx, bucket, prefix, minioMetaTmpBucket, mustGetUUID()) - }(disk) - } - wg.Wait() -} - -func (er erasureObjects) deleteAll(ctx context.Context, bucket, prefix string) { - var wg sync.WaitGroup - for _, disk := range er.getDisks() { - if disk == nil { - continue - } - wg.Add(1) - go func(disk StorageAPI) { - defer wg.Done() - disk.Delete(ctx, bucket, prefix, true) - }(disk) - } - wg.Wait() } // Remove the old multipart uploads on the given disk.
@@ -154,7 +118,7 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto continue } if now.Sub(fi.ModTime) > expiry { - er.renameAll(ctx, minioMetaMultipartBucket, uploadIDPath) + er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, fi.Erasure.DataBlocks+1) } } } @@ -163,12 +127,12 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto return } for _, tmpDir := range tmpDirs { - vi, err := disk.StatVol(ctx, pathJoin(minioMetaTmpBucket, tmpDir)) + fi, err := disk.ReadVersion(ctx, minioMetaTmpBucket, tmpDir, "", false) if err != nil { continue } - if now.Sub(vi.Created) > expiry { - er.deleteAll(ctx, minioMetaTmpBucket, tmpDir) + if now.Sub(fi.ModTime) > expiry { + er.deleteObject(ctx, minioMetaTmpBucket, tmpDir, fi.Erasure.DataBlocks+1) } } } diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index 487b12f095619..897534cd02303 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -372,14 +372,12 @@ func (er erasureObjects) getObject(ctx context.Context, bucket, object string, s // GetObjectInfo - reads object metadata and replies back ObjectInfo. func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) { - if !opts.NoLock { - // Lock the object before reading. - lk := er.NewNSLock(bucket, object) - if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil { - return ObjectInfo{}, err - } - defer lk.RUnlock() + // Lock the object before reading. + lk := er.NewNSLock(bucket, object) + if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil { + return ObjectInfo{}, err } + defer lk.RUnlock() return er.getObjectInfo(ctx, bucket, object, opts) } diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index d795fc00993b1..5ca9c0e22cc79 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -511,71 +511,16 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object object = encodeDirObject(object) - if z.SinglePool() { - return z.serverPools[0].GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) - } - - var unlockOnDefer bool - var nsUnlocker = func() {} - defer func() { - if unlockOnDefer { - nsUnlocker() - } - }() - - // Acquire lock - if lockType != noLock { - lock := z.NewNSLock(bucket, object) - switch lockType { - case writeLock: - if err = lock.GetLock(ctx, globalOperationTimeout); err != nil { - return nil, err - } - nsUnlocker = lock.Unlock - case readLock: - if err = lock.GetRLock(ctx, globalOperationTimeout); err != nil { - return nil, err - } - nsUnlocker = lock.RUnlock - } - unlockOnDefer = true - } - - errs := make([]error, len(z.serverPools)) - grs := make([]*GetObjectReader, len(z.serverPools)) - - lockType = noLock // do not take locks at lower levels - var wg sync.WaitGroup - for i, pool := range z.serverPools { - wg.Add(1) - go func(i int, pool *erasureSets) { - defer wg.Done() - grs[i], errs[i] = pool.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) - }(i, pool) - } - wg.Wait() - - var found int = -1 - for i, err := range errs { - if err == nil { - found = i - break - } - if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - for _, grr := range grs { - if grr != nil { - grr.Close() - } + for _, pool := range z.serverPools { + gr, err = pool.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) + if err != nil { + if isErrObjectNotFound(err) || isErrVersionNotFound(err) { + continue } return gr, err } + return gr, nil } - - 
if found >= 0 { - return grs[found], nil - } - - object = decodeDirObject(object) if opts.VersionID != "" { return gr, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID} } @@ -588,6 +533,7 @@ func (z *erasureServerPools) GetObject(ctx context.Context, bucket, object strin } object = encodeDirObject(object) + for _, pool := range z.serverPools { if err := pool.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts); err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { @@ -609,49 +555,16 @@ func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object s } object = encodeDirObject(object) - - if z.SinglePool() { - return z.serverPools[0].GetObjectInfo(ctx, bucket, object, opts) - } - - // Lock the object before reading. - lk := z.NewNSLock(bucket, object) - if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil { - return ObjectInfo{}, err - } - defer lk.RUnlock() - - errs := make([]error, len(z.serverPools)) - objInfos := make([]ObjectInfo, len(z.serverPools)) - - opts.NoLock = true // avoid taking locks at lower levels for multi-pool setups. - var wg sync.WaitGroup - for i, pool := range z.serverPools { - wg.Add(1) - go func(i int, pool *erasureSets) { - defer wg.Done() - objInfos[i], errs[i] = pool.GetObjectInfo(ctx, bucket, object, opts) - }(i, pool) - } - wg.Wait() - - var found int = -1 - for i, err := range errs { - if err == nil { - found = i - break - } - if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - // some errors such as MethodNotAllowed for delete marker - // should be returned upwards. - return objInfos[i], err + for _, pool := range z.serverPools { + objInfo, err = pool.GetObjectInfo(ctx, bucket, object, opts) + if err != nil { + if isErrObjectNotFound(err) || isErrVersionNotFound(err) { + continue + } + return objInfo, err } + return objInfo, nil } - - if found >= 0 { - return objInfos[found], nil - } - object = decodeDirObject(object) if opts.VersionID != "" { return objInfo, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID} @@ -687,6 +600,7 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob } object = encodeDirObject(object) + if z.SinglePool() { return z.serverPools[0].DeleteObject(ctx, bucket, object, opts) } @@ -1207,24 +1121,22 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, fo // data is not distributed across sets. // Errors are logged but individual disk failures are not returned. func (z *erasureServerPools) deleteAll(ctx context.Context, bucket, prefix string) { + var wg sync.WaitGroup for _, servers := range z.serverPools { for _, set := range servers.sets { - set.deleteAll(ctx, bucket, prefix) - } - } -} - -// renameAll will rename bucket+prefix unconditionally across all disks to -// minioMetaTmpBucket + unique uuid, -// Note that set distribution is ignored so it should only be used in cases where -// data is not distributed across sets. Errors are logged but individual -// disk failures are not returned. -func (z *erasureServerPools) renameAll(ctx context.Context, bucket, prefix string) { - for _, servers := range z.serverPools { - for _, set := range servers.sets { - set.renameAll(ctx, bucket, prefix) + for _, disk := range set.getDisks() { + if disk == nil { + continue + } + wg.Add(1) + go func(disk StorageAPI) { + defer wg.Done() + disk.Delete(ctx, bucket, prefix, true) + }(disk) + } } } + wg.Wait() } // This function is used to undo a successful DeleteBucket operation. 
@@ -1676,13 +1588,27 @@ func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object s return z.serverPools[0].PutObjectTags(ctx, bucket, object, tags, opts) } - // We don't know the size here set 1GiB atleast. - idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30) - if err != nil { - return ObjectInfo{}, err + for _, pool := range z.serverPools { + objInfo, err := pool.PutObjectTags(ctx, bucket, object, tags, opts) + if err != nil { + if isErrObjectNotFound(err) || isErrVersionNotFound(err) { + continue + } + return ObjectInfo{}, err + } + return objInfo, nil + } + if opts.VersionID != "" { + return ObjectInfo{}, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + return ObjectInfo{}, ObjectNotFound{ + Bucket: bucket, + Object: object, } - - return z.serverPools[idx].PutObjectTags(ctx, bucket, object, tags, opts) } // DeleteObjectTags - delete object tags from an existing object @@ -1691,14 +1617,27 @@ func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, objec if z.SinglePool() { return z.serverPools[0].DeleteObjectTags(ctx, bucket, object, opts) } - - // We don't know the size here set 1GiB atleast. - idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30) - if err != nil { - return ObjectInfo{}, err + for _, pool := range z.serverPools { + objInfo, err := pool.DeleteObjectTags(ctx, bucket, object, opts) + if err != nil { + if isErrObjectNotFound(err) || isErrVersionNotFound(err) { + continue + } + return ObjectInfo{}, err + } + return objInfo, nil + } + if opts.VersionID != "" { + return ObjectInfo{}, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + return ObjectInfo{}, ObjectNotFound{ + Bucket: bucket, + Object: object, } - - return z.serverPools[idx].DeleteObjectTags(ctx, bucket, object, opts) } // GetObjectTags - get object tags from an existing object @@ -1707,44 +1646,27 @@ func (z *erasureServerPools) GetObjectTags(ctx context.Context, bucket, object s if z.SinglePool() { return z.serverPools[0].GetObjectTags(ctx, bucket, object, opts) } - - // We don't know the size here set 1GiB atleast. - idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30) - if err != nil { - return nil, err - } - - return z.serverPools[idx].GetObjectTags(ctx, bucket, object, opts) -} - -// TransitionObject - transition object content to target tier. -func (z *erasureServerPools) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error { - object = encodeDirObject(object) - if z.SinglePool() { - return z.serverPools[0].TransitionObject(ctx, bucket, object, opts) - } - // We don't know the size here set 1GiB atleast. - idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30) - if err != nil { - return err + for _, pool := range z.serverPools { + tags, err := pool.GetObjectTags(ctx, bucket, object, opts) + if err != nil { + if isErrObjectNotFound(err) || isErrVersionNotFound(err) { + continue + } + return tags, err + } + return tags, nil } - - return z.serverPools[idx].TransitionObject(ctx, bucket, object, opts) -} - -// RestoreTransitionedObject - restore transitioned object content locally on this cluster. 
-func (z *erasureServerPools) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error { - object = encodeDirObject(object) - if z.SinglePool() { - return z.serverPools[0].RestoreTransitionedObject(ctx, bucket, object, opts) + if opts.VersionID != "" { + return nil, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } } - // We don't know the size here set 1GiB atleast. - idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30) - if err != nil { - return err + return nil, ObjectNotFound{ + Bucket: bucket, + Object: object, } - - return z.serverPools[idx].RestoreTransitionedObject(ctx, bucket, object, opts) } // TransitionObject - transition object content to target tier. diff --git a/cmd/healthinfo_linux.go b/cmd/healthinfo_linux.go index 1871591b29ac5..8db9de5e6dd4a 100644 --- a/cmd/healthinfo_linux.go +++ b/cmd/healthinfo_linux.go @@ -37,10 +37,7 @@ func getLocalOsInfo(ctx context.Context, r *http.Request) madmin.ServerOsInfo { addr = GetLocalPeer(globalEndpoints) } - srvrOsInfo := madmin.ServerOsInfo{Addr: addr} - var err error - - srvrOsInfo.Info, err = host.InfoWithContext(ctx) + info, err := host.InfoWithContext(ctx) if err != nil { return madmin.ServerOsInfo{ Addr: addr, @@ -48,18 +45,23 @@ func getLocalOsInfo(ctx context.Context, r *http.Request) madmin.ServerOsInfo { } } - srvrOsInfo.Sensors, err = host.SensorsTemperaturesWithContext(ctx) + sensors, err := host.SensorsTemperaturesWithContext(ctx) if err != nil { - // Set error only when it's not of WARNINGS type - if _, isWarning := err.(*host.Warnings); !isWarning { - srvrOsInfo.Error = fmt.Sprintf("sensors-temp: %v", err) + return madmin.ServerOsInfo{ + Addr: addr, + Error: fmt.Sprintf("sensors-temp: %v", err), } } // ignore user err, as it cannot be obtained reliably inside containers - srvrOsInfo.Users, _ = host.UsersWithContext(ctx) + users, _ := host.UsersWithContext(ctx) - return srvrOsInfo + return madmin.ServerOsInfo{ + Addr: addr, + Info: info, + Sensors: sensors, + Users: users, + } } func getLocalDiskHwInfo(ctx context.Context, r *http.Request) madmin.ServerDiskHwInfo { diff --git a/cmd/http/server.go b/cmd/http/server.go index 42bcf664e801e..49c38ad49f2b5 100644 --- a/cmd/http/server.go +++ b/cmd/http/server.go @@ -191,7 +191,7 @@ func NewServer(addrs []string, handler http.Handler, getCert certs.GetCertificat // TLS hardening PreferServerCipherSuites: true, MinVersion: tls.VersionTLS12, - NextProtos: []string{"http/1.1", "h2"}, + NextProtos: []string{"h2", "http/1.1"}, } tlsConfig.GetCertificate = getCert } diff --git a/cmd/metacache-bucket.go b/cmd/metacache-bucket.go index 77fc55b420c4b..779c74ac3f4d0 100644 --- a/cmd/metacache-bucket.go +++ b/cmd/metacache-bucket.go @@ -64,7 +64,7 @@ func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache { ez, ok := objAPI.(*erasureServerPools) if ok { ctx := context.Background() - ez.renameAll(ctx, minioMetaBucket, metacachePrefixForID(bucket, slashSeparator)) + ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(bucket, slashSeparator)) } } return &bucketMetacache{ @@ -292,7 +292,7 @@ func (b *bucketMetacache) cleanup() { caches, rootIdx := b.cloneCaches() for id, cache := range caches { - if b.transient && time.Since(cache.lastUpdate) > 10*time.Minute && time.Since(cache.lastHandout) > 10*time.Minute { + if b.transient && time.Since(cache.lastUpdate) > 15*time.Minute && time.Since(cache.lastHandout) > 15*time.Minute { // Keep transient caches only for 15 minutes. 
remove[id] = struct{}{} continue @@ -361,7 +361,7 @@ func (b *bucketMetacache) cleanup() { }) // Keep first metacacheMaxEntries... for _, cache := range remainCaches[metacacheMaxEntries:] { - if time.Since(cache.lastHandout) > 30*time.Minute { + if time.Since(cache.lastHandout) > time.Hour { remove[cache.id] = struct{}{} } } @@ -409,6 +409,7 @@ func (b *bucketMetacache) updateCacheEntry(update metacache) (metacache, error) defer b.mu.Unlock() existing, ok := b.caches[update.id] if !ok { + logger.Info("updateCacheEntry: bucket %s list id %v not found", b.bucket, update.id) return update, errFileNotFound } existing.update(update) @@ -464,7 +465,7 @@ func (b *bucketMetacache) deleteAll() { b.updated = true if !b.transient { // Delete all. - ez.renameAll(ctx, minioMetaBucket, metacachePrefixForID(b.bucket, slashSeparator)) + ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(b.bucket, slashSeparator)) b.caches = make(map[string]metacache, 10) b.cachesRoot = make(map[string][]string, 10) return @@ -476,7 +477,7 @@ func (b *bucketMetacache) deleteAll() { wg.Add(1) go func(cache metacache) { defer wg.Done() - ez.renameAll(ctx, minioMetaBucket, metacachePrefixForID(cache.bucket, cache.id)) + ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(cache.bucket, cache.id)) }(b.caches[id]) } wg.Wait() diff --git a/cmd/metacache-entries.go b/cmd/metacache-entries.go index c1b525ec5ce8e..a8dccea4b3e20 100644 --- a/cmd/metacache-entries.go +++ b/cmd/metacache-entries.go @@ -330,23 +330,16 @@ func (m *metaCacheEntriesSorted) fileInfoVersions(bucket, prefix, delimiter, aft } fiv, err := entry.fileInfoVersions(bucket) - if err != nil { - continue - } - - fiVersions := fiv.Versions if afterV != "" { - vidMarkerIdx := fiv.findVersionIndex(afterV) - if vidMarkerIdx >= 0 { - fiVersions = fiVersions[vidMarkerIdx+1:] - } + // Forward first entry to specified version + fiv.forwardPastVersion(afterV) afterV = "" } - - for _, version := range fiVersions { - versions = append(versions, version.ToObjectInfo(bucket, entry.name)) + if err == nil { + for _, version := range fiv.Versions { + versions = append(versions, version.ToObjectInfo(bucket, entry.name)) + } } - continue } diff --git a/cmd/metacache-manager.go b/cmd/metacache-manager.go index 480b065708135..5e48b3972a132 100644 --- a/cmd/metacache-manager.go +++ b/cmd/metacache-manager.go @@ -92,6 +92,7 @@ func (m *metacacheManager) initManager() { } m.mu.Unlock() } + m.getTransient().deleteAll() }() } @@ -123,11 +124,11 @@ func (m *metacacheManager) updateCacheEntry(update metacache) (metacache, error) } b, ok := m.buckets[update.bucket] - m.mu.RUnlock() if ok { + m.mu.RUnlock() return b.updateCacheEntry(update) } - + m.mu.RUnlock() // We should have either a trashed bucket or this return metacache{}, errVolumeNotFound } diff --git a/cmd/metacache-server-pool.go b/cmd/metacache-server-pool.go index a29330e2b2e9d..4193176fdb1ec 100644 --- a/cmd/metacache-server-pool.go +++ b/cmd/metacache-server-pool.go @@ -19,9 +19,7 @@ package cmd import ( "context" "errors" - "fmt" "io" - "os" "path" "strings" "sync" @@ -30,24 +28,6 @@ import ( "github.com/minio/minio/cmd/logger" ) -func renameAllBucketMetacache(epPath string) error { - // Rename all previous `.minio.sys/buckets//.metacache` to - // to `.minio.sys/tmp/` for deletion. 
- return readDirFilterFn(pathJoin(epPath, minioMetaBucket, bucketMetaPrefix), func(name string, typ os.FileMode) error { - if typ == os.ModeDir { - tmpMetacacheOld := pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID()) - if err := renameAll(pathJoin(epPath, minioMetaBucket, metacachePrefixForID(name, slashSeparator)), - tmpMetacacheOld); err != nil && err != errFileNotFound { - return fmt.Errorf("unable to rename (%s -> %s) %w", - pathJoin(epPath, minioMetaBucket+metacachePrefixForID(minioMetaBucket, slashSeparator)), - tmpMetacacheOld, - osErrToFileErr(err)) - } - } - return nil - }) -} - // listPath will return the requested entries. // If no more entries are in the listing io.EOF is returned, // otherwise nil or an unexpected error is returned. diff --git a/cmd/metacache.go b/cmd/metacache.go index 71c4d4413a15a..c33b1862ac5e0 100644 --- a/cmd/metacache.go +++ b/cmd/metacache.go @@ -123,7 +123,7 @@ func (m *metacache) matches(o *listPathOptions, extend time.Duration) bool { } if time.Since(m.lastUpdate) > metacacheMaxRunningAge+extend { // Cache ended within bloom cycle, but we can extend the life. - o.debugf("cache %s ended (%v) and beyond extended life (%v)", m.id, m.lastUpdate, metacacheMaxRunningAge+extend) + o.debugf("cache %s ended (%v) and beyond extended life (%v)", m.id, m.lastUpdate, extend+metacacheMaxRunningAge) return false } } @@ -151,8 +151,8 @@ func (m *metacache) worthKeeping(currentCycle uint64) bool { // Cycle is too old to be valuable. return false case cache.status == scanStateError || cache.status == scanStateNone: - // Remove failed listings after 5 minutes. - return time.Since(cache.lastUpdate) < 5*time.Minute + // Remove failed listings after 10 minutes. + return time.Since(cache.lastUpdate) < 10*time.Minute } return true } @@ -170,9 +170,8 @@ func (m *metacache) canBeReplacedBy(other *metacache) bool { if m.status == scanStateStarted && time.Since(m.lastUpdate) < metacacheMaxRunningAge { return false } - // Keep it around a bit longer. - if time.Since(m.lastHandout) < 30*time.Minute || time.Since(m.lastUpdate) < metacacheMaxRunningAge { + if time.Since(m.lastHandout) < time.Hour || time.Since(m.lastUpdate) < metacacheMaxRunningAge { return false } diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index dad660ee03a3d..d775bc0beab8b 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -258,7 +258,7 @@ func cleanMetadata(metadata map[string]string) map[string]string { // Remove STANDARD StorageClass metadata = removeStandardStorageClass(metadata) // Clean meta etag keys 'md5Sum', 'etag', "expires", "x-amz-tagging". - return cleanMetadataKeys(metadata, "md5Sum", "etag", "expires", xhttp.AmzObjectTagging, "last-modified") + return cleanMetadataKeys(metadata, "md5Sum", "etag", "expires", xhttp.AmzObjectTagging) } // Filter X-Amz-Storage-Class field only if it is set to STANDARD. diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index abdf027ccb641..c93e0555568bc 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -125,8 +125,16 @@ func formatErasureCleanupTmpLocalEndpoints(endpoints Endpoints) error { osErrToFileErr(err)) } - // Renames and schedules for puring all bucket metacache. - renameAllBucketMetacache(epPath) + // Move .minio.sys/buckets/.minio.sys/metacache transient list cache + // folder to speed up startup routines. 
+ tmpMetacacheOld := pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID()) + if err := renameAll(pathJoin(epPath, minioMetaBucket, metacachePrefixForID(minioMetaBucket, "")), + tmpMetacacheOld); err != nil && err != errFileNotFound { + return fmt.Errorf("unable to rename (%s -> %s) %w", + pathJoin(epPath, minioMetaBucket+metacachePrefixForID(minioMetaBucket, "")), + tmpMetacacheOld, + osErrToFileErr(err)) + } // Removal of tmp-old folder is backgrounded completely. go removeAll(pathJoin(epPath, minioMetaTmpBucket+"-old")) diff --git a/cmd/storage-datatypes.go b/cmd/storage-datatypes.go index 722773a760999..a64d473ea10f5 100644 --- a/cmd/storage-datatypes.go +++ b/cmd/storage-datatypes.go @@ -85,18 +85,18 @@ type FileInfoVersions struct { Versions []FileInfo } -// findVersionIndex will return the version index where the version -// was found. Returns -1 if not found. -func (f *FileInfoVersions) findVersionIndex(v string) int { - if f == nil || v == "" { - return -1 +// forwardPastVersion will truncate the result to only contain versions after 'v'. +// If v is empty or the version isn't found, no changes will be made. +func (f *FileInfoVersions) forwardPastVersion(v string) { + if v == "" { + return } for i, ver := range f.Versions { if ver.VersionID == v { - return i + f.Versions = f.Versions[i+1:] + return } } - return -1 } // FileInfo - represents file stat information. diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index dcb1eca78eadd..37bb279ce9c5f 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -29,6 +29,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/minio/minio/cmd/http" xhttp "github.com/minio/minio/cmd/http" @@ -119,6 +120,8 @@ type storageRESTClient struct { endpoint Endpoint restClient *rest.Client diskID string + + diskInfoCache timedValue } // Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected @@ -215,18 +218,27 @@ func (client *storageRESTClient) SetDiskID(id string) { // DiskInfo - fetch disk information for a remote disk. func (client *storageRESTClient) DiskInfo(ctx context.Context) (info DiskInfo, err error) { - respBody, err := client.call(ctx, storageRESTMethodDiskInfo, nil, nil, -1) - if err != nil { - return info, err - } - defer http.DrainBody(respBody) - if err = msgp.Decode(respBody, &info); err != nil { - return info, err - } - if info.Error != "" { - return info, toStorageErr(errors.New(info.Error)) - } - return info, nil + client.diskInfoCache.Once.Do(func() { + client.diskInfoCache.TTL = time.Second + client.diskInfoCache.Update = func() (interface{}, error) { + var info DiskInfo + respBody, err := client.call(ctx, storageRESTMethodDiskInfo, nil, nil, -1) + if err != nil { + return info, err + } + defer http.DrainBody(respBody) + if err = msgp.Decode(respBody, &info); err != nil { + return info, err + } + if info.Error != "" { + return info, toStorageErr(errors.New(info.Error)) + } + return info, nil + } + }) + v, err := client.diskInfoCache.Get() + info = v.(DiskInfo) + return info, err } // MakeVolBulk - create multiple volumes in a bulk operation.
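A note on the new `diskInfoCache` field above: it leans on the reworked `timedValue` in `cmd/utils.go` below. The first `DiskInfo` call installs the `Update` closure under `Once.Do`, and every call within the one-second TTL is then served from memory instead of issuing another REST round trip. A minimal, self-contained sketch of that pattern under the same semantics (the `cachedValue` type and the fake fetch are illustrative stand-ins, not MinIO identifiers):

```go
// Minimal sketch of the TTL-cache pattern used by diskInfoCache above.
package main

import (
	"fmt"
	"sync"
	"time"
)

type cachedValue struct {
	TTL    time.Duration
	Update func() (interface{}, error)

	mu         sync.Mutex
	value      interface{}
	lastUpdate time.Time
}

// Get returns the cached value while it is fresh, otherwise it calls
// Update and caches the result. Errors are returned without caching.
func (c *cachedValue) Get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.TTL <= 0 {
		c.TTL = time.Second
	}
	if c.value != nil && time.Since(c.lastUpdate) < c.TTL {
		return c.value, nil
	}
	v, err := c.Update()
	if err != nil {
		return v, err
	}
	c.value = v
	c.lastUpdate = time.Now()
	return v, nil
}

func main() {
	calls := 0
	c := cachedValue{
		TTL: time.Second,
		Update: func() (interface{}, error) {
			calls++ // stands in for the remote storageRESTMethodDiskInfo call
			return fmt.Sprintf("disk-info #%d", calls), nil
		},
	}
	for i := 0; i < 3; i++ {
		v, _ := c.Get()
		fmt.Println(v) // prints "disk-info #1" three times: one backend call
	}
}
```

One trade-off mirrored here: the patched `DiskInfo` captures the first caller's `ctx` inside the `Once.Do` closure, so later cache refreshes reuse that context rather than the current request's.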
diff --git a/cmd/utils.go b/cmd/utils.go index d8499d931ed8c..40cc4c39df100 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -474,26 +474,21 @@ func newInternodeHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) DisableCompression: true, } - // https://github.com/golang/go/issues/23559 - // https://github.com/golang/go/issues/42534 - // https://github.com/golang/go/issues/43989 - // https://github.com/golang/go/issues/33425 - // https://github.com/golang/go/issues/29246 - // if tlsConfig != nil { - // trhttp2, _ := http2.ConfigureTransports(tr) - // if trhttp2 != nil { - // // ReadIdleTimeout is the timeout after which a health check using ping - // // frame will be carried out if no frame is received on the - // // connection. 5 minutes is sufficient time for any idle connection. - // trhttp2.ReadIdleTimeout = 5 * time.Minute - // // PingTimeout is the timeout after which the connection will be closed - // // if a response to Ping is not received. - // trhttp2.PingTimeout = dialTimeout - // // DisableCompression, if true, prevents the Transport from - // // requesting compression with an "Accept-Encoding: gzip" - // trhttp2.DisableCompression = true - // } - // } + if tlsConfig != nil { + trhttp2, _ := http2.ConfigureTransports(tr) + if trhttp2 != nil { + // ReadIdleTimeout is the timeout after which a health check using ping + // frame will be carried out if no frame is received on the + // connection. 5 minutes is sufficient time for any idle connection. + trhttp2.ReadIdleTimeout = 5 * time.Minute + // PingTimeout is the timeout after which the connection will be closed + // if a response to Ping is not received. + trhttp2.PingTimeout = dialTimeout + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + trhttp2.DisableCompression = true + } + } return func() http.RoundTripper { return tr @@ -524,7 +519,7 @@ func newCustomHTTPProxyTransport(tlsConfig *tls.Config, dialTimeout time.Duratio } } -func newCustomHTTPTransportWithHTTP2(tlsConfig *tls.Config, dialTimeout time.Duration) func() *http.Transport { +func newCustomHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) func() *http.Transport { // For more details about various values used here refer // https://golang.org/pkg/net/http/#Transport documentation tr := &http.Transport{ @@ -532,7 +527,7 @@ func newCustomHTTPTransportWithHTTP2(tlsConfig *tls.Config, dialTimeout time.Dur DialContext: xhttp.DialContextWithDNSCache(globalDNSCache, xhttp.NewInternodeDialContext(dialTimeout)), MaxIdleConnsPerHost: 1024, IdleConnTimeout: 15 * time.Second, - ResponseHeaderTimeout: 1 * time.Minute, + ResponseHeaderTimeout: 3 * time.Minute, // Set conservative timeouts for MinIO internode. TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, @@ -547,7 +542,10 @@ func newCustomHTTPTransportWithHTTP2(tlsConfig *tls.Config, dialTimeout time.Dur if trhttp2 != nil { // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the - // connection. 5 minutes is sufficient time for any idle connection. + // connection. 5 minutes is above maximum sane scrape interval, + // we should not have this small overhead on the scrape connections. + // For other cases, this is used to validate that the connection can + // still be used. 
trhttp2.ReadIdleTimeout = 5 * time.Minute // PingTimeout is the timeout after which the connection will be closed // if a response to Ping is not received. @@ -563,50 +561,6 @@ func newCustomHTTPTransportWithHTTP2(tlsConfig *tls.Config, dialTimeout time.Dur } } -func newCustomHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) func() *http.Transport { - // For more details about various values used here refer - // https://golang.org/pkg/net/http/#Transport documentation - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: xhttp.DialContextWithDNSCache(globalDNSCache, xhttp.NewInternodeDialContext(dialTimeout)), - MaxIdleConnsPerHost: 1024, - IdleConnTimeout: 15 * time.Second, - ResponseHeaderTimeout: 3 * time.Minute, // Set conservative timeouts for MinIO internode. - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // Go net/http automatically unzip if content-type is - // gzip disable this feature, as we are always interested - // in raw stream. - DisableCompression: true, - } - - // https://github.com/golang/go/issues/23559 - // https://github.com/golang/go/issues/42534 - // https://github.com/golang/go/issues/43989 - // https://github.com/golang/go/issues/33425 - // https://github.com/golang/go/issues/29246 - // if tlsConfig != nil { - // trhttp2, _ := http2.ConfigureTransports(tr) - // if trhttp2 != nil { - // // ReadIdleTimeout is the timeout after which a health check using ping - // // frame will be carried out if no frame is received on the - // // connection. 5 minutes is sufficient time for any idle connection. - // trhttp2.ReadIdleTimeout = 5 * time.Minute - // // PingTimeout is the timeout after which the connection will be closed - // // if a response to Ping is not received. - // trhttp2.PingTimeout = dialTimeout - // // DisableCompression, if true, prevents the Transport from - // // requesting compression with an "Accept-Encoding: gzip" - // trhttp2.DisableCompression = true - // } - // } - - return func() *http.Transport { - return tr - } -} - // NewGatewayHTTPTransport returns a new http configuration // used while communicating with the cloud backends. func NewGatewayHTTPTransport() *http.Transport { @@ -807,45 +761,38 @@ type timedValue struct { // Managed values. value interface{} lastUpdate time.Time - mu sync.RWMutex + mu sync.Mutex } // Get will return a cached value or fetch a new one. // If the Update function returns an error the value is forwarded as is and not cached. func (t *timedValue) Get() (interface{}, error) { - v := t.get() - if v != nil { - return v, nil + t.mu.Lock() + defer t.mu.Unlock() + if t.TTL <= 0 { + t.TTL = time.Second + } + if t.value != nil { + if time.Since(t.lastUpdate) < t.TTL { + v := t.value + return v, nil + } + t.value = nil } - v, err := t.Update() if err != nil { return v, err } - - t.update(v) + t.value = v + t.lastUpdate = time.Now() return v, nil } -func (t *timedValue) get() (v interface{}) { - ttl := t.TTL - if ttl <= 0 { - ttl = time.Second - } - t.mu.RLock() - defer t.mu.RUnlock() - v = t.value - if time.Since(t.lastUpdate) < ttl { - return v - } - return nil -} - -func (t *timedValue) update(v interface{}) { +// Invalidate the value in the cache. +func (t *timedValue) Invalidate() { t.mu.Lock() - defer t.mu.Unlock() - t.value = v - t.lastUpdate = time.Now() + t.value = nil + t.mu.Unlock() } // On MinIO a directory object is stored as a regular object with "__XLDIR__" suffix. 
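For context on the `newInternodeHTTPTransport` change above: `golang.org/x/net/http2.ConfigureTransports` upgrades an existing `*http.Transport` in place and returns an `*http2.Transport` whose ping-based health checks can then be tuned, which is exactly the knob this patch re-enables. A short sketch of that wiring with the same timeout values as the diff (the endpoint and the remaining transport fields are placeholders, not MinIO's configuration):

```go
// Sketch of the HTTP/2 health-check wiring re-enabled above; the timeout
// values match the diff, everything else is a placeholder.
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func newTransport(tlsConfig *tls.Config, dialTimeout time.Duration) *http.Transport {
	tr := &http.Transport{
		TLSClientConfig:    tlsConfig,
		DisableCompression: true, // always read the raw stream, as in the patch
	}
	if tlsConfig != nil {
		// ConfigureTransports registers the h2 upgrade on tr and returns
		// the http2.Transport so its keepalive pings can be tuned.
		if tr2, err := http2.ConfigureTransports(tr); err == nil {
			tr2.ReadIdleTimeout = 5 * time.Minute // send a ping after this much idle time
			tr2.PingTimeout = dialTimeout         // drop the conn if the ping goes unanswered
			tr2.DisableCompression = true
		}
	}
	return tr
}

func main() {
	tr := newTransport(&tls.Config{MinVersion: tls.VersionTLS12}, 10*time.Second)
	client := &http.Client{Transport: tr, Timeout: 30 * time.Second}
	resp, err := client.Get("https://example.com") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Proto) // "HTTP/2.0" when the server negotiates h2
}
```

This pairs with the `cmd/http/server.go` change earlier in the patch, which moves "h2" ahead of "http/1.1" in NextProtos so ALPN negotiation prefers HTTP/2 again.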
diff --git a/docs/orchestration/docker-compose/docker-compose.yaml b/docs/orchestration/docker-compose/docker-compose.yaml index 4da17ae970e80..1786ce0c28af3 100644 --- a/docs/orchestration/docker-compose/docker-compose.yaml +++ b/docs/orchestration/docker-compose/docker-compose.yaml @@ -5,7 +5,7 @@ version: '3.7' # it through port 9000. services: minio1: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z volumes: - data1-1:/data1 - data1-2:/data2 @@ -22,7 +22,7 @@ services: retries: 3 minio2: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z volumes: - data2-1:/data1 - data2-2:/data2 @@ -39,7 +39,7 @@ services: retries: 3 minio3: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z volumes: - data3-1:/data1 - data3-2:/data2 @@ -56,7 +56,7 @@ services: retries: 3 minio4: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z volumes: - data4-1:/data1 - data4-2:/data2 diff --git a/docs/orchestration/docker-swarm/docker-compose-secrets.yaml b/docs/orchestration/docker-swarm/docker-compose-secrets.yaml index ae70bb9936ed3..7ac663f0d3462 100644 --- a/docs/orchestration/docker-swarm/docker-compose-secrets.yaml +++ b/docs/orchestration/docker-swarm/docker-compose-secrets.yaml @@ -2,7 +2,7 @@ version: '3.7' services: minio1: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio1 volumes: - minio1-data:/export @@ -29,7 +29,7 @@ services: retries: 3 minio2: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio2 volumes: - minio2-data:/export @@ -56,7 +56,7 @@ services: retries: 3 minio3: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio3 volumes: - minio3-data:/export @@ -83,7 +83,7 @@ services: retries: 3 minio4: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio4 volumes: - minio4-data:/export diff --git a/docs/orchestration/docker-swarm/docker-compose.yaml b/docs/orchestration/docker-swarm/docker-compose.yaml index c081d44087a0f..0e1a34c83452a 100644 --- a/docs/orchestration/docker-swarm/docker-compose.yaml +++ b/docs/orchestration/docker-swarm/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3.7' services: minio1: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio1 volumes: - minio1-data:/export @@ -33,7 +33,7 @@ services: retries: 3 minio2: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio2 volumes: - minio2-data:/export @@ -64,7 +64,7 @@ services: retries: 3 minio3: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio3 volumes: - minio3-data:/export @@ -95,7 +95,7 @@ services: retries: 3 minio4: - image: minio/minio:RELEASE.2021-02-14T04-01-33Z + image: minio/minio:RELEASE.2021-02-11T08-23-43Z hostname: minio4 volumes: - minio4-data:/export diff --git a/pkg/bucket/policy/condition/key.go b/pkg/bucket/policy/condition/key.go index 2538e3cb0269e..39f8b554cdad7 100644 --- a/pkg/bucket/policy/condition/key.go +++ b/pkg/bucket/policy/condition/key.go @@ -124,7 +124,6 @@ var AllSupportedKeys = append([]Key{ S3Prefix, S3Delimiter, S3MaxKeys, - S3VersionID, 
S3ObjectLockRemainingRetentionDays, S3ObjectLockMode, S3ObjectLockLegalHold, @@ -144,8 +143,6 @@ var AllSupportedKeys = append([]Key{ // CommonKeys - is the list of all common condition keys. var CommonKeys = append([]Key{ - S3XAmzContentSha256, - S3LocationConstraint, AWSReferer, AWSSourceIP, AWSUserAgent, @@ -155,6 +152,7 @@ var CommonKeys = append([]Key{ AWSPrincipalType, AWSUserID, AWSUsername, + S3XAmzContentSha256, LDAPUser, }, JWTKeys...) @@ -243,13 +241,6 @@ func (set KeySet) Add(key Key) { set[key] = struct{}{} } -// Merge merges two key sets, duplicates are overwritten -func (set KeySet) Merge(mset KeySet) { - for k, v := range mset { - set[k] = v - } -} - // Difference - returns a key set containing the difference of two key sets. // Example: // keySet1 := ["one", "two", "three"] diff --git a/pkg/iam/policy/action.go b/pkg/iam/policy/action.go index 5f6c3a7f1946d..12a92a207e94c 100644 --- a/pkg/iam/policy/action.go +++ b/pkg/iam/policy/action.go @@ -266,45 +266,23 @@ var supportedObjectActions = map[Action]struct{}{ // isObjectAction - returns whether action is object type or not. func (action Action) isObjectAction() bool { - for supAction := range supportedObjectActions { - if action.Match(supAction) { - return true - } - } - return false + _, ok := supportedObjectActions[action] + return ok } -// Match - matches action name with action patter. +// Match - matches action name with action pattern. func (action Action) Match(a Action) bool { return wildcard.Match(string(action), string(a)) } // IsValid - checks if action is valid or not. func (action Action) IsValid() bool { - for supAction := range supportedActions { - if action.Match(supAction) { - return true - } - } - return false + _, ok := supportedActions[action] + return ok } -type actionConditionKeyMap map[Action]condition.KeySet - -func (a actionConditionKeyMap) Lookup(action Action) (condition.KeySet, bool) { - var ckeysMerged = condition.KeySet{} - var found bool - for act, ckey := range a { - if action.Match(act) { - ckeysMerged.Merge(ckey) - found = true - } - } - return ckeysMerged, found -} - -// iamActionConditionKeyMap - holds mapping of supported condition key for an action. -var iamActionConditionKeyMap = actionConditionKeyMap{ +// actionConditionKeyMap - holds mapping of supported condition keys for an action.
+var actionConditionKeyMap = map[Action]condition.KeySet{ AllActions: condition.NewKeySet(condition.AllSupportedKeys...), AbortMultipartUploadAction: condition.NewKeySet(condition.CommonKeys...), @@ -313,6 +291,8 @@ var iamActionConditionKeyMap = actionConditionKeyMap{ DeleteBucketPolicyAction: condition.NewKeySet(condition.CommonKeys...), + DeleteObjectAction: condition.NewKeySet(condition.CommonKeys...), + GetBucketLocationAction: condition.NewKeySet(condition.CommonKeys...), GetBucketNotificationAction: condition.NewKeySet(condition.CommonKeys...), @@ -323,7 +303,6 @@ var iamActionConditionKeyMap = actionConditionKeyMap{ append([]condition.Key{ condition.S3XAmzServerSideEncryption, condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3VersionID, }, condition.CommonKeys...)...), HeadBucketAction: condition.NewKeySet(condition.CommonKeys...), @@ -356,11 +335,6 @@ var iamActionConditionKeyMap = actionConditionKeyMap{ PutBucketPolicyAction: condition.NewKeySet(condition.CommonKeys...), - DeleteObjectAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), - PutObjectAction: condition.NewKeySet( append([]condition.Key{ condition.S3XAmzCopySource, @@ -368,7 +342,6 @@ var iamActionConditionKeyMap = actionConditionKeyMap{ condition.S3XAmzServerSideEncryptionCustomerAlgorithm, condition.S3XAmzMetadataDirective, condition.S3XAmzStorageClass, - condition.S3VersionID, condition.S3ObjectLockRetainUntilDate, condition.S3ObjectLockMode, condition.S3ObjectLockLegalHold, @@ -378,32 +351,21 @@ var iamActionConditionKeyMap = actionConditionKeyMap{ // LockLegalHold is not supported with PutObjectRetentionAction PutObjectRetentionAction: condition.NewKeySet( append([]condition.Key{ - condition.S3XAmzServerSideEncryption, - condition.S3XAmzServerSideEncryptionCustomerAlgorithm, condition.S3ObjectLockRemainingRetentionDays, condition.S3ObjectLockRetainUntilDate, condition.S3ObjectLockMode, - condition.S3VersionID, - }, condition.CommonKeys...)...), - GetObjectRetentionAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3XAmzServerSideEncryption, - condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3VersionID, }, condition.CommonKeys...)...), + + GetObjectRetentionAction: condition.NewKeySet(condition.CommonKeys...), PutObjectLegalHoldAction: condition.NewKeySet( append([]condition.Key{ - condition.S3XAmzServerSideEncryption, - condition.S3XAmzServerSideEncryptionCustomerAlgorithm, condition.S3ObjectLockLegalHold, - condition.S3VersionID, }, condition.CommonKeys...)...), GetObjectLegalHoldAction: condition.NewKeySet(condition.CommonKeys...), // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html BypassGovernanceRetentionAction: condition.NewKeySet( append([]condition.Key{ - condition.S3VersionID, condition.S3ObjectLockRemainingRetentionDays, condition.S3ObjectLockRetainUntilDate, condition.S3ObjectLockMode, @@ -414,24 +376,11 @@ var iamActionConditionKeyMap = actionConditionKeyMap{ PutBucketObjectLockConfigurationAction: condition.NewKeySet(condition.CommonKeys...), GetBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...), PutBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...), + PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), + GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), + DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), - PutObjectTaggingAction: condition.NewKeySet( - append([]condition.Key{ - 
condition.S3VersionID, - }, condition.CommonKeys...)...), - GetObjectTaggingAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), - DeleteObjectTaggingAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), - - PutObjectVersionTaggingAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), + PutObjectVersionTaggingAction: condition.NewKeySet(condition.CommonKeys...), GetObjectVersionAction: condition.NewKeySet( append([]condition.Key{ condition.S3VersionID, @@ -448,22 +397,10 @@ var iamActionConditionKeyMap = actionConditionKeyMap{ append([]condition.Key{ condition.S3VersionID, }, condition.CommonKeys...)...), - GetReplicationConfigurationAction: condition.NewKeySet(condition.CommonKeys...), - PutReplicationConfigurationAction: condition.NewKeySet(condition.CommonKeys...), - ReplicateObjectAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), - ReplicateDeleteAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), - ReplicateTagsAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), - GetObjectVersionForReplicationAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3VersionID, - }, condition.CommonKeys...)...), + GetReplicationConfigurationAction: condition.NewKeySet(condition.CommonKeys...), + PutReplicationConfigurationAction: condition.NewKeySet(condition.CommonKeys...), + ReplicateObjectAction: condition.NewKeySet(condition.CommonKeys...), + ReplicateDeleteAction: condition.NewKeySet(condition.CommonKeys...), + ReplicateTagsAction: condition.NewKeySet(condition.CommonKeys...), + GetObjectVersionForReplicationAction: condition.NewKeySet(condition.CommonKeys...), } diff --git a/pkg/iam/policy/actionset.go b/pkg/iam/policy/actionset.go index ac75d98794827..d780b28af9e24 100644 --- a/pkg/iam/policy/actionset.go +++ b/pkg/iam/policy/actionset.go @@ -43,15 +43,6 @@ func (actionSet ActionSet) Match(action Action) bool { if r.Match(action) { return true } - - // This is a special case where GetObjectVersion - // means GetObject is enabled implicitly. 
- switch r { - case GetObjectVersionAction: - if action == GetObjectAction { - return true - } - } } return false diff --git a/pkg/iam/policy/policy_test.go b/pkg/iam/policy/policy_test.go index ed41946f116bb..19fe8a120f92e 100644 --- a/pkg/iam/policy/policy_test.go +++ b/pkg/iam/policy/policy_test.go @@ -20,9 +20,7 @@ import ( "encoding/json" "net" "reflect" - "strings" "testing" - "time" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/pkg/bucket/policy" @@ -434,259 +432,6 @@ func TestPolicyIsValid(t *testing.T) { } } -// Parse config with location constraints -func TestPolicyParseConfig(t *testing.T) { - policy1LocationConstraint := `{ - "Version":"2012-10-17", - "Statement":[ - { - "Sid":"statement1", - "Effect":"Allow", - "Action": "s3:CreateBucket", - "Resource": "arn:aws:s3:::*", - "Condition": { - "StringLike": { - "s3:LocationConstraint": "us-east-1" - } - } - }, - { - "Sid":"statement2", - "Effect":"Deny", - "Action": "s3:CreateBucket", - "Resource": "arn:aws:s3:::*", - "Condition": { - "StringNotLike": { - "s3:LocationConstraint": "us-east-1" - } - } - } - ] -}` - policy2Condition := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "statement1", - "Effect": "Allow", - "Action": "s3:GetObjectVersion", - "Resource": "arn:aws:s3:::test/HappyFace.jpg" - }, - { - "Sid": "statement2", - "Effect": "Deny", - "Action": "s3:GetObjectVersion", - "Resource": "arn:aws:s3:::test/HappyFace.jpg", - "Condition": { - "StringNotEquals": { - "s3:versionid": "AaaHbAQitwiL_h47_44lRO2DDfLlBO5e" - } - } - } - ] -}` - - policy3ConditionActionRegex := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "statement2", - "Effect": "Allow", - "Action": "s3:Get*", - "Resource": "arn:aws:s3:::test/HappyFace.jpg", - "Condition": { - "StringEquals": { - "s3:versionid": "AaaHbAQitwiL_h47_44lRO2DDfLlBO5e" - } - } - } - ] -}` - - policy4ConditionAction := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "statement2", - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test/HappyFace.jpg", - "Condition": { - "StringEquals": { - "s3:versionid": "AaaHbAQitwiL_h47_44lRO2DDfLlBO5e" - } - } - } - ] -}` - - policy5ConditionCurrenTime := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:Get*", - "s3:Put*" - ], - "Resource": [ - "arn:aws:s3:::test/*" - ], - "Condition": { - "DateGreaterThan": { - "aws:CurrentTime": [ - "2017-02-28T00:00:00Z" - ] - } - } - } - ] -}` - - policy5ConditionCurrenTimeLesser := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:Get*", - "s3:Put*" - ], - "Resource": [ - "arn:aws:s3:::test/*" - ], - "Condition": { - "DateLessThan": { - "aws:CurrentTime": [ - "2017-02-28T00:00:00Z" - ] - } - } - } - ] -}` - - tests := []struct { - p string - args Args - allowed bool - }{ - { - p: policy1LocationConstraint, - allowed: true, - args: Args{ - AccountName: "allowed", - Action: CreateBucketAction, - BucketName: "test", - ConditionValues: map[string][]string{"LocationConstraint": {"us-east-1"}}, - }, - }, - { - p: policy1LocationConstraint, - allowed: false, - args: Args{ - AccountName: "disallowed", - Action: CreateBucketAction, - BucketName: "test", - ConditionValues: map[string][]string{"LocationConstraint": {"us-east-2"}}, - }, - }, - { - p: policy2Condition, - allowed: true, - args: Args{ - AccountName: "allowed", - Action: GetObjectAction, - BucketName: "test", - ObjectName: "HappyFace.jpg", - ConditionValues: map[string][]string{"versionid": 
{"AaaHbAQitwiL_h47_44lRO2DDfLlBO5e"}}, - }, - }, - { - p: policy2Condition, - allowed: false, - args: Args{ - AccountName: "disallowed", - Action: GetObjectAction, - BucketName: "test", - ObjectName: "HappyFace.jpg", - ConditionValues: map[string][]string{"versionid": {"AaaHbAQitwiL_h47_44lRO2DDfLlBO5f"}}, - }, - }, - { - p: policy3ConditionActionRegex, - allowed: true, - args: Args{ - AccountName: "allowed", - Action: GetObjectAction, - BucketName: "test", - ObjectName: "HappyFace.jpg", - ConditionValues: map[string][]string{"versionid": {"AaaHbAQitwiL_h47_44lRO2DDfLlBO5e"}}, - }, - }, - { - p: policy3ConditionActionRegex, - allowed: false, - args: Args{ - AccountName: "disallowed", - Action: GetObjectAction, - BucketName: "test", - ObjectName: "HappyFace.jpg", - ConditionValues: map[string][]string{"versionid": {"AaaHbAQitwiL_h47_44lRO2DDfLlBO5f"}}, - }, - }, - { - p: policy4ConditionAction, - allowed: true, - args: Args{ - AccountName: "allowed", - Action: GetObjectAction, - BucketName: "test", - ObjectName: "HappyFace.jpg", - ConditionValues: map[string][]string{"versionid": {"AaaHbAQitwiL_h47_44lRO2DDfLlBO5e"}}, - }, - }, - { - p: policy5ConditionCurrenTime, - allowed: true, - args: Args{ - AccountName: "allowed", - Action: GetObjectAction, - BucketName: "test", - ObjectName: "HappyFace.jpg", - ConditionValues: map[string][]string{ - "CurrentTime": {time.Now().Format(time.RFC3339)}, - }, - }, - }, - { - p: policy5ConditionCurrenTimeLesser, - allowed: false, - args: Args{ - AccountName: "disallowed", - Action: GetObjectAction, - BucketName: "test", - ObjectName: "HappyFace.jpg", - ConditionValues: map[string][]string{ - "CurrentTime": {time.Now().Format(time.RFC3339)}, - }, - }, - }, - } - for _, test := range tests { - test := test - t.Run(test.args.AccountName, func(t *testing.T) { - ip, err := ParseConfig(strings.NewReader(test.p)) - if err != nil { - t.Error(err) - } - if got := ip.IsAllowed(test.args); got != test.allowed { - t.Errorf("Expected %t, got %t", test.allowed, got) - } - }) - } -} - func TestPolicyUnmarshalJSONAndValidate(t *testing.T) { case1Data := []byte(`{ "ID": "MyPolicyForMyBucket1", diff --git a/pkg/iam/policy/statement.go b/pkg/iam/policy/statement.go index 024b197ed3aef..74f8874f3e10b 100644 --- a/pkg/iam/policy/statement.go +++ b/pkg/iam/policy/statement.go @@ -114,13 +114,8 @@ func (statement Statement) isValid() error { return Errorf("unsupported Resource found %v for action %v", statement.Resources, action) } - condKeys, ok := iamActionConditionKeyMap.Lookup(action) - if !ok { - return Errorf("conditions are not supported for action %v", action) - } - keys := statement.Conditions.Keys() - keyDiff := keys.Difference(condKeys) + keyDiff := keys.Difference(actionConditionKeyMap[action]) if !keyDiff.IsEmpty() { return Errorf("unsupported condition keys '%v' used for action '%v'", keyDiff, action) }