Skip to content

Commit

Permalink
Revert "Rebase to upstream master (#9)"
Browse files Browse the repository at this point in the history
This reverts commit 0fe802d.
  • Loading branch information
poornas authored Feb 16, 2021
1 parent 0fe802d commit e9928a4
Show file tree
Hide file tree
Showing 31 changed files with 271 additions and 787 deletions.
4 changes: 2 additions & 2 deletions Dockerfile.release
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@ ARG TARGETARCH
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="RELEASE.2021-02-14T04-01-33Z" \
release="RELEASE.2021-02-14T04-01-33Z" \
version="RELEASE.2021-02-11T08-23-43Z" \
release="RELEASE.2021-02-11T08-23-43Z" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

Expand Down
3 changes: 2 additions & 1 deletion cmd/admin-router.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,9 +110,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// -- IAM APIs --

// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")

// Add user IAM

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))

adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
Expand Down
6 changes: 0 additions & 6 deletions cmd/bucket-handlers.go
Original file line number Diff line number Diff line change
Expand Up @@ -427,12 +427,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}

// Return Malformed XML as S3 spec if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}

var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
Expand Down
14 changes: 14 additions & 0 deletions cmd/bucket-listobjects-handlers.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ import (
"github.com/minio/minio/cmd/logger"

"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/sync/errgroup"
)

Expand Down Expand Up @@ -294,6 +295,10 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
return proxyRequest(ctx, w, r, ep)
}

func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) {
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints)))
}

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
Expand Down Expand Up @@ -332,6 +337,15 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}

// Forward the request using Source IP or bucket
forwardStr := handlers.GetSourceIPFromHeaders(r)
if forwardStr == "" {
forwardStr = bucket
}
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
return
}

listObjects := objectAPI.ListObjects

// Initiate a list objects operation based on the input params.
Expand Down
11 changes: 2 additions & 9 deletions cmd/config-current.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ package cmd

import (
"context"
"crypto/tls"
"fmt"
"strings"
"sync"
Expand Down Expand Up @@ -296,10 +295,7 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
}
}
{
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
&tls.Config{
RootCAs: globalRootCAs,
}, defaultDialTimeout)())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
return err
}
Expand Down Expand Up @@ -475,10 +471,7 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
}
}

kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
&tls.Config{
RootCAs: globalRootCAs,
}, defaultDialTimeout)())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
}
Expand Down
12 changes: 5 additions & 7 deletions cmd/disk-cache-backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
}

// We set file info only if its valid.
o.ModTime = m.Stat.ModTime
o.Size = m.Stat.Size
o.ETag = extractETag(m.Meta)
o.ContentType = m.Meta["content-type"]
Expand All @@ -121,12 +122,6 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
o.Expires = t.UTC()
}
}
if mtime, ok := m.Meta["last-modified"]; ok {
if t, e = time.Parse(http.TimeFormat, mtime); e == nil {
o.ModTime = t.UTC()
}
}

// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of user-defined metadata
o.UserDefined = cleanMetadata(m.Meta)
Expand Down Expand Up @@ -511,7 +506,9 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
}
// get metadata of part.1 if full file has been cached.
partial = true
if _, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile)); err == nil {
fi, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile))
if err == nil {
meta.Stat.ModTime = atime.Get(fi)
partial = false
}
return meta, partial, meta.Hits, nil
Expand Down Expand Up @@ -573,6 +570,7 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met
}
}
m.Stat.Size = actualSize
m.Stat.ModTime = UTCNow()
if !incHitsOnly {
// reset meta
m.Meta = meta
Expand Down
1 change: 0 additions & 1 deletion cmd/disk-cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,6 @@ func getMetadata(objInfo ObjectInfo) map[string]string {
if !objInfo.Expires.Equal(timeSentinel) {
metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
}
metadata["last-modified"] = objInfo.ModTime.Format(http.TimeFormat)
for k, v := range objInfo.UserDefined {
metadata[k] = v
}
Expand Down
3 changes: 1 addition & 2 deletions cmd/disk-cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ package cmd

import (
"testing"
"time"
)

// Tests ToObjectInfo function.
Expand All @@ -28,7 +27,7 @@ func TestCacheMetadataObjInfo(t *testing.T) {
if objInfo.Size != 0 {
t.Fatal("Unexpected object info value for Size", objInfo.Size)
}
if !objInfo.ModTime.Equal(time.Time{}) {
if !objInfo.ModTime.Equal(timeSentinel) {
t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
}
if objInfo.IsDir {
Expand Down
48 changes: 6 additions & 42 deletions cmd/erasure-multipart.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"

"github.com/minio/minio-go/v7/pkg/set"
Expand Down Expand Up @@ -92,47 +91,12 @@ func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir stri
// Clean-up the old multipart uploads. Should be run in a Go routine.
//
// NOTE(review): this body appears to interleave two revisions of the same loop
// (the diff markers were lost): a WaitGroup-based concurrent sweep AND a
// sequential call with an early return. As written, the first non-nil disk is
// swept twice (once in a goroutine, once inline) and the function returns
// before wg.Wait(), skipping all remaining disks — confirm which revision is
// the intended one before relying on this text.
func (er erasureObjects) cleanupStaleUploads(ctx context.Context, expiry time.Duration) {
// Run multiple cleanups local to this server.
var wg sync.WaitGroup
for _, disk := range er.getLoadBalancedLocalDisks() {
if disk != nil {
wg.Add(1)
go func(disk StorageAPI) {
defer wg.Done()
// Sweep uploads older than expiry on this disk in the background.
er.cleanupStaleUploadsOnDisk(ctx, disk, expiry)
}(disk)
// NOTE(review): duplicate inline sweep of the same disk — presumably a
// leftover from the non-concurrent variant of this loop; verify.
er.cleanupStaleUploadsOnDisk(ctx, disk, expiry)
// NOTE(review): early return — remaining disks and wg.Wait() are skipped.
return
}
}
wg.Wait()
}

// renameAll renames every entry under bucket/prefix into the temporary
// metadata bucket under a fresh UUID, fanning out one goroutine per disk and
// blocking until all disks have been handled. Nil (offline) disks are skipped.
func (er erasureObjects) renameAll(ctx context.Context, bucket, prefix string) {
	var pending sync.WaitGroup
	for _, d := range er.getDisks() {
		if d == nil {
			continue
		}
		pending.Add(1)
		go func(target StorageAPI) {
			defer pending.Done()
			target.RenameFile(ctx, bucket, prefix, minioMetaTmpBucket, mustGetUUID())
		}(d)
	}
	pending.Wait()
}

// deleteAll removes bucket/prefix recursively from every online disk in the
// set, one goroutine per disk, and waits for every deletion attempt to finish.
// Nil (offline) disks are skipped; per-disk errors are ignored (best effort).
func (er erasureObjects) deleteAll(ctx context.Context, bucket, prefix string) {
	var pending sync.WaitGroup
	for _, d := range er.getDisks() {
		if d == nil {
			continue
		}
		pending.Add(1)
		go func(target StorageAPI) {
			defer pending.Done()
			target.Delete(ctx, bucket, prefix, true)
		}(d)
	}
	pending.Wait()
}

// Remove the old multipart uploads on the given disk.
Expand All @@ -154,7 +118,7 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto
continue
}
if now.Sub(fi.ModTime) > expiry {
er.renameAll(ctx, minioMetaMultipartBucket, uploadIDPath)
er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, fi.Erasure.DataBlocks+1)
}
}
}
Expand All @@ -163,12 +127,12 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto
return
}
for _, tmpDir := range tmpDirs {
vi, err := disk.StatVol(ctx, pathJoin(minioMetaTmpBucket, tmpDir))
fi, err := disk.ReadVersion(ctx, minioMetaTmpBucket, tmpDir, "", false)
if err != nil {
continue
}
if now.Sub(vi.Created) > expiry {
er.deleteAll(ctx, minioMetaTmpBucket, tmpDir)
if now.Sub(fi.ModTime) > expiry {
er.deleteObject(ctx, minioMetaTmpBucket, tmpDir, fi.Erasure.DataBlocks+1)
}
}
}
Expand Down
12 changes: 5 additions & 7 deletions cmd/erasure-object.go
Original file line number Diff line number Diff line change
Expand Up @@ -372,14 +372,12 @@ func (er erasureObjects) getObject(ctx context.Context, bucket, object string, s

// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) {
if !opts.NoLock {
// Lock the object before reading.
lk := er.NewNSLock(bucket, object)
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
return ObjectInfo{}, err
}
defer lk.RUnlock()
// Lock the object before reading.
lk := er.NewNSLock(bucket, object)
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
return ObjectInfo{}, err
}
defer lk.RUnlock()

return er.getObjectInfo(ctx, bucket, object, opts)
}
Expand Down
Loading

0 comments on commit e9928a4

Please sign in to comment.