From 140c309f90a3d8e777b87870847ec91fc16b771d Mon Sep 17 00:00:00 2001 From: Petu Eusebiu Date: Sat, 17 Jul 2021 06:53:05 +0300 Subject: [PATCH] storage: add s3 backend support (without GC and dedupe) Signed-off-by: Petu Eusebiu --- .github/workflows/ci-cd.yml | 12 + examples/README.md | 13 + examples/config-s3.json | 53 ++ go.mod | 2 + go.sum | 3 +- pkg/api/config/config.go | 2 + pkg/api/controller.go | 40 +- pkg/api/controller_test.go | 126 +++ pkg/storage/s3/s3_test.go | 715 +++++++++++++++ pkg/storage/s3/storage.go | 1094 +++++++++++++++++++++++ pkg/storage/storage_fs.go | 4 +- pkg/storage/storage_fs_test.go | 603 +++++++++++++ pkg/storage/storage_test.go | 1536 ++++++++++++-------------------- 13 files changed, 3218 insertions(+), 985 deletions(-) create mode 100644 examples/config-s3.json create mode 100644 pkg/storage/s3/s3_test.go create mode 100644 pkg/storage/s3/storage.go create mode 100644 pkg/storage/storage_fs_test.go diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 41b891c546..216c424ba4 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -13,6 +13,14 @@ jobs: build-test: name: Build and test ZOT runs-on: ubuntu-latest + services: + s3mock: + image: localstack/localstack-full + env: + SERVICES: s3 + ports: + - 4563-4599:4563-4599 + - 9090:8080 steps: - name: Install go uses: actions/setup-go@v2 @@ -36,6 +44,10 @@ jobs: timeout-minutes: 30 run: | cd $GITHUB_WORKSPACE && make + env: + S3MOCK_ENDPOINT: localhost:4566 + AWS_ACCESS_KEY_ID: fake + AWS_SECRET_ACCESS_KEY: fake - name: Upload code coverage uses: codecov/codecov-action@v1 diff --git a/examples/README.md b/examples/README.md index 00a26ac316..f5c347997a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -245,3 +245,16 @@ Enable audit logs and set output file with: "audit": "/tmp/zot-audit.log" } ``` + +## Storage Drivers + +zot supports multiple storage options, including: +- 
[s3](https://github.com/docker/docker.github.io/blob/master/registry/storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket. +- [azure](https://github.com/docker/docker.github.io/blob/master/registry/storage-drivers/azure.md): A driver storing objects in Microsoft Azure Blob Storage. +- [swift](https://github.com/docker/docker.github.io/blob/master/registry/storage-drivers/swift.md): A driver storing objects in Openstack Swift. +- [oss](https://github.com/docker/docker.github.io/blob/master/registry/storage-drivers/oss.md): A driver storing objects in Aliyun OSS. +- [gcs](https://github.com/docker/docker.github.io/blob/master/registry/storage-drivers/gcs.md): A driver storing objects in a Google Cloud Storage bucket. + +For an s3 zot configuration with multiple storage drivers see: [s3-config](config-s3.json). +zot also supports different storage drivers for each subpath. + diff --git a/examples/config-s3.json b/examples/config-s3.json new file mode 100644 index 0000000000..69890e0e87 --- /dev/null +++ b/examples/config-s3.json @@ -0,0 +1,53 @@ +{ + "version": "0.1.0-dev", + "storage": { + "rootDirectory": "/zot", + "storageDriver": { + "name": "s3", + "region": "us-east-2", + "bucket": "zot-storage", + "secure": true, + "skipverify": false + }, + "subPaths": { + "/a": { + "rootDirectory": "/zot-a", + "storageDriver": { + "name": "s3", + "region": "us-east-2", + "bucket": "zot-storage", + "secure": true, + "skipverify": false + } + }, + "/b": { + "rootDirectory": "/zot-b", + "storageDriver": { + "name": "s3", + "region": "us-east-2", + "bucket": "zot-storage", + "secure": true, + "skipverify": false + } + }, + "/c": { + "rootDirectory": "/zot-c", + "storageDriver": { + "name": "s3", + "region": "us-east-2", + "bucket": "zot-storage", + "secure": false, + "skipverify": false + } + } + } + }, + "http": { + "address": "127.0.0.1", + "port": "8080", + "ReadOnly": false + }, + "log": { + "level": "debug" + } +} diff --git 
a/go.mod b/go.mod index 22c31840b4..628bac7cc9 100644 --- a/go.mod +++ b/go.mod @@ -16,11 +16,13 @@ require ( github.com/containers/common v0.26.0 github.com/containers/image/v5 v5.13.2 github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/docker/distribution v2.7.1+incompatible github.com/dustin/go-humanize v1.0.0 github.com/fsnotify/fsnotify v1.5.1 github.com/getlantern/deepcopy v0.0.0-20160317154340-7f45deb8130a github.com/go-ldap/ldap/v3 v3.4.1 github.com/gofrs/uuid v4.0.0+incompatible + github.com/golang/mock v1.6.0 // indirect github.com/google/go-containerregistry v0.6.0 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 diff --git a/go.sum b/go.sum index 959ca98dde..903fcccf5f 100644 --- a/go.sum +++ b/go.sum @@ -725,8 +725,9 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= diff --git a/pkg/api/config/config.go b/pkg/api/config/config.go index 2152dbd35f..211232077a 100644 --- a/pkg/api/config/config.go +++ b/pkg/api/config/config.go @@ -20,6 +20,7 @@ type StorageConfig struct { RootDirectory string GC bool Dedupe bool + StorageDriver 
map[string]interface{} `mapstructure:",omitempty"` } type TLSConfig struct { @@ -80,6 +81,7 @@ type GlobalStorageConfig struct { RootDirectory string Dedupe bool GC bool + StorageDriver map[string]interface{} `mapstructure:",omitempty"` SubPaths map[string]StorageConfig } diff --git a/pkg/api/controller.go b/pkg/api/controller.go index de3971103d..cedefed579 100644 --- a/pkg/api/controller.go +++ b/pkg/api/controller.go @@ -14,8 +14,11 @@ import ( ext "github.com/anuvu/zot/pkg/extensions" "github.com/anuvu/zot/pkg/log" "github.com/anuvu/zot/pkg/storage" + "github.com/anuvu/zot/pkg/storage/s3" "github.com/gorilla/handlers" "github.com/gorilla/mux" + + "github.com/docker/distribution/registry/storage/driver/factory" ) const ( @@ -96,8 +99,23 @@ func (c *Controller) Run() error { } } - defaultStore := storage.NewImageStore(c.Config.Storage.RootDirectory, - c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log) + var defaultStore storage.ImageStore + if len(c.Config.Storage.StorageDriver) == 0 { + defaultStore = storage.NewImageStore(c.Config.Storage.RootDirectory, + c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log) + } else { + storeName := fmt.Sprintf("%v", c.Config.Storage.StorageDriver["name"]) + + // Init a Storager from connection string. 
+ store, err := factory.Create(storeName, c.Config.Storage.StorageDriver) + if err != nil { + c.Log.Error().Err(err).Str("rootDir", c.Config.Storage.RootDirectory).Msg("Unable to create s3 service") + return err + } + + defaultStore = s3.NewImageStore(c.Config.Storage.RootDirectory, + c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log, store) + } c.StoreController.DefaultStore = defaultStore @@ -130,8 +148,22 @@ func (c *Controller) Run() error { } } - subImageStore[route] = storage.NewImageStore(storageConfig.RootDirectory, - storageConfig.GC, storageConfig.Dedupe, c.Log) + if len(storageConfig.StorageDriver) == 0 { + subImageStore[route] = storage.NewImageStore(storageConfig.RootDirectory, + storageConfig.GC, storageConfig.Dedupe, c.Log) + } else { + storeName := fmt.Sprintf("%v", storageConfig.StorageDriver["name"]) + + // Init a Storager from connection string. + store, err := factory.Create(storeName, storageConfig.StorageDriver) + if err != nil { + c.Log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("Unable to create s3 service") + return err + } + + subImageStore[route] = s3.NewImageStore(storageConfig.RootDirectory, + storageConfig.GC, storageConfig.Dedupe, c.Log, store) + } // Enable extensions if extension config is provided if c.Config != nil && c.Config.Extensions != nil { diff --git a/pkg/api/controller_test.go b/pkg/api/controller_test.go index 9ff0bdf69e..e0c4fecd35 100644 --- a/pkg/api/controller_test.go +++ b/pkg/api/controller_test.go @@ -123,6 +123,12 @@ func getCredString(username, password string) string { return usernameAndHash } +func skipIt(t *testing.T) { + if os.Getenv("S3MOCK_ENDPOINT") == "" { + t.Skip("Skipping testing without AWS S3 mock server") + } +} + func TestNew(t *testing.T) { Convey("Make a new controller", t, func() { conf := config.New() @@ -131,6 +137,126 @@ func TestNew(t *testing.T) { }) } +func TestObjectStorageController(t *testing.T) { + skipIt(t) + Convey("Negative make a new object storage 
controller", t, func() { + port := getFreePort() + conf := config.New() + conf.HTTP.Port = port + storageDriverParams := map[string]interface{}{ + "rootDir": "zot", + "name": "s3", + } + conf.Storage.StorageDriver = storageDriverParams + c := api.NewController(conf) + So(c, ShouldNotBeNil) + + c.Config.Storage.RootDirectory = "zot" + + err := c.Run() + So(err, ShouldNotBeNil) + }) + + Convey("Make a new object storage controller", t, func() { + port := getFreePort() + baseURL := getBaseURL(port, false) + conf := config.New() + conf.HTTP.Port = port + + bucket := "zot-storage-test" + endpoint := os.Getenv("S3MOCK_ENDPOINT") + + storageDriverParams := map[string]interface{}{ + "rootDir": "zot", + "name": "s3", + "region": "us-east-2", + "bucket": bucket, + "regionendpoint": endpoint, + "secure": false, + "skipverify": false, + } + conf.Storage.StorageDriver = storageDriverParams + c := api.NewController(conf) + So(c, ShouldNotBeNil) + + c.Config.Storage.RootDirectory = "/" + + go func(controller *api.Controller) { + // this blocks + if err := controller.Run(); err != nil { + return + } + }(c) + + // wait till ready + for { + _, err := resty.R().Get(baseURL) + if err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + defer func(controller *api.Controller) { + ctx := context.Background() + _ = controller.Server.Shutdown(ctx) + }(c) + }) +} + +func TestObjectStorageControllerSubPaths(t *testing.T) { + skipIt(t) + Convey("Make a new object storage controller", t, func() { + port := getFreePort() + baseURL := getBaseURL(port, false) + conf := config.New() + conf.HTTP.Port = port + + bucket := "zot-storage-test" + endpoint := os.Getenv("S3MOCK_ENDPOINT") + + storageDriverParams := map[string]interface{}{ + "rootDir": "zot", + "name": "s3", + "region": "us-east-2", + "bucket": bucket, + "regionendpoint": endpoint, + "secure": false, + "skipverify": false, + } + conf.Storage.StorageDriver = storageDriverParams + c := api.NewController(conf) + So(c, 
ShouldNotBeNil) + + c.Config.Storage.RootDirectory = "zot" + subPathMap := make(map[string]config.StorageConfig) + subPathMap["/a"] = config.StorageConfig{ + RootDirectory: "/a", + StorageDriver: storageDriverParams, + } + c.Config.Storage.SubPaths = subPathMap + + go func(controller *api.Controller) { + // this blocks + if err := controller.Run(); err != nil { + return + } + }(c) + + for { + _, err := resty.R().Get(baseURL) + if err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + defer func(controller *api.Controller) { + ctx := context.Background() + _ = controller.Server.Shutdown(ctx) + }(c) + }) +} + func TestHtpasswdSingleCred(t *testing.T) { Convey("Single cred", t, func() { port := getFreePort() diff --git a/pkg/storage/s3/s3_test.go b/pkg/storage/s3/s3_test.go new file mode 100644 index 0000000000..73d0c8efe3 --- /dev/null +++ b/pkg/storage/s3/s3_test.go @@ -0,0 +1,715 @@ +package s3_test + +import ( + "bytes" + "context" + _ "crypto/sha256" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strings" + "time" + + godigest "github.com/opencontainers/go-digest" + //"strings" + + "testing" + + "github.com/anuvu/zot/pkg/log" + "github.com/anuvu/zot/pkg/storage" + "github.com/anuvu/zot/pkg/storage/s3" + guuid "github.com/gofrs/uuid" + "github.com/rs/zerolog" + . 
"github.com/smartystreets/goconvey/convey" + + // Add s3 support + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" + + "gopkg.in/resty.v1" +) + +// nolint: gochecknoglobals +var ( + testImage = "test" + fileWriterSize = 12 + fileInfoSize = 10 + errorText = "new s3 error" + errS3 = errors.New(errorText) +) + +func cleanupStorage(store storageDriver.StorageDriver, name string) { + _ = store.Delete(context.Background(), name) +} + +func skipIt(t *testing.T) { + if os.Getenv("S3MOCK_ENDPOINT") == "" { + t.Skip("Skipping testing without AWS S3 mock server") + } +} + +func createMockStorage(rootDir string, store storageDriver.StorageDriver) storage.ImageStore { + il := s3.NewImageStore(rootDir, false, false, log.Logger{Logger: zerolog.New(os.Stdout)}, store) + + return il +} + +func createObjectsStore(rootDir string) (storageDriver.StorageDriver, storage.ImageStore, error) { + bucket := "zot-storage-test" + endpoint := os.Getenv("S3MOCK_ENDPOINT") + storageDriverParams := map[string]interface{}{ + "rootDir": rootDir, + "name": "s3", + "region": "us-east-2", + "bucket": bucket, + "regionendpoint": endpoint, + "secure": false, + "skipverify": false, + } + + storeName := fmt.Sprintf("%v", storageDriverParams["name"]) + + store, err := factory.Create(storeName, storageDriverParams) + if err != nil { + panic(err) + } + + // create bucket if it doesn't exists + _, err = resty.R().Put("http://" + endpoint + "/" + bucket) + if err != nil { + panic(err) + } + + il := s3.NewImageStore(rootDir, false, false, log.Logger{Logger: zerolog.New(os.Stdout)}, store) + + return store, il, err +} + +type FileInfoMock struct { + isDirFn func() bool +} + +func (f *FileInfoMock) Path() string { + return "" +} + +func (f *FileInfoMock) Size() int64 { + return int64(fileInfoSize) +} + +func (f *FileInfoMock) ModTime() time.Time { + return 
time.Now() +} + +func (f *FileInfoMock) IsDir() bool { + if f != nil && f.isDirFn != nil { + return f.isDirFn() + } + + return true +} + +type FileWriterMock struct { + writeFn func([]byte) (int, error) + cancelFn func() error + commitFn func() error + closeFn func() error +} + +func (f *FileWriterMock) Size() int64 { + return int64(fileWriterSize) +} + +func (f *FileWriterMock) Cancel() error { + if f != nil && f.cancelFn != nil { + return f.cancelFn() + } + + return nil +} + +func (f *FileWriterMock) Commit() error { + if f != nil && f.commitFn != nil { + return f.commitFn() + } + + return nil +} + +func (f *FileWriterMock) Write(p []byte) (int, error) { + if f != nil && f.writeFn != nil { + return f.writeFn(p) + } + + return 10, nil +} + +func (f *FileWriterMock) Close() error { + if f != nil && f.closeFn != nil { + return f.closeFn() + } + + return nil +} + +type StorageDriverMock struct { + nameFn func() string + getContentFn func(ctx context.Context, path string) ([]byte, error) + putContentFn func(ctx context.Context, path string, content []byte) error + readerFn func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) + writerFn func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) + statFn func(ctx context.Context, path string) (storageDriver.FileInfo, error) + listFn func(ctx context.Context, path string) ([]string, error) + moveFn func(ctx context.Context, sourcePath string, destPath string) error + deleteFn func(ctx context.Context, path string) error + walkFn func(ctx context.Context, path string, f storageDriver.WalkFn) error +} + +func (s *StorageDriverMock) Name() string { + if s != nil && s.nameFn != nil { + return s.nameFn() + } + + return "" +} + +func (s *StorageDriverMock) GetContent(ctx context.Context, path string) ([]byte, error) { + if s != nil && s.getContentFn != nil { + return s.getContentFn(ctx, path) + } + + return []byte{}, nil +} + +func (s *StorageDriverMock) PutContent(ctx 
context.Context, path string, content []byte) error { + if s != nil && s.putContentFn != nil { + return s.putContentFn(ctx, path, content) + } + + return nil +} + +func (s *StorageDriverMock) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + if s != nil && s.readerFn != nil { + return s.readerFn(ctx, path, offset) + } + + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (s *StorageDriverMock) Writer(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + if s != nil && s.writerFn != nil { + return s.writerFn(ctx, path, append) + } + + return &FileWriterMock{}, nil +} + +func (s *StorageDriverMock) Stat(ctx context.Context, path string) (storageDriver.FileInfo, error) { + if s != nil && s.statFn != nil { + return s.statFn(ctx, path) + } + + return &FileInfoMock{}, nil +} + +func (s *StorageDriverMock) List(ctx context.Context, path string) ([]string, error) { + if s != nil && s.listFn != nil { + return s.listFn(ctx, path) + } + + return []string{"a"}, nil +} + +func (s *StorageDriverMock) Move(ctx context.Context, sourcePath string, destPath string) error { + if s != nil && s.moveFn != nil { + return s.moveFn(ctx, sourcePath, destPath) + } + + return nil +} + +func (s *StorageDriverMock) Delete(ctx context.Context, path string) error { + if s != nil && s.deleteFn != nil { + return s.deleteFn(ctx, path) + } + + return nil +} + +func (s *StorageDriverMock) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", nil +} + +func (s *StorageDriverMock) Walk(ctx context.Context, path string, f storageDriver.WalkFn) error { + if s != nil && s.walkFn != nil { + return s.walkFn(ctx, path, f) + } + + return nil +} + +func TestNegativeCasesObjectsStorage(t *testing.T) { + skipIt(t) + + uuid, err := guuid.NewV4() + if err != nil { + panic(err) + } + + testDir := path.Join("/oci-repo-test", uuid.String()) + + store, il, _ := createObjectsStore(testDir) + 
defer cleanupStorage(store, testDir) + + Convey("Invalid validate repo", t, func(c C) { + So(il, ShouldNotBeNil) + So(il.InitRepo(testImage), ShouldBeNil) + objects, err := store.List(context.Background(), path.Join(il.RootDir(), testImage)) + So(err, ShouldBeNil) + for _, object := range objects { + t.Logf("Removing object: %s", object) + err := store.Delete(context.Background(), object) + So(err, ShouldBeNil) + } + _, err = il.ValidateRepo(testImage) + So(err, ShouldNotBeNil) + _, err = il.GetRepositories() + So(err, ShouldBeNil) + }) + + Convey("Invalid get image tags", t, func(c C) { + store, il, err := createObjectsStore(testDir) + defer cleanupStorage(store, testDir) + So(err, ShouldBeNil) + So(il.InitRepo(testImage), ShouldBeNil) + + So(store.Move(context.Background(), path.Join(testDir, testImage, "index.json"), + path.Join(testDir, testImage, "blobs")), ShouldBeNil) + ok, _ := il.ValidateRepo(testImage) + So(ok, ShouldBeFalse) + _, err = il.GetImageTags(testImage) + So(err, ShouldNotBeNil) + + So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil) + + So(il.InitRepo(testImage), ShouldBeNil) + So(store.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil) + _, err = il.GetImageTags(testImage) + So(err, ShouldNotBeNil) + }) + + Convey("Invalid get image manifest", t, func(c C) { + store, il, err := createObjectsStore(testDir) + defer cleanupStorage(store, testDir) + So(err, ShouldBeNil) + So(il, ShouldNotBeNil) + So(il.InitRepo(testImage), ShouldBeNil) + So(store.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil) + _, _, _, err = il.GetImageManifest(testImage, "") + So(err, ShouldNotBeNil) + So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil) + So(il.InitRepo(testImage), ShouldBeNil) + So(store.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil) + _, _, _, err = 
il.GetImageManifest(testImage, "") + So(err, ShouldNotBeNil) + }) + + Convey("Invalid validate repo", t, func(c C) { + store, il, err := createObjectsStore(testDir) + defer cleanupStorage(store, testDir) + So(err, ShouldBeNil) + So(il, ShouldNotBeNil) + + So(il.InitRepo(testImage), ShouldBeNil) + So(store.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil) + _, err = il.ValidateRepo(testImage) + So(err, ShouldNotBeNil) + So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil) + So(il.InitRepo(testImage), ShouldBeNil) + So(store.Move(context.Background(), path.Join(testDir, testImage, "index.json"), + path.Join(testDir, testImage, "_index.json")), ShouldBeNil) + ok, err := il.ValidateRepo(testImage) + So(err, ShouldBeNil) + So(ok, ShouldBeFalse) + }) + + Convey("Invalid finish blob upload", t, func(c C) { + store, il, err := createObjectsStore(testDir) + defer cleanupStorage(store, testDir) + So(err, ShouldBeNil) + So(il, ShouldNotBeNil) + + So(il.InitRepo(testImage), ShouldBeNil) + v, err := il.NewBlobUpload(testImage) + So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + + content := []byte("test-data1") + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + + b, err := il.PutBlobChunk(testImage, v, 0, int64(l), buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + src := il.BlobUploadPath(testImage, v) + fw, err := store.Writer(context.Background(), src, true) + So(err, ShouldBeNil) + + _, err = fw.Write([]byte("another-chunk-of-data")) + So(err, ShouldBeNil) + + err = fw.Close() + So(err, ShouldBeNil) + + err = il.FinishBlobUpload(testImage, v, buf, d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test storage driver errors", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + listFn: func(ctx context.Context, path string) ([]string, error) { + return []string{testImage}, errS3 + }, + moveFn: func(ctx context.Context, sourcePath, destPath 
string) error { + return errS3 + }, + getContentFn: func(ctx context.Context, path string) ([]byte, error) { + return []byte{}, errS3 + }, + putContentFn: func(ctx context.Context, path string, content []byte) error { + return errS3 + }, + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{}, errS3 + }, + readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), errS3 + }, + walkFn: func(ctx context.Context, path string, f storageDriver.WalkFn) error { + return errS3 + }, + statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) { + return &FileInfoMock{}, errS3 + }, + deleteFn: func(ctx context.Context, path string) error { + return errS3 + }, + }) + So(il, ShouldNotBeNil) + + So(il.InitRepo(testImage), ShouldNotBeNil) + _, err := il.ValidateRepo(testImage) + So(err, ShouldNotBeNil) + + v, err := il.NewBlobUpload(testImage) + So(err, ShouldNotBeNil) + + content := []byte("test-data1") + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + + _, err = il.PutBlobChunk(testImage, v, 0, int64(l), buf) + So(err, ShouldNotBeNil) + + err = il.FinishBlobUpload(testImage, v, buf, d.String()) + So(err, ShouldNotBeNil) + + err = il.DeleteBlob(testImage, d.String()) + So(err, ShouldNotBeNil) + + err = il.DeleteBlobUpload(testImage, v) + So(err, ShouldNotBeNil) + + err = il.DeleteImageManifest(testImage, "1.0") + So(err, ShouldNotBeNil) + + _, err = il.PutImageManifest(testImage, "1.0", "application/json", []byte{}) + So(err, ShouldNotBeNil) + + _, err = il.PutBlobChunkStreamed(testImage, v, bytes.NewBuffer([]byte(testImage))) + So(err, ShouldNotBeNil) + + _, _, err = il.FullBlobUpload(testImage, bytes.NewBuffer([]byte{}), "inexistent") + So(err, ShouldNotBeNil) + + _, _, err = il.CheckBlob(testImage, d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test 
ValidateRepo", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + listFn: func(ctx context.Context, path string) ([]string, error) { + return []string{testImage, testImage}, errS3 + }, + }) + _, err := il.ValidateRepo(testImage) + So(err, ShouldNotBeNil) + }) + + Convey("Test ValidateRepo2", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + listFn: func(ctx context.Context, path string) ([]string, error) { + return []string{"test/test/oci-layout", "test/test/index.json"}, nil + }, + statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) { + return &FileInfoMock{}, nil + }, + }) + _, err := il.ValidateRepo(testImage) + So(err, ShouldNotBeNil) + }) + + Convey("Test ValidateRepo3", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + listFn: func(ctx context.Context, path string) ([]string, error) { + return []string{"test/test/oci-layout", "test/test/index.json"}, nil + }, + statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) { + return &FileInfoMock{}, nil + }, + getContentFn: func(ctx context.Context, path string) ([]byte, error) { + return []byte{}, errS3 + }, + }) + _, err := il.ValidateRepo(testImage) + So(err, ShouldNotBeNil) + }) + + Convey("Test ValidateRepo4", t, func(c C) { + ociLayout := []byte(`{"imageLayoutVersion": "9.9.9"}`) + il = createMockStorage(testDir, &StorageDriverMock{ + listFn: func(ctx context.Context, path string) ([]string, error) { + return []string{"test/test/oci-layout", "test/test/index.json"}, nil + }, + statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) { + return &FileInfoMock{}, nil + }, + getContentFn: func(ctx context.Context, path string) ([]byte, error) { + return ociLayout, nil + }, + }) + _, err := il.ValidateRepo(testImage) + So(err, ShouldNotBeNil) + }) + + Convey("Test GetRepositories", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + walkFn: func(ctx 
context.Context, path string, f storageDriver.WalkFn) error { + return f(new(FileInfoMock)) + }, + }) + repos, err := il.GetRepositories() + So(repos, ShouldBeEmpty) + So(err, ShouldBeNil) + }) + + Convey("Test DeleteImageManifest", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + getContentFn: func(ctx context.Context, path string) ([]byte, error) { + return []byte{}, errS3 + }, + }) + err := il.DeleteImageManifest(testImage, "1.0") + So(err, ShouldNotBeNil) + }) + + Convey("Test DeleteImageManifest2", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{}) + err := il.DeleteImageManifest(testImage, "1.0") + So(err, ShouldNotBeNil) + }) + + Convey("Test NewBlobUpload", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + putContentFn: func(ctx context.Context, path string, content []byte) error { + return errS3 + }, + }) + _, err := il.NewBlobUpload(testImage) + So(err, ShouldNotBeNil) + }) + + Convey("Test GetBlobUpload", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) { + return &FileInfoMock{}, errS3 + }, + }) + _, err := il.GetBlobUpload(testImage, "uuid") + So(err, ShouldNotBeNil) + }) + + Convey("Test PutBlobChunkStreamed", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{}, errS3 + }, + }) + _, err := il.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader(""))) + So(err, ShouldNotBeNil) + }) + + Convey("Test PutBlobChunkStreamed2", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{writeFn: func(b []byte) (int, error) { + return 0, errS3 + }}, nil + }, + }) + _, err := 
il.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader(""))) + So(err, ShouldNotBeNil) + }) + + Convey("Test PutBlobChunk", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{}, errS3 + }, + }) + _, err := il.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader(""))) + So(err, ShouldNotBeNil) + }) + + Convey("Test PutBlobChunk2", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{ + writeFn: func(b []byte) (int, error) { + return 0, errS3 + }, + cancelFn: func() error { + return errS3 + }, + }, nil + }, + }) + _, err := il.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader(""))) + So(err, ShouldNotBeNil) + }) + + Convey("Test PutBlobChunk3", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{ + writeFn: func(b []byte) (int, error) { + return 0, errS3 + }, + }, nil + }, + }) + _, err := il.PutBlobChunk(testImage, "uuid", 12, 100, ioutil.NopCloser(strings.NewReader(""))) + So(err, ShouldNotBeNil) + }) + + Convey("Test FinishBlobUpload", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{ + commitFn: func() error { + return errS3 + }, + }, nil + }, + }) + d := godigest.FromBytes([]byte("test")) + err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test FinishBlobUpload2", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: 
func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{ + closeFn: func() error { + return errS3 + }, + }, nil + }, + }) + d := godigest.FromBytes([]byte("test")) + err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test FinishBlobUpload3", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + return nil, errS3 + }, + }) + d := godigest.FromBytes([]byte("test")) + err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test FinishBlobUpload4", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + moveFn: func(ctx context.Context, sourcePath, destPath string) error { + return errS3 + }, + }) + d := godigest.FromBytes([]byte("")) + err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test FullBlobUpload", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) { + return &FileWriterMock{}, errS3 + }, + }) + d := godigest.FromBytes([]byte("")) + _, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test FullBlobUpload2", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{}) + d := godigest.FromBytes([]byte(" ")) + _, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test FullBlobUpload3", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + moveFn: func(ctx context.Context, sourcePath, destPath string) error { + return 
errS3 + }, + }) + d := godigest.FromBytes([]byte("")) + _, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Test GetBlob", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), errS3 + }, + }) + d := godigest.FromBytes([]byte("")) + _, _, err := il.GetBlob(testImage, d.String(), "") + So(err, ShouldNotBeNil) + }) + + Convey("Test DeleteBlob", t, func(c C) { + il = createMockStorage(testDir, &StorageDriverMock{ + deleteFn: func(ctx context.Context, path string) error { + return errS3 + }, + }) + d := godigest.FromBytes([]byte("")) + err := il.DeleteBlob(testImage, d.String()) + So(err, ShouldNotBeNil) + }) +} diff --git a/pkg/storage/s3/storage.go b/pkg/storage/s3/storage.go new file mode 100644 index 0000000000..6f9ba5cb96 --- /dev/null +++ b/pkg/storage/s3/storage.go @@ -0,0 +1,1094 @@ +package s3 + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "path" + "path/filepath" + "strings" + "sync" + + "github.com/anuvu/zot/errors" + zlog "github.com/anuvu/zot/pkg/log" + "github.com/anuvu/zot/pkg/storage" + guuid "github.com/gofrs/uuid" + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/rs/zerolog" + + // Add s3 support + storageDriver "github.com/docker/distribution/registry/storage/driver" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" // Load s3 driver +) + +// ObjectStorage provides the image storage operations. 
+type ObjectStorage struct { + rootDir string + store storageDriver.StorageDriver + lock *sync.RWMutex + blobUploads map[string]storage.BlobUpload + log zerolog.Logger + // We must keep track of multi part uploads to s3, because the lib + // which we are using doesn't cancel multiparts uploads + // see: https://github.com/distribution/distribution/blob/main/registry/storage/driver/s3-aws/s3.go#L545 + isMultiPartUpload map[string]bool +} + +func (is *ObjectStorage) RootDir() string { + return is.rootDir +} + +func (is *ObjectStorage) DirExists(d string) bool { + if fi, err := is.store.Stat(context.Background(), d); err == nil && fi.IsDir() { + return true + } + + return false +} + +// NewObjectStorage returns a new image store backed by cloud storages. +// see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers +func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger, + store storageDriver.StorageDriver) storage.ImageStore { + is := &ObjectStorage{ + rootDir: rootDir, + store: store, + lock: &sync.RWMutex{}, + blobUploads: make(map[string]storage.BlobUpload), + log: log.With().Caller().Logger(), + isMultiPartUpload: make(map[string]bool), + } + + return is +} + +// RLock read-lock. +func (is *ObjectStorage) RLock() { + is.lock.RLock() +} + +// RUnlock read-unlock. +func (is *ObjectStorage) RUnlock() { + is.lock.RUnlock() +} + +// Lock write-lock. +func (is *ObjectStorage) Lock() { + is.lock.Lock() +} + +// Unlock write-unlock. 
+func (is *ObjectStorage) Unlock() { + is.lock.Unlock() +} + +func (is *ObjectStorage) initRepo(name string) error { + repoDir := path.Join(is.rootDir, name) + + if fi, err := is.store.Stat(context.Background(), repoDir); err == nil && fi.IsDir() { + return nil + } + + // "oci-layout" file - create if it doesn't exist + ilPath := path.Join(repoDir, ispec.ImageLayoutFile) + if _, err := is.store.Stat(context.Background(), ilPath); err != nil { + il := ispec.ImageLayout{Version: ispec.ImageLayoutVersion} + buf, err := json.Marshal(il) + + if err != nil { + is.log.Error().Err(err).Msg("unable to marshal JSON") + return err + } + + if _, err := writeFile(is.store, ilPath, buf); err != nil { + is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file") + return err + } + } + + // "index.json" file - create if it doesn't exist + indexPath := path.Join(repoDir, "index.json") + if _, err := is.store.Stat(context.Background(), indexPath); err != nil { + index := ispec.Index{} + index.SchemaVersion = 2 + buf, err := json.Marshal(index) + + if err != nil { + is.log.Error().Err(err).Msg("unable to marshal JSON") + return err + } + + if _, err := writeFile(is.store, indexPath, buf); err != nil { + is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file") + return err + } + } + + return nil +} + +// InitRepo creates an image repository under this store. +func (is *ObjectStorage) InitRepo(name string) error { + is.Lock() + defer is.Unlock() + + return is.initRepo(name) +} + +// ValidateRepo validates that the repository layout is complaint with the OCI repo layout. 
+func (is *ObjectStorage) ValidateRepo(name string) (bool, error) { + // https://github.com/opencontainers/image-spec/blob/master/image-layout.md#content + // at least, expect at least 3 entries - ["blobs", "oci-layout", "index.json"] + // and an additional/optional BlobUploadDir in each image store + // for objects storage we can not create empty dirs, so we check only against index.json and oci-layout + dir := path.Join(is.rootDir, name) + if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() { + return false, errors.ErrRepoNotFound + } + + files, err := is.store.List(context.Background(), dir) + if err != nil { + is.log.Error().Err(err).Str("dir", dir).Msg("unable to read directory") + return false, errors.ErrRepoNotFound + } + + // nolint:gomnd + if len(files) < 2 { + return false, errors.ErrRepoBadVersion + } + + found := map[string]bool{ + ispec.ImageLayoutFile: false, + "index.json": false, + } + + for _, file := range files { + f, err := is.store.Stat(context.Background(), file) + if err != nil { + return false, err + } + + if strings.HasSuffix(file, "blobs") && !f.IsDir() { + return false, nil + } + + filename, err := filepath.Rel(dir, file) + if err != nil { + return false, err + } + + found[filename] = true + } + + for k, v := range found { + if !v && k != storage.BlobUploadDir { + return false, nil + } + } + + buf, err := is.store.GetContent(context.Background(), path.Join(dir, ispec.ImageLayoutFile)) + if err != nil { + return false, err + } + + var il ispec.ImageLayout + if err := json.Unmarshal(buf, &il); err != nil { + return false, err + } + + if il.Version != ispec.ImageLayoutVersion { + return false, errors.ErrRepoBadVersion + } + + return true, nil +} + +// GetRepositories returns a list of all the repositories under this store. 
+func (is *ObjectStorage) GetRepositories() ([]string, error) { + dir := is.rootDir + + is.RLock() + defer is.RUnlock() + + stores := make([]string, 0) + err := is.store.Walk(context.Background(), dir, func(fileInfo storageDriver.FileInfo) error { + if !fileInfo.IsDir() { + return nil + } + + rel, err := filepath.Rel(is.rootDir, fileInfo.Path()) + if err != nil { + return nil + } + + if ok, err := is.ValidateRepo(rel); !ok || err != nil { + return nil + } + + stores = append(stores, rel) + + return nil + }) + + // if the root directory is not yet created then return an empty slice of repositories + _, ok := err.(storageDriver.PathNotFoundError) + if ok { + return stores, nil + } + + return stores, err +} + +// GetImageTags returns a list of image tags available in the specified repository. +func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) { + dir := path.Join(is.rootDir, repo) + if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() { + return nil, errors.ErrRepoNotFound + } + + is.RLock() + defer is.RUnlock() + + buf, err := is.GetIndexContent(repo) + if err != nil { + return nil, err + } + + var index ispec.Index + if err := json.Unmarshal(buf, &index); err != nil { + is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON") + return nil, errors.ErrRepoNotFound + } + + tags := make([]string, 0) + + for _, manifest := range index.Manifests { + v, ok := manifest.Annotations[ispec.AnnotationRefName] + if ok { + tags = append(tags, v) + } + } + + return tags, nil +} + +// GetImageManifest returns the image manifest of an image in the specific repository. 
+func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte, string, string, error) { + dir := path.Join(is.rootDir, repo) + if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() { + return nil, "", "", errors.ErrRepoNotFound + } + + is.RLock() + defer is.RUnlock() + + buf, err := is.GetIndexContent(repo) + if err != nil { + return nil, "", "", err + } + + var index ispec.Index + if err := json.Unmarshal(buf, &index); err != nil { + is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON") + return nil, "", "", err + } + + found := false + + var digest godigest.Digest + + mediaType := "" + + for _, m := range index.Manifests { + if reference == m.Digest.String() { + digest = m.Digest + mediaType = m.MediaType + found = true + + break + } + + v, ok := m.Annotations[ispec.AnnotationRefName] + if ok && v == reference { + digest = m.Digest + mediaType = m.MediaType + found = true + + break + } + } + + if !found { + return nil, "", "", errors.ErrManifestNotFound + } + + p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded()) + + buf, err = is.store.GetContent(context.Background(), p) + if err != nil { + is.log.Error().Err(err).Str("blob", p).Msg("failed to read manifest") + return nil, "", "", err + } + + var manifest ispec.Manifest + if err := json.Unmarshal(buf, &manifest); err != nil { + is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON") + return nil, "", "", err + } + + return buf, digest.String(), mediaType, nil +} + +// PutImageManifest adds an image manifest to the repository. +func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaType string, + body []byte) (string, error) { + if err := is.InitRepo(repo); err != nil { + is.log.Debug().Err(err).Msg("init repo") + return "", err + } + + if mediaType != ispec.MediaTypeImageManifest { + is.log.Debug().Interface("actual", mediaType). 
+ Interface("expected", ispec.MediaTypeImageManifest).Msg("bad manifest media type") + return "", errors.ErrBadManifest + } + + if len(body) == 0 { + is.log.Debug().Int("len", len(body)).Msg("invalid body length") + return "", errors.ErrBadManifest + } + + var m ispec.Manifest + if err := json.Unmarshal(body, &m); err != nil { + is.log.Error().Err(err).Msg("unable to unmarshal JSON") + return "", errors.ErrBadManifest + } + + if m.SchemaVersion != storage.SchemaVersion { + is.log.Error().Int("SchemaVersion", m.SchemaVersion).Msg("invalid manifest") + return "", errors.ErrBadManifest + } + + for _, l := range m.Layers { + digest := l.Digest + blobPath := is.BlobPath(repo, digest) + is.log.Info().Str("blobPath", blobPath).Str("reference", reference).Msg("manifest layers") + + if _, err := is.store.Stat(context.Background(), blobPath); err != nil { + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob") + return digest.String(), errors.ErrBlobNotFound + } + } + + mDigest := godigest.FromBytes(body) + refIsDigest := false + d, err := godigest.Parse(reference) + + if err == nil { + if d.String() != mDigest.String() { + is.log.Error().Str("actual", mDigest.String()).Str("expected", d.String()). 
+ Msg("manifest digest is not valid") + return "", errors.ErrBadManifest + } + + refIsDigest = true + } + + is.Lock() + defer is.Unlock() + + dir := path.Join(is.rootDir, repo) + + buf, err := is.GetIndexContent(repo) + if err != nil { + return "", err + } + + var index ispec.Index + if err := json.Unmarshal(buf, &index); err != nil { + is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON") + return "", errors.ErrRepoBadVersion + } + + updateIndex := true + // create a new descriptor + desc := ispec.Descriptor{MediaType: mediaType, Size: int64(len(body)), Digest: mDigest, + Platform: &ispec.Platform{Architecture: "amd64", OS: "linux"}} + if !refIsDigest { + desc.Annotations = map[string]string{ispec.AnnotationRefName: reference} + } + + for i, m := range index.Manifests { + if reference == m.Digest.String() { + // nothing changed, so don't update + desc = m + updateIndex = false + + break + } + + v, ok := m.Annotations[ispec.AnnotationRefName] + if ok && v == reference { + if m.Digest.String() == mDigest.String() { + // nothing changed, so don't update + desc = m + updateIndex = false + + break + } + // manifest contents have changed for the same tag, + // so update index.json descriptor + is.log.Info(). + Int64("old size", desc.Size). + Int64("new size", int64(len(body))). + Str("old digest", desc.Digest.String()). + Str("new digest", mDigest.String()). + Msg("updating existing tag with new manifest contents") + + desc = m + desc.Size = int64(len(body)) + desc.Digest = mDigest + + index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...) 
+ + break + } + } + + if !updateIndex { + return desc.Digest.String(), nil + } + + // write manifest to "blobs" + dir = path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String()) + manifestPath := path.Join(dir, mDigest.Encoded()) + + if err = is.store.PutContent(context.Background(), manifestPath, body); err != nil { + is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write") + return "", err + } + + // now update "index.json" + index.Manifests = append(index.Manifests, desc) + dir = path.Join(is.rootDir, repo) + indexPath := path.Join(dir, "index.json") + buf, err = json.Marshal(index) + + if err != nil { + is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON") + return "", err + } + + if err = is.store.PutContent(context.Background(), indexPath, buf); err != nil { + is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write") + return "", err + } + + return desc.Digest.String(), nil +} + +// DeleteImageManifest deletes the image manifest from the repository. 
+func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) error { + dir := path.Join(is.rootDir, repo) + if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() { + return errors.ErrRepoNotFound + } + + isTag := false + + // as per spec "reference" can only be a digest and not a tag + digest, err := godigest.Parse(reference) + if err != nil { + is.log.Debug().Str("invalid digest: ", reference).Msg("storage: assuming tag") + + isTag = true + } + + is.Lock() + defer is.Unlock() + + buf, err := is.GetIndexContent(repo) + if err != nil { + return err + } + + var index ispec.Index + if err := json.Unmarshal(buf, &index); err != nil { + is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON") + return err + } + + found := false + + var m ispec.Descriptor + + // we are deleting, so keep only those manifests that don't match + outIndex := index + outIndex.Manifests = []ispec.Descriptor{} + + for _, m = range index.Manifests { + if isTag { + tag, ok := m.Annotations[ispec.AnnotationRefName] + if ok && tag == reference { + is.log.Debug().Str("deleting tag", tag).Msg("") + + digest = m.Digest + + found = true + + continue + } + } else if reference == m.Digest.String() { + is.log.Debug().Str("deleting reference", reference).Msg("") + found = true + continue + } + + outIndex.Manifests = append(outIndex.Manifests, m) + } + + if !found { + return errors.ErrManifestNotFound + } + + // now update "index.json" + dir = path.Join(is.rootDir, repo) + file := path.Join(dir, "index.json") + buf, err = json.Marshal(outIndex) + + if err != nil { + return err + } + + if _, err := writeFile(is.store, file, buf); err != nil { + is.log.Debug().Str("deleting reference", reference).Msg("") + return err + } + + // Delete blob only when blob digest not present in manifest entry. + // e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed. 
+ toDelete := true + + for _, m = range outIndex.Manifests { + if digest.String() == m.Digest.String() { + toDelete = false + break + } + } + + if toDelete { + p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded()) + + err = is.store.Delete(context.Background(), p) + if err != nil { + return err + } + } + + return nil +} + +// BlobUploadPath returns the upload path for a blob in this store. +func (is *ObjectStorage) BlobUploadPath(repo string, uuid string) string { + dir := path.Join(is.rootDir, repo) + blobUploadPath := path.Join(dir, storage.BlobUploadDir, uuid) + + return blobUploadPath +} + +// NewBlobUpload returns the unique ID for an upload in progress. +func (is *ObjectStorage) NewBlobUpload(repo string) (string, error) { + if err := is.InitRepo(repo); err != nil { + is.log.Error().Err(err).Msg("error initializing repo") + + return "", err + } + + uuid, err := guuid.NewV4() + if err != nil { + return "", err + } + + u := uuid.String() + + blobUploadPath := is.BlobUploadPath(repo, u) + + // here we should create an empty multi part upload, but that's not possible + // so we just create a regular empty file which will be overwritten by FinishBlobUpload + err = is.store.PutContent(context.Background(), blobUploadPath, []byte{}) + if err != nil { + return "", errors.ErrRepoNotFound + } + + return u, nil +} + +// GetBlobUpload returns the current size of a blob upload. 
+func (is *ObjectStorage) GetBlobUpload(repo string, uuid string) (int64, error) { + var fileSize int64 + + blobUploadPath := is.BlobUploadPath(repo, uuid) + + // if it's not a multipart upload check for the regular empty file + // created by NewBlobUpload, it should have 0 size every time + isMultiPartStarted, ok := is.isMultiPartUpload[blobUploadPath] + if !isMultiPartStarted || !ok { + fi, err := is.store.Stat(context.Background(), blobUploadPath) + if err != nil { + _, ok := err.(storageDriver.PathNotFoundError) + if ok { + return -1, errors.ErrUploadNotFound + } + + return -1, err + } + + fileSize = fi.Size() + } else { + // otherwise get the size of multi parts upload + fi, err := getMultipartFileWriter(is, blobUploadPath) + if err != nil { + return -1, err + } + + fileSize = fi.Size() + } + + return fileSize, nil +} + +// PutBlobChunkStreamed appends another chunk of data to the specified blob. It returns +// the number of actual bytes to the blob. +func (is *ObjectStorage) PutBlobChunkStreamed(repo string, uuid string, body io.Reader) (int64, error) { + if err := is.InitRepo(repo); err != nil { + return -1, err + } + + blobUploadPath := is.BlobUploadPath(repo, uuid) + + _, err := is.store.Stat(context.Background(), blobUploadPath) + if err != nil { + return -1, errors.ErrUploadNotFound + } + + file, err := getMultipartFileWriter(is, blobUploadPath) + if err != nil { + is.log.Error().Err(err).Msg("failed to create multipart upload") + return -1, err + } + + defer file.Close() + + buf := new(bytes.Buffer) + + _, err = buf.ReadFrom(body) + if err != nil { + is.log.Error().Err(err).Msg("failed to read blob") + return -1, err + } + + n, err := file.Write(buf.Bytes()) + if err != nil { + is.log.Error().Err(err).Msg("failed to append to file") + return -1, err + } + + return int64(n), err +} + +// PutBlobChunk writes another chunk of data to the specified blob. It returns +// the number of actual bytes to the blob. 
+func (is *ObjectStorage) PutBlobChunk(repo string, uuid string, from int64, to int64, + body io.Reader) (int64, error) { + if err := is.InitRepo(repo); err != nil { + return -1, err + } + + blobUploadPath := is.BlobUploadPath(repo, uuid) + + _, err := is.store.Stat(context.Background(), blobUploadPath) + if err != nil { + return -1, errors.ErrUploadNotFound + } + + file, err := getMultipartFileWriter(is, blobUploadPath) + if err != nil { + is.log.Error().Err(err).Msg("failed to create multipart upload") + return -1, err + } + + defer file.Close() + + if from != file.Size() { + // cancel multipart upload created earlier + err := file.Cancel() + if err != nil { + is.log.Error().Err(err).Msg("failed to cancel multipart upload") + return -1, err + } + + is.log.Error().Int64("expected", from).Int64("actual", file.Size()). + Msg("invalid range start for blob upload") + + return -1, errors.ErrBadUploadRange + } + + buf := new(bytes.Buffer) + + _, err = buf.ReadFrom(body) + if err != nil { + is.log.Error().Err(err).Msg("failed to read blob") + return -1, err + } + + n, err := file.Write(buf.Bytes()) + if err != nil { + is.log.Error().Err(err).Msg("failed to append to file") + return -1, err + } + + is.isMultiPartUpload[blobUploadPath] = true + + return int64(n), err +} + +// BlobUploadInfo returns the current blob size in bytes. 
+func (is *ObjectStorage) BlobUploadInfo(repo string, uuid string) (int64, error) { + var fileSize int64 + + blobUploadPath := is.BlobUploadPath(repo, uuid) + + // if it's not a multipart upload check for the regular empty file + // created by NewBlobUpload, it should have 0 size every time + isMultiPartStarted, ok := is.isMultiPartUpload[blobUploadPath] + if !isMultiPartStarted || !ok { + fi, err := is.store.Stat(context.Background(), blobUploadPath) + if err != nil { + is.log.Error().Err(err).Str("blob", blobUploadPath).Msg("failed to stat blob") + return -1, err + } + + fileSize = fi.Size() + } else { + // otherwise get the size of multi parts upload + fi, err := getMultipartFileWriter(is, blobUploadPath) + if err != nil { + is.log.Error().Err(err).Str("blob", blobUploadPath).Msg("failed to stat blob") + return -1, err + } + + fileSize = fi.Size() + } + + return fileSize, nil +} + +// FinishBlobUpload finalizes the blob upload and moves blob the repository. +func (is *ObjectStorage) FinishBlobUpload(repo string, uuid string, body io.Reader, digest string) error { + dstDigest, err := godigest.Parse(digest) + if err != nil { + is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest") + return errors.ErrBadBlobDigest + } + + src := is.BlobUploadPath(repo, uuid) + + // complete multiUploadPart + fileWriter, err := is.store.Writer(context.Background(), src, true) + if err != nil { + is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob") + return errors.ErrBadBlobDigest + } + + if err := fileWriter.Commit(); err != nil { + is.log.Error().Err(err).Msg("failed to commit file") + return err + } + + if err := fileWriter.Close(); err != nil { + is.log.Error().Err(err).Msg("failed to close file") + } + + fileReader, err := is.store.Reader(context.Background(), src, 0) + if err != nil { + is.log.Error().Err(err).Str("blob", src).Msg("failed to open file") + return errors.ErrUploadNotFound + } + + srcDigest, err := 
godigest.FromReader(fileReader) + if err != nil { + is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob") + return errors.ErrBadBlobDigest + } + + if srcDigest != dstDigest { + is.log.Error().Str("srcDigest", srcDigest.String()). + Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest") + return errors.ErrBadBlobDigest + } + + fileReader.Close() + + dst := is.BlobPath(repo, dstDigest) + + if err := is.store.Move(context.Background(), src, dst); err != nil { + is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). + Str("dst", dst).Msg("unable to finish blob") + return err + } + + // remove multipart upload, not needed anymore + delete(is.isMultiPartUpload, src) + + return nil +} + +// FullBlobUpload handles a full blob upload, and no partial session is created. +func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, digest string) (string, int64, error) { + if err := is.InitRepo(repo); err != nil { + return "", -1, err + } + + dstDigest, err := godigest.Parse(digest) + if err != nil { + is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest") + return "", -1, errors.ErrBadBlobDigest + } + + u, err := guuid.NewV4() + if err != nil { + return "", -1, err + } + + uuid := u.String() + + src := is.BlobUploadPath(repo, uuid) + + digester := sha256.New() + + buf := new(bytes.Buffer) + + _, err = buf.ReadFrom(body) + if err != nil { + is.log.Error().Err(err).Msg("failed to read blob") + return "", -1, err + } + + n, err := writeFile(is.store, src, buf.Bytes()) + if err != nil { + is.log.Error().Err(err).Msg("failed to write blob") + return "", -1, err + } + + _, err = digester.Write(buf.Bytes()) + if err != nil { + is.log.Error().Err(err).Msg("digester failed to write") + return "", -1, err + } + + srcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf("%x", digester.Sum(nil))) + if srcDigest != dstDigest { + is.log.Error().Str("srcDigest", 
srcDigest.String()). + Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest") + return "", -1, errors.ErrBadBlobDigest + } + + is.Lock() + defer is.Unlock() + + dst := is.BlobPath(repo, dstDigest) + + if err := is.store.Move(context.Background(), src, dst); err != nil { + is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). + Str("dst", dst).Msg("unable to finish blob") + return "", -1, err + } + + return uuid, int64(n), nil +} + +func (is *ObjectStorage) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error { + return nil +} + +// DeleteBlobUpload deletes an existing blob upload that is currently in progress. +func (is *ObjectStorage) DeleteBlobUpload(repo string, uuid string) error { + blobUploadPath := is.BlobUploadPath(repo, uuid) + if err := is.store.Delete(context.Background(), blobUploadPath); err != nil { + is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload") + return err + } + + return nil +} + +// BlobPath returns the repository path of a blob. +func (is *ObjectStorage) BlobPath(repo string, digest godigest.Digest) string { + return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded()) +} + +// CheckBlob verifies a blob and returns true if the blob is correct. 
+func (is *ObjectStorage) CheckBlob(repo string, digest string) (bool, int64, error) { + d, err := godigest.Parse(digest) + if err != nil { + is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest") + return false, -1, errors.ErrBadBlobDigest + } + + blobPath := is.BlobPath(repo, d) + + is.RLock() + defer is.RUnlock() + + blobInfo, err := is.store.Stat(context.Background(), blobPath) + if err != nil { + _, ok := err.(storageDriver.PathNotFoundError) + if ok { + return false, -1, errors.ErrBlobNotFound + } + + is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob") + + return false, -1, errors.ErrBadBlobDigest + } + + is.log.Debug().Str("blob path", blobPath).Msg("blob path found") + + return true, blobInfo.Size(), nil +} + +// GetBlob returns a stream to read the blob. +// FIXME: we should probably parse the manifest and use (digest, mediaType) as a +// blob selector instead of directly downloading the blob. +func (is *ObjectStorage) GetBlob(repo string, digest string, mediaType string) (io.Reader, int64, error) { + d, err := godigest.Parse(digest) + if err != nil { + is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest") + return nil, -1, errors.ErrBadBlobDigest + } + + blobPath := is.BlobPath(repo, d) + + is.RLock() + defer is.RUnlock() + + blobInfo, err := is.store.Stat(context.Background(), blobPath) + if err != nil { + is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob") + return nil, -1, errors.ErrBlobNotFound + } + + blobReader, err := is.store.Reader(context.Background(), blobPath, 0) + if err != nil { + is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to open blob") + return nil, -1, err + } + + return blobReader, blobInfo.Size(), nil +} + +func (is *ObjectStorage) GetBlobContent(repo string, digest string) ([]byte, error) { + blob, _, err := is.GetBlob(repo, digest, ispec.MediaTypeImageManifest) + if err != nil { + return []byte{}, err + } + + buf := new(bytes.Buffer) + 
+ _, err = buf.ReadFrom(blob) + if err != nil { + is.log.Error().Err(err).Msg("failed to read blob") + return []byte{}, err + } + + return buf.Bytes(), nil +} + +func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) { + dir := path.Join(is.rootDir, repo) + + buf, err := is.store.GetContent(context.Background(), path.Join(dir, "index.json")) + if err != nil { + is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json") + return []byte{}, errors.ErrRepoNotFound + } + + return buf, nil +} + +// DeleteBlob removes the blob from the repository. +func (is *ObjectStorage) DeleteBlob(repo string, digest string) error { + d, err := godigest.Parse(digest) + if err != nil { + is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest") + return errors.ErrBlobNotFound + } + + blobPath := is.BlobPath(repo, d) + + is.Lock() + defer is.Unlock() + + _, err = is.store.Stat(context.Background(), blobPath) + if err != nil { + is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob") + return errors.ErrBlobNotFound + } + + if err := is.store.Delete(context.Background(), blobPath); err != nil { + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path") + return err + } + + return nil +} + +// Do not use for multipart upload, buf must not be empty. +// If you want to create an empty file use is.store.PutContent(). 
+func writeFile(store storageDriver.StorageDriver, filepath string, buf []byte) (int, error) { + var n int + + if fw, err := store.Writer(context.Background(), filepath, false); err == nil { + defer fw.Close() + + if n, err = fw.Write(buf); err != nil { + return -1, err + } + + if err := fw.Commit(); err != nil { + return -1, err + } + } else { + return -1, err + } + + return n, nil +} + +// Because we can not create an empty multipart upload, we store multi part uploads +// so that we know when to create a fileWriter with append=true or with append=false +// Trying and handling errors results in weird s3 api errors. +func getMultipartFileWriter(is *ObjectStorage, filepath string) (storageDriver.FileWriter, error) { + var file storageDriver.FileWriter + + var err error + + isMultiPartStarted, ok := is.isMultiPartUpload[filepath] + if !isMultiPartStarted || !ok { + file, err = is.store.Writer(context.Background(), filepath, false) + if err != nil { + return file, err + } + } else { + file, err = is.store.Writer(context.Background(), filepath, true) + if err != nil { + return file, err + } + } + + return file, nil +} diff --git a/pkg/storage/storage_fs.go b/pkg/storage/storage_fs.go index cfee4cddd4..2d10f041e7 100644 --- a/pkg/storage/storage_fs.go +++ b/pkg/storage/storage_fs.go @@ -29,7 +29,7 @@ import ( const ( // BlobUploadDir defines the upload directory for blob uploads. 
BlobUploadDir = ".uploads" - schemaVersion = 2 + SchemaVersion = 2 gcDelay = 1 * time.Hour ) @@ -458,7 +458,7 @@ func (is *ImageStoreFS) PutImageManifest(repo string, reference string, mediaTyp return "", errors.ErrBadManifest } - if m.SchemaVersion != schemaVersion { + if m.SchemaVersion != SchemaVersion { is.log.Error().Int("SchemaVersion", m.SchemaVersion).Msg("invalid manifest") return "", errors.ErrBadManifest } diff --git a/pkg/storage/storage_fs_test.go b/pkg/storage/storage_fs_test.go new file mode 100644 index 0000000000..764a7f950b --- /dev/null +++ b/pkg/storage/storage_fs_test.go @@ -0,0 +1,603 @@ +package storage_test + +import ( + "bytes" + _ "crypto/sha256" + "encoding/json" + "io/ioutil" + "math/rand" + "os" + "os/exec" + "path" + "strings" + "testing" + "time" + + "github.com/anuvu/zot/errors" + "github.com/anuvu/zot/pkg/log" + "github.com/anuvu/zot/pkg/storage" + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/rs/zerolog" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestStorageFSAPIs(t *testing.T) { + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != nil { + panic(err) + } + + defer os.RemoveAll(dir) + il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) + + Convey("Repo layout", t, func(c C) { + repoName := "test" + + Convey("Bad image manifest", func() { + v, err := il.NewBlobUpload("test") + So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + + content := []byte("test-data1") + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + + b, err := il.PutBlobChunk(repoName, v, 0, int64(l), buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + err = il.FinishBlobUpload("test", v, buf, d.String()) + So(err, ShouldBeNil) + + annotationsMap := make(map[string]string) + annotationsMap[ispec.AnnotationRefName] = "1.0" + m := ispec.Manifest{ + Config: ispec.Descriptor{ + Digest: d, + Size: int64(l), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: d, + Size: int64(l), + }, + }, + Annotations: annotationsMap, + } + + m.SchemaVersion = 2 + mb, _ := json.Marshal(m) + d = godigest.FromBytes(mb) + + So(os.Chmod(path.Join(il.RootDir(), repoName, "index.json"), 0000), ShouldBeNil) + _, err = il.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldNotBeNil) + + So(os.Chmod(path.Join(il.RootDir(), repoName, "index.json"), 0755), ShouldBeNil) + _, err = il.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) + + manifestPath := path.Join(il.RootDir(), repoName, "blobs", d.Algorithm().String(), d.Encoded()) + + So(os.Chmod(manifestPath, 0000), ShouldBeNil) + _, _, _, err = il.GetImageManifest(repoName, d.String()) + So(err, ShouldNotBeNil) + + So(os.Remove(manifestPath), ShouldBeNil) + _, _, _, err = il.GetImageManifest(repoName, d.String()) + So(err, ShouldNotBeNil) + + 
So(os.Chmod(path.Join(il.RootDir(), repoName), 0000), ShouldBeNil) + _, err = il.PutImageManifest(repoName, "2.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldNotBeNil) + So(os.Chmod(path.Join(il.RootDir(), repoName), 0755), ShouldBeNil) + + // invalid DeleteImageManifest + indexPath := path.Join(il.RootDir(), repoName, "index.json") + So(os.Chmod(indexPath, 0000), ShouldBeNil) + + err = il.DeleteImageManifest(repoName, d.String()) + So(err, ShouldNotBeNil) + + So(os.RemoveAll(path.Join(il.RootDir(), repoName)), ShouldBeNil) + }) + }) +} + +func TestDedupeLinks(t *testing.T) { + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != nil { + panic(err) + } + + defer os.RemoveAll(dir) + + il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) + + Convey("Dedupe", t, func(c C) { + blobDigest1 := "" + blobDigest2 := "" + + // manifest1 + v, err := il.NewBlobUpload("dedupe1") + So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + + content := []byte("test-data3") + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + b, err := il.PutBlobChunkStreamed("dedupe1", v, buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + blobDigest1 = strings.Split(d.String(), ":")[1] + So(blobDigest1, ShouldNotBeEmpty) + + err = il.FinishBlobUpload("dedupe1", v, buf, d.String()) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + _, _, err = il.CheckBlob("dedupe1", d.String()) + So(err, ShouldBeNil) + + _, _, err = il.GetBlob("dedupe1", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip") + So(err, ShouldBeNil) + + m := ispec.Manifest{} + m.SchemaVersion = 2 + m = ispec.Manifest{ + Config: ispec.Descriptor{ + Digest: d, + Size: int64(l), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: d, + Size: int64(l), + }, + }, + } + m.SchemaVersion = 2 + mb, _ := json.Marshal(m) + d = godigest.FromBytes(mb) + _, err = il.PutImageManifest("dedupe1", d.String(), 
ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) + + _, _, _, err = il.GetImageManifest("dedupe1", d.String()) + So(err, ShouldBeNil) + + // manifest2 + v, err = il.NewBlobUpload("dedupe2") + So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + + content = []byte("test-data3") + buf = bytes.NewBuffer(content) + l = buf.Len() + d = godigest.FromBytes(content) + b, err = il.PutBlobChunkStreamed("dedupe2", v, buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + blobDigest2 = strings.Split(d.String(), ":")[1] + So(blobDigest2, ShouldNotBeEmpty) + + err = il.FinishBlobUpload("dedupe2", v, buf, d.String()) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + _, _, err = il.CheckBlob("dedupe2", d.String()) + So(err, ShouldBeNil) + + _, _, err = il.GetBlob("dedupe2", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip") + So(err, ShouldBeNil) + + m = ispec.Manifest{} + m.SchemaVersion = 2 + m = ispec.Manifest{ + Config: ispec.Descriptor{ + Digest: d, + Size: int64(l), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: d, + Size: int64(l), + }, + }, + } + m.SchemaVersion = 2 + mb, _ = json.Marshal(m) + d = godigest.FromBytes(mb) + _, err = il.PutImageManifest("dedupe2", "1.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) + + _, _, _, err = il.GetImageManifest("dedupe2", d.String()) + So(err, ShouldBeNil) + + // verify that dedupe with hard links happened + fi1, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest1)) + So(err, ShouldBeNil) + fi2, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest2)) + So(err, ShouldBeNil) + So(os.SameFile(fi1, fi2), ShouldBeTrue) + }) +} + +func TestDedupe(t *testing.T) { + Convey("Dedupe", t, func(c C) { + Convey("Nil ImageStore", func() { + var is storage.ImageStore + So(func() { _ = is.DedupeBlob("", "", "") }, ShouldPanic) + }) + + Convey("Valid ImageStore", func() { + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != 
nil { + panic(err) + } + defer os.RemoveAll(dir) + + is := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) + + So(is.DedupeBlob("", "", ""), ShouldNotBeNil) + }) + }) +} + +func TestNegativeCases(t *testing.T) { + Convey("Invalid root dir", t, func(c C) { + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != nil { + panic(err) + } + os.RemoveAll(dir) + + So(storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil) + if os.Geteuid() != 0 { + So(storage.NewImageStore("/deadBEEF", true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil) + } + }) + + Convey("Invalid init repo", t, func(c C) { + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != nil { + panic(err) + } + defer os.RemoveAll(dir) + il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) + err = os.Chmod(dir, 0000) // remove all perms + So(err, ShouldBeNil) + if os.Geteuid() != 0 { + err = il.InitRepo("test") + So(err, ShouldNotBeNil) + } + + err = os.Chmod(dir, 0755) + So(err, ShouldBeNil) + + // Init repo should fail if repo is a file. 
+ err = ioutil.WriteFile(path.Join(dir, "file-test"), []byte("this is test file"), 0755) // nolint:gosec
+ So(err, ShouldBeNil)
+ err = il.InitRepo("file-test")
+ So(err, ShouldNotBeNil)
+
+ err = os.Mkdir(path.Join(dir, "test-dir"), 0755)
+ So(err, ShouldBeNil)
+
+ err = il.InitRepo("test-dir")
+ So(err, ShouldBeNil)
+ })
+
+ Convey("Invalid validate repo", t, func(c C) {
+ dir, err := ioutil.TempDir("", "oci-repo-test")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(dir)
+ il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
+ So(il, ShouldNotBeNil)
+ So(il.InitRepo("test"), ShouldBeNil)
+
+ err = os.MkdirAll(path.Join(dir, "invalid-test"), 0755)
+ So(err, ShouldBeNil)
+
+ err = os.Chmod(path.Join(dir, "invalid-test"), 0000) // remove all perms
+ So(err, ShouldBeNil)
+
+ _, err = il.ValidateRepo("invalid-test")
+ So(err, ShouldNotBeNil)
+ So(err, ShouldEqual, errors.ErrRepoNotFound)
+
+ err = os.Chmod(path.Join(dir, "invalid-test"), 0755) // restore all perms
+ So(err, ShouldBeNil)
+
+ err = ioutil.WriteFile(path.Join(dir, "invalid-test", "blobs"), []byte{}, 0755) // nolint: gosec
+ So(err, ShouldBeNil)
+
+ err = ioutil.WriteFile(path.Join(dir, "invalid-test", "index.json"), []byte{}, 0755) // nolint: gosec
+ So(err, ShouldBeNil)
+
+ err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte{}, 0755) // nolint: gosec
+ So(err, ShouldBeNil)
+
+ isValid, err := il.ValidateRepo("invalid-test")
+ So(err, ShouldBeNil)
+ So(isValid, ShouldEqual, false)
+
+ err = os.Remove(path.Join(dir, "invalid-test", "blobs"))
+ So(err, ShouldBeNil)
+
+ err = os.Mkdir(path.Join(dir, "invalid-test", "blobs"), 0755)
+ So(err, ShouldBeNil)
+
+ isValid, err = il.ValidateRepo("invalid-test")
+ So(err, ShouldNotBeNil)
+ So(isValid, ShouldEqual, false)
+
+ err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte("{}"), 0755) // nolint: gosec
+ So(err, ShouldBeNil)
+
+ isValid, err = 
il.ValidateRepo("invalid-test") + So(err, ShouldNotBeNil) + So(err, ShouldEqual, errors.ErrRepoBadVersion) + So(isValid, ShouldEqual, false) + + files, err := ioutil.ReadDir(path.Join(dir, "test")) + So(err, ShouldBeNil) + for _, f := range files { + os.Remove(path.Join(dir, "test", f.Name())) + } + _, err = il.ValidateRepo("test") + So(err, ShouldNotBeNil) + os.RemoveAll(path.Join(dir, "test")) + _, err = il.ValidateRepo("test") + So(err, ShouldNotBeNil) + err = os.Chmod(dir, 0000) // remove all perms + So(err, ShouldBeNil) + if os.Geteuid() != 0 { + So(func() { _, _ = il.ValidateRepo("test") }, ShouldPanic) + } + os.RemoveAll(dir) + _, err = il.GetRepositories() + So(err, ShouldNotBeNil) + }) + + Convey("Invalid get image tags", t, func(c C) { + var ilfs storage.ImageStoreFS + _, err := ilfs.GetImageTags("test") + So(err, ShouldNotBeNil) + + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != nil { + panic(err) + } + defer os.RemoveAll(dir) + il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) + So(il, ShouldNotBeNil) + So(il.InitRepo("test"), ShouldBeNil) + So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil) + _, err = il.GetImageTags("test") + So(err, ShouldNotBeNil) + So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil) + So(il.InitRepo("test"), ShouldBeNil) + So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600), ShouldBeNil) + _, err = il.GetImageTags("test") + So(err, ShouldNotBeNil) + }) + + Convey("Invalid get image manifest", t, func(c C) { + var ilfs storage.ImageStoreFS + _, _, _, err := ilfs.GetImageManifest("test", "") + So(err, ShouldNotBeNil) + + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != nil { + panic(err) + } + defer os.RemoveAll(dir) + il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) + So(il, ShouldNotBeNil) + So(il.InitRepo("test"), ShouldBeNil) + So(os.Chmod(path.Join(dir, "test", "index.json"), 0000), 
ShouldBeNil)
+ _, _, _, err = il.GetImageManifest("test", "")
+ So(err, ShouldNotBeNil)
+ So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)
+ _, _, _, err = il.GetImageManifest("test", "")
+ So(err, ShouldNotBeNil)
+ So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil)
+ So(il.InitRepo("test"), ShouldBeNil)
+ So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600), ShouldBeNil)
+ _, _, _, err = il.GetImageManifest("test", "")
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("Invalid new blob upload", t, func(c C) {
+ dir, err := ioutil.TempDir("", "oci-repo-test")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(dir)
+
+ il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
+ So(il, ShouldNotBeNil)
+ So(il.InitRepo("test"), ShouldBeNil)
+
+ So(os.Chmod(path.Join(dir, "test", ".uploads"), 0000), ShouldBeNil)
+ _, err = il.NewBlobUpload("test")
+ So(err, ShouldNotBeNil)
+
+ So(os.Chmod(path.Join(dir, "test"), 0000), ShouldBeNil)
+ _, err = il.NewBlobUpload("test")
+ So(err, ShouldNotBeNil)
+
+ So(os.Chmod(path.Join(dir, "test"), 0755), ShouldBeNil)
+ So(il.InitRepo("test"), ShouldBeNil)
+
+ _, err = il.NewBlobUpload("test")
+ So(err, ShouldNotBeNil)
+
+ So(os.Chmod(path.Join(dir, "test", ".uploads"), 0755), ShouldBeNil)
+ v, err := il.NewBlobUpload("test")
+ So(err, ShouldBeNil)
+
+ So(os.Chmod(path.Join(dir, "test", ".uploads"), 0000), ShouldBeNil)
+ content := []byte("test-data3")
+ buf := bytes.NewBuffer(content)
+ l := buf.Len()
+ _, err = il.PutBlobChunkStreamed("test", v, buf)
+ So(err, ShouldNotBeNil)
+
+ _, err = il.PutBlobChunk("test", v, 0, int64(l), buf)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("Invalid dedupe scenarios", t, func() {
+ dir, err := ioutil.TempDir("", "oci-repo-test")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(dir)
+
+ il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
+ v, err := il.NewBlobUpload("dedupe1")
+ So(err, 
ShouldBeNil) + So(v, ShouldNotBeEmpty) + + content := []byte("test-data3") + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + b, err := il.PutBlobChunkStreamed("dedupe1", v, buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + blobDigest1 := strings.Split(d.String(), ":")[1] + So(blobDigest1, ShouldNotBeEmpty) + + err = il.FinishBlobUpload("dedupe1", v, buf, d.String()) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + // Create a file at the same place where FinishBlobUpload will create + err = il.InitRepo("dedupe2") + So(err, ShouldBeNil) + + err = os.MkdirAll(path.Join(dir, "dedupe2", "blobs/sha256"), 0755) + So(err, ShouldBeNil) + + err = ioutil.WriteFile(path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1), content, 0755) // nolint: gosec + So(err, ShouldBeNil) + + v, err = il.NewBlobUpload("dedupe2") + So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + + content = []byte("test-data3") + buf = bytes.NewBuffer(content) + l = buf.Len() + d = godigest.FromBytes(content) + b, err = il.PutBlobChunkStreamed("dedupe2", v, buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + cmd := exec.Command("sudo", "chattr", "+i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec + _, err = cmd.Output() + if err != nil { + panic(err) + } + + err = il.FinishBlobUpload("dedupe2", v, buf, d.String()) + So(err, ShouldNotBeNil) + So(b, ShouldEqual, l) + + cmd = exec.Command("sudo", "chattr", "-i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec + _, err = cmd.Output() + if err != nil { + panic(err) + } + + err = il.FinishBlobUpload("dedupe2", v, buf, d.String()) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + }) +} + +func TestHardLink(t *testing.T) { + Convey("Test that ValidateHardLink creates rootDir if it does not exist", t, func() { + var randomDir string + + rand.Seed(time.Now().UnixNano()) + for { + randomLen := rand.Intn(100) + randomDir = "/tmp/" + randSeq(randomLen) + + if _, err := 
os.Stat(randomDir); os.IsNotExist(err) { + break + } + } + defer os.RemoveAll(randomDir) + + err := storage.ValidateHardLink(randomDir) + So(err, ShouldBeNil) + }) + Convey("Test that ValidateHardLink returns error if rootDir is a file", t, func() { + dir, err := ioutil.TempDir("", "storage-hard-test") + if err != nil { + panic(err) + } + defer os.RemoveAll(dir) + + filePath := path.Join(dir, "file.txt") + err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0644) //nolint: gosec + if err != nil { + panic(err) + } + + err = storage.ValidateHardLink(filePath) + So(err, ShouldNotBeNil) + }) + Convey("Test if filesystem supports hardlink", t, func() { + dir, err := ioutil.TempDir("", "storage-hard-test") + if err != nil { + panic(err) + } + defer os.RemoveAll(dir) + + err = storage.ValidateHardLink(dir) + So(err, ShouldBeNil) + + err = ioutil.WriteFile(path.Join(dir, "hardtest.txt"), []byte("testing hard link code"), 0644) //nolint: gosec + if err != nil { + panic(err) + } + + err = os.Chmod(dir, 0400) + if err != nil { + panic(err) + } + + err = os.Link(path.Join(dir, "hardtest.txt"), path.Join(dir, "duphardtest.txt")) + So(err, ShouldNotBeNil) + + err = os.Chmod(dir, 0644) + if err != nil { + panic(err) + } + }) +} + +func randSeq(n int) string { + var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + + return string(b) +} diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index bdbc3d71c3..a78130f7fb 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -2,412 +2,520 @@ package storage_test import ( "bytes" + "context" _ "crypto/sha256" "encoding/json" + "fmt" "io/ioutil" - "math/rand" "os" - "os/exec" "path" "strings" "sync" + + //"strings" "testing" - "time" - "github.com/anuvu/zot/errors" "github.com/anuvu/zot/pkg/log" "github.com/anuvu/zot/pkg/storage" + 
"github.com/anuvu/zot/pkg/storage/s3" + guuid "github.com/gofrs/uuid" godigest "github.com/opencontainers/go-digest" ispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/rs/zerolog" . "github.com/smartystreets/goconvey/convey" -) - -func TestAPIs(t *testing.T) { - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - - defer os.RemoveAll(dir) - - il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - - Convey("Repo layout", t, func(c C) { - repoName := "test" - - Convey("Validate repo without initialization", func() { - v, err := il.ValidateRepo(repoName) - So(v, ShouldEqual, false) - So(err, ShouldNotBeNil) - ok := il.DirExists(path.Join(il.RootDir(), repoName)) - So(ok, ShouldBeFalse) - }) - - Convey("Initialize repo", func() { - err := il.InitRepo(repoName) - So(err, ShouldBeNil) - ok := il.DirExists(path.Join(il.RootDir(), repoName)) - So(ok, ShouldBeTrue) - storeController := storage.StoreController{} - storeController.DefaultStore = il - So(storeController.GetImageStore("test"), ShouldResemble, il) - }) - - Convey("Validate repo", func() { - v, err := il.ValidateRepo(repoName) - So(err, ShouldBeNil) - So(v, ShouldEqual, true) - }) - - Convey("Get repos", func() { - v, err := il.GetRepositories() - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - }) - - Convey("Get image tags", func() { - v, err := il.GetImageTags("test") - So(err, ShouldBeNil) - So(v, ShouldBeEmpty) - }) - - Convey("Full blob upload", func() { - body := []byte("this is a blob") - buf := bytes.NewBuffer(body) - d := godigest.FromBytes(body) - u, n, err := il.FullBlobUpload("test", buf, d.String()) - So(err, ShouldBeNil) - So(n, ShouldEqual, len(body)) - So(u, ShouldNotBeEmpty) - }) - - Convey("New blob upload", func() { - v, err := il.NewBlobUpload("test") - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - - err = il.DeleteBlobUpload("test", v) - So(err, ShouldBeNil) - - v, err = il.NewBlobUpload("test") - 
So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - - Convey("Get blob upload", func() { - b, err := il.GetBlobUpload("test", "invalid") - So(err, ShouldNotBeNil) - So(b, ShouldEqual, -1) + "gopkg.in/resty.v1" - b, err = il.GetBlobUpload("test", v) - So(err, ShouldBeNil) - So(b, ShouldBeGreaterThanOrEqualTo, 0) - - b, err = il.BlobUploadInfo("test", v) - So(err, ShouldBeNil) - So(b, ShouldBeGreaterThanOrEqualTo, 0) - - content := []byte("test-data1") - firstChunkContent := []byte("test") - firstChunkBuf := bytes.NewBuffer(firstChunkContent) - secondChunkContent := []byte("-data1") - secondChunkBuf := bytes.NewBuffer(secondChunkContent) - firstChunkLen := firstChunkBuf.Len() - secondChunkLen := secondChunkBuf.Len() - - buf := bytes.NewBuffer(content) - l := buf.Len() - d := godigest.FromBytes(content) - blobDigest := d - - // invalid chunk range - _, err = il.PutBlobChunk("test", v, 10, int64(l), buf) - So(err, ShouldNotBeNil) - - b, err = il.PutBlobChunk("test", v, 0, int64(firstChunkLen), firstChunkBuf) - So(err, ShouldBeNil) - So(b, ShouldEqual, firstChunkLen) - - b, err = il.GetBlobUpload("test", v) - So(err, ShouldBeNil) - So(b, ShouldEqual, int64(firstChunkLen)) - - b, err = il.BlobUploadInfo("test", v) - So(err, ShouldBeNil) - So(b, ShouldEqual, int64(firstChunkLen)) - - b, err = il.PutBlobChunk("test", v, int64(firstChunkLen), int64(l), secondChunkBuf) - So(err, ShouldBeNil) - So(b, ShouldEqual, secondChunkLen) - - err = il.FinishBlobUpload("test", v, buf, d.String()) - So(err, ShouldBeNil) + // Add s3 support + "github.com/docker/distribution/registry/storage/driver" + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" +) - _, _, err = il.CheckBlob("test", d.String()) - So(err, ShouldBeNil) +func cleanupStorage(store storageDriver.StorageDriver, name string) { + _ = store.Delete(context.Background(), name) 
+}
- _, _, err = il.GetBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
- So(err, ShouldBeNil)
+func skipIt(t *testing.T) {
+ if os.Getenv("S3MOCK_ENDPOINT") == "" {
+ t.Skip("Skipping testing without AWS S3 mock server")
+ }
+}
- m := ispec.Manifest{}
- m.SchemaVersion = 2
- mb, _ := json.Marshal(m)
+func createObjectsStore(rootDir string) (storageDriver.StorageDriver, storage.ImageStore, error) {
+ bucket := "zot-storage-test"
+ endpoint := os.Getenv("S3MOCK_ENDPOINT")
+ storageDriverParams := map[string]interface{}{
+ "rootDir": rootDir,
+ "name": "s3",
+ "region": "us-east-2",
+ "bucket": bucket,
+ "regionendpoint": endpoint,
+ "secure": false,
+ "skipverify": false,
+ }
- Convey("Bad image manifest", func() {
- _, err = il.PutImageManifest("test", d.String(), "application/json", mb)
- So(err, ShouldNotBeNil)
+ storeName := fmt.Sprintf("%v", storageDriverParams["name"])
- _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte{})
- So(err, ShouldNotBeNil)
+ store, err := factory.Create(storeName, storageDriverParams)
+ if err != nil {
+ panic(err)
+ }
- _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte(`{"test":true}`))
- So(err, ShouldNotBeNil)
+ // create bucket if it doesn't exist
+ _, err = resty.R().Put("http://" + endpoint + "/" + bucket)
+ if err != nil {
+ panic(err)
+ }
- _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
- So(err, ShouldNotBeNil)
+ il := s3.NewImageStore(rootDir, false, false, log.Logger{Logger: zerolog.New(os.Stdout)}, store)
- _, _, _, err = il.GetImageManifest("test", d.String())
- So(err, ShouldNotBeNil)
+ return store, il, err
+}
- _, _, _, err = il.GetImageManifest("inexistent", d.String())
- So(err, ShouldNotBeNil)
+// nolint: gochecknoglobals
+var testCases = []struct {
+ testCaseName string
+ storageType string
+}{
+ {
+ testCaseName: "S3APIs",
+ storageType: "s3",
+ },
+ {
+ testCaseName: 
"FileSystemAPIs", + storageType: "fs", + }, +} - annotationsMap := make(map[string]string) - annotationsMap[ispec.AnnotationRefName] = "1.0" - m := ispec.Manifest{ - Config: ispec.Descriptor{ - Digest: d, - Size: int64(l), - }, - Layers: []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Digest: d, - Size: int64(l), - }, - }, - Annotations: annotationsMap, - } +func TestStorageAPIs(t *testing.T) { + for _, testcase := range testCases { + testcase := testcase + t.Run(testcase.testCaseName, func(t *testing.T) { + var il storage.ImageStore + if testcase.storageType == "s3" { + skipIt(t) + + uuid, err := guuid.NewV4() + if err != nil { + panic(err) + } + + testDir := path.Join("/oci-repo-test", uuid.String()) + + var store driver.StorageDriver + store, il, _ = createObjectsStore(testDir) + defer cleanupStorage(store, testDir) + } else { + dir, err := ioutil.TempDir("", "oci-repo-test") + if err != nil { + panic(err) + } + + defer os.RemoveAll(dir) + + il = storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) + } - m.SchemaVersion = 2 - mb, _ = json.Marshal(m) - d := godigest.FromBytes(mb) + Convey("Repo layout", t, func(c C) { + repoName := "test" - So(os.Chmod(path.Join(il.RootDir(), "test", "index.json"), 0000), ShouldBeNil) - _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb) + Convey("Validate repo without initialization", func() { + v, err := il.ValidateRepo(repoName) + So(v, ShouldEqual, false) So(err, ShouldNotBeNil) + ok := il.DirExists(path.Join(il.RootDir(), repoName)) + So(ok, ShouldBeFalse) + }) - So(os.Chmod(path.Join(il.RootDir(), "test", "index.json"), 0755), ShouldBeNil) - _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb) + Convey("Initialize repo", func() { + err := il.InitRepo(repoName) So(err, ShouldBeNil) - - manifestPath := path.Join(il.RootDir(), "test", "blobs", d.Algorithm().String(), d.Encoded()) - - So(os.Chmod(manifestPath, 
0000), ShouldBeNil) - _, _, _, err = il.GetImageManifest("test", d.String()) - So(err, ShouldNotBeNil) - - So(os.Remove(manifestPath), ShouldBeNil) - _, _, _, err = il.GetImageManifest("test", d.String()) - So(err, ShouldNotBeNil) - - So(os.Chmod(path.Join(il.RootDir(), "test"), 0000), ShouldBeNil) - _, err = il.PutImageManifest("test", "2.0", ispec.MediaTypeImageManifest, mb) - So(err, ShouldNotBeNil) - So(os.Chmod(path.Join(il.RootDir(), "test"), 0755), ShouldBeNil) - So(os.RemoveAll(path.Join(il.RootDir(), "test")), ShouldBeNil) + ok := il.DirExists(path.Join(il.RootDir(), repoName)) + So(ok, ShouldBeTrue) + storeController := storage.StoreController{} + storeController.DefaultStore = il + So(storeController.GetImageStore("test"), ShouldResemble, il) }) - Convey("Good image manifest", func() { - annotationsMap := make(map[string]string) - annotationsMap[ispec.AnnotationRefName] = "1.0" - m := ispec.Manifest{ - Config: ispec.Descriptor{ - Digest: d, - Size: int64(l), - }, - Layers: []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Digest: d, - Size: int64(l), - }, - }, - Annotations: annotationsMap, - } - - m.SchemaVersion = 2 - mb, _ = json.Marshal(m) - d := godigest.FromBytes(mb) - - // bad manifest - m.Layers[0].Digest = godigest.FromBytes([]byte("inexistent")) - badMb, _ := json.Marshal(m) - - _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, badMb) - So(err, ShouldNotBeNil) - - _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb) + Convey("Validate repo", func() { + v, err := il.ValidateRepo(repoName) So(err, ShouldBeNil) + So(v, ShouldEqual, true) + }) - // same manifest for coverage - _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb) + Convey("Get repos", func() { + v, err := il.GetRepositories() So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + }) - _, err = il.PutImageManifest("test", "2.0", ispec.MediaTypeImageManifest, mb) + Convey("Get image 
tags", func() { + v, err := il.GetImageTags("test") So(err, ShouldBeNil) + So(v, ShouldBeEmpty) + }) - _, err = il.PutImageManifest("test", "3.0", ispec.MediaTypeImageManifest, mb) + Convey("Full blob upload", func() { + body := []byte("this is a blob") + buf := bytes.NewBuffer(body) + d := godigest.FromBytes(body) + u, n, err := il.FullBlobUpload("test", buf, d.String()) So(err, ShouldBeNil) + So(n, ShouldEqual, len(body)) + So(u, ShouldNotBeEmpty) + }) - _, err = il.GetImageTags("inexistent") - So(err, ShouldNotBeNil) - - // total tags should be 3 but they have same reference. - tags, err := il.GetImageTags("test") + Convey("New blob upload", func() { + v, err := il.NewBlobUpload("test") So(err, ShouldBeNil) - So(len(tags), ShouldEqual, 3) + So(v, ShouldNotBeEmpty) - _, _, _, err = il.GetImageManifest("test", d.String()) + err = il.DeleteBlobUpload("test", v) So(err, ShouldBeNil) - _, _, _, err = il.GetImageManifest("test", "3.0") + v, err = il.NewBlobUpload("test") So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + + Convey("Get blob upload", func() { + b, err := il.GetBlobUpload("test", "invalid") + So(err, ShouldNotBeNil) + So(b, ShouldEqual, -1) + + b, err = il.GetBlobUpload("test", v) + So(err, ShouldBeNil) + So(b, ShouldBeGreaterThanOrEqualTo, 0) + + b, err = il.BlobUploadInfo("test", v) + So(err, ShouldBeNil) + So(b, ShouldBeGreaterThanOrEqualTo, 0) + + content := []byte("test-data1") + firstChunkContent := []byte("test") + firstChunkBuf := bytes.NewBuffer(firstChunkContent) + secondChunkContent := []byte("-data1") + secondChunkBuf := bytes.NewBuffer(secondChunkContent) + firstChunkLen := firstChunkBuf.Len() + secondChunkLen := secondChunkBuf.Len() + + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + blobDigest := d + + // invalid chunk range + _, err = il.PutBlobChunk("test", v, 10, int64(l), buf) + So(err, ShouldNotBeNil) + + b, err = il.PutBlobChunk("test", v, 0, int64(firstChunkLen), firstChunkBuf) + So(err, 
ShouldBeNil) + So(b, ShouldEqual, firstChunkLen) + + b, err = il.GetBlobUpload("test", v) + So(err, ShouldBeNil) + So(b, ShouldEqual, int64(firstChunkLen)) + + b, err = il.BlobUploadInfo("test", v) + So(err, ShouldBeNil) + So(b, ShouldEqual, int64(firstChunkLen)) + + b, err = il.PutBlobChunk("test", v, int64(firstChunkLen), int64(l), secondChunkBuf) + So(err, ShouldBeNil) + So(b, ShouldEqual, secondChunkLen) + + err = il.FinishBlobUpload("test", v, buf, d.String()) + So(err, ShouldBeNil) + + _, _, err = il.CheckBlob("test", d.String()) + So(err, ShouldBeNil) + + _, _, err = il.GetBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip") + So(err, ShouldBeNil) + + m := ispec.Manifest{} + m.SchemaVersion = 2 + mb, _ := json.Marshal(m) + + Convey("Bad image manifest", func() { + _, err = il.PutImageManifest("test", d.String(), "application/json", mb) + So(err, ShouldNotBeNil) + + _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte{}) + So(err, ShouldNotBeNil) + + _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte(`{"test":true}`)) + So(err, ShouldNotBeNil) - err = il.DeleteImageManifest("test", "1.0") - So(err, ShouldBeNil) + _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb) + So(err, ShouldNotBeNil) + + _, _, _, err = il.GetImageManifest("test", d.String()) + So(err, ShouldNotBeNil) + + _, _, _, err = il.GetImageManifest("inexistent", d.String()) + So(err, ShouldNotBeNil) + }) + + Convey("Good image manifest", func() { + annotationsMap := make(map[string]string) + annotationsMap[ispec.AnnotationRefName] = "1.0" + m := ispec.Manifest{ + Config: ispec.Descriptor{ + Digest: d, + Size: int64(l), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: d, + Size: int64(l), + }, + }, + Annotations: annotationsMap, + } + + m.SchemaVersion = 2 + mb, _ = json.Marshal(m) + d := godigest.FromBytes(mb) + + // bad 
manifest + m.Layers[0].Digest = godigest.FromBytes([]byte("inexistent")) + badMb, _ := json.Marshal(m) - tags, err = il.GetImageTags("test") - So(err, ShouldBeNil) - So(len(tags), ShouldEqual, 2) + _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, badMb) + So(err, ShouldNotBeNil) + + _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) + + // same manifest for coverage + _, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) + + _, err = il.PutImageManifest("test", "2.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) - // We deleted only one tag, make sure blob should not be removed. - hasBlob, _, err := il.CheckBlob("test", d.String()) - So(err, ShouldBeNil) - So(hasBlob, ShouldEqual, true) + _, err := il.PutImageManifest("test", "3.0", ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) - // invalid DeleteImageManifest - indexPath := path.Join(il.RootDir(), "test", "index.json") - So(os.Chmod(indexPath, 0000), ShouldBeNil) + _, err = il.GetImageTags("inexistent") + So(err, ShouldNotBeNil) - err = il.DeleteImageManifest("test", d.String()) - So(err, ShouldNotBeNil) + // total tags should be 3 but they have same reference. + tags, err := il.GetImageTags("test") + So(err, ShouldBeNil) + So(len(tags), ShouldEqual, 3) - So(os.Chmod(indexPath, 0755), ShouldBeNil) + _, _, _, err = il.GetImageManifest("test", d.String()) + So(err, ShouldBeNil) - // If we pass reference all manifest with input reference should be deleted. - err = il.DeleteImageManifest("test", d.String()) - So(err, ShouldBeNil) + _, _, _, err = il.GetImageManifest("test", "3.0") + So(err, ShouldBeNil) - tags, err = il.GetImageTags("test") - So(err, ShouldBeNil) - So(len(tags), ShouldEqual, 0) + err = il.DeleteImageManifest("test", "1.0") + So(err, ShouldBeNil) - // All tags/references are deleted, blob should not be present in disk. 
- hasBlob, _, err = il.CheckBlob("test", d.String()) - So(err, ShouldNotBeNil) - So(hasBlob, ShouldEqual, false) + tags, err = il.GetImageTags("test") + So(err, ShouldBeNil) + So(len(tags), ShouldEqual, 2) - err = il.DeleteBlob("test", "inexistent") - So(err, ShouldNotBeNil) + // We deleted only one tag, make sure blob should not be removed. + hasBlob, _, err := il.CheckBlob("test", d.String()) + So(err, ShouldBeNil) + So(hasBlob, ShouldEqual, true) - err = il.DeleteBlob("test", godigest.FromBytes([]byte("inexistent")).String()) - So(err, ShouldNotBeNil) + // If we pass reference all manifest with input reference should be deleted. + err = il.DeleteImageManifest("test", d.String()) + So(err, ShouldBeNil) - err = il.DeleteBlob("test", blobDigest.String()) - So(err, ShouldBeNil) + tags, err = il.GetImageTags("test") + So(err, ShouldBeNil) + So(len(tags), ShouldEqual, 0) + + // All tags/references are deleted, blob should not be present in disk. + hasBlob, _, err = il.CheckBlob("test", d.String()) + So(err, ShouldNotBeNil) + So(hasBlob, ShouldEqual, false) + + err = il.DeleteBlob("test", "inexistent") + So(err, ShouldNotBeNil) + + err = il.DeleteBlob("test", godigest.FromBytes([]byte("inexistent")).String()) + So(err, ShouldNotBeNil) - _, _, _, err = il.GetImageManifest("test", d.String()) + err = il.DeleteBlob("test", blobDigest.String()) + So(err, ShouldBeNil) + + _, _, _, err = il.GetImageManifest("test", d.String()) + So(err, ShouldNotBeNil) + }) + }) + + err = il.DeleteBlobUpload("test", v) So(err, ShouldNotBeNil) }) - }) - err = il.DeleteBlobUpload("test", v) - So(err, ShouldNotBeNil) - }) + Convey("New blob upload streamed", func() { + v, err := il.NewBlobUpload("test") + So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) + + Convey("Get blob upload", func() { + err = il.FinishBlobUpload("test", v, bytes.NewBuffer([]byte{}), "inexistent") + So(err, ShouldNotBeNil) + + b, err := il.GetBlobUpload("test", "invalid") + So(err, ShouldNotBeNil) + So(b, ShouldEqual, -1) + 
+ b, err = il.GetBlobUpload("test", v) + So(err, ShouldBeNil) + So(b, ShouldBeGreaterThanOrEqualTo, 0) + + _, err = il.BlobUploadInfo("test", "inexistent") + So(err, ShouldNotBeNil) + + b, err = il.BlobUploadInfo("test", v) + So(err, ShouldBeNil) + So(b, ShouldBeGreaterThanOrEqualTo, 0) + + content := []byte("test-data2") + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + b, err = il.PutBlobChunkStreamed("test", v, buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + + _, err = il.PutBlobChunkStreamed("test", "inexistent", buf) + So(err, ShouldNotBeNil) - Convey("New blob upload streamed", func() { - v, err := il.NewBlobUpload("test") - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) + err = il.FinishBlobUpload("test", "inexistent", buf, d.String()) + So(err, ShouldNotBeNil) - Convey("Get blob upload", func() { - err = il.FinishBlobUpload("test", v, bytes.NewBuffer([]byte{}), "inexistent") - So(err, ShouldNotBeNil) + err = il.FinishBlobUpload("test", v, buf, d.String()) + So(err, ShouldBeNil) - b, err := il.GetBlobUpload("test", "invalid") - So(err, ShouldNotBeNil) - So(b, ShouldEqual, -1) + _, _, err = il.CheckBlob("test", d.String()) + So(err, ShouldBeNil) - b, err = il.GetBlobUpload("test", v) - So(err, ShouldBeNil) - So(b, ShouldBeGreaterThanOrEqualTo, 0) + _, _, err = il.GetBlob("test", "inexistent", "application/vnd.oci.image.layer.v1.tar+gzip") + So(err, ShouldNotBeNil) - _, err = il.BlobUploadInfo("test", "inexistent") - So(err, ShouldNotBeNil) + _, _, err = il.GetBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip") + So(err, ShouldBeNil) - b, err = il.BlobUploadInfo("test", v) - So(err, ShouldBeNil) - So(b, ShouldBeGreaterThanOrEqualTo, 0) + blobContent, err := il.GetBlobContent("test", d.String()) + So(err, ShouldBeNil) + So(content, ShouldResemble, blobContent) - content := []byte("test-data2") - buf := bytes.NewBuffer(content) - l := buf.Len() - d := godigest.FromBytes(content) - b, err = 
il.PutBlobChunkStreamed("test", v, buf) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) + _, err = il.GetBlobContent("inexistent", d.String()) + So(err, ShouldNotBeNil) - _, err = il.PutBlobChunkStreamed("test", "inexistent", buf) - So(err, ShouldNotBeNil) + m := ispec.Manifest{} + m.SchemaVersion = 2 + mb, _ := json.Marshal(m) - err = il.FinishBlobUpload("test", "inexistent", buf, d.String()) - So(err, ShouldNotBeNil) + Convey("Bad digests", func() { + _, _, err := il.FullBlobUpload("test", bytes.NewBuffer([]byte{}), "inexistent") + So(err, ShouldNotBeNil) - err = il.FinishBlobUpload("test", v, buf, d.String()) - So(err, ShouldBeNil) + _, _, err = il.CheckBlob("test", "inexistent") + So(err, ShouldNotBeNil) + }) - _, _, err = il.CheckBlob("test", d.String()) - So(err, ShouldBeNil) + Convey("Bad image manifest", func() { + _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb) + So(err, ShouldNotBeNil) - _, _, err = il.GetBlob("test", "inexistent", "application/vnd.oci.image.layer.v1.tar+gzip") - So(err, ShouldNotBeNil) + _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte("bad json")) + So(err, ShouldNotBeNil) - _, _, err = il.GetBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip") - So(err, ShouldBeNil) + _, _, _, err = il.GetImageManifest("test", d.String()) + So(err, ShouldNotBeNil) + }) - blobContent, err := il.GetBlobContent("test", d.String()) - So(err, ShouldBeNil) - So(content, ShouldResemble, blobContent) + Convey("Good image manifest", func() { + m := ispec.Manifest{ + Config: ispec.Descriptor{ + Digest: d, + Size: int64(l), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: d, + Size: int64(l), + }, + }, + } + m.SchemaVersion = 2 + mb, _ = json.Marshal(m) + d := godigest.FromBytes(mb) + _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) - _, err = 
il.GetBlobContent("inexistent", d.String()) - So(err, ShouldNotBeNil) + // same manifest for coverage + _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb) + So(err, ShouldBeNil) - m := ispec.Manifest{} - m.SchemaVersion = 2 - mb, _ := json.Marshal(m) + _, _, _, err = il.GetImageManifest("test", d.String()) + So(err, ShouldBeNil) + + _, err = il.GetIndexContent("inexistent") + So(err, ShouldNotBeNil) + + indexContent, err := il.GetIndexContent("test") + So(err, ShouldBeNil) - Convey("Bad digests", func() { - _, _, err := il.FullBlobUpload("test", bytes.NewBuffer([]byte{}), "inexistent") - So(err, ShouldNotBeNil) + var index ispec.Index + + err = json.Unmarshal(indexContent, &index) + So(err, ShouldBeNil) + + So(len(index.Manifests), ShouldEqual, 1) + err = il.DeleteImageManifest("test", "1.0") + So(err, ShouldNotBeNil) + + err = il.DeleteImageManifest("inexistent", "1.0") + So(err, ShouldNotBeNil) + + err = il.DeleteImageManifest("test", d.String()) + So(err, ShouldBeNil) + + _, _, _, err = il.GetImageManifest("test", d.String()) + So(err, ShouldNotBeNil) + }) + }) - _, _, err = il.CheckBlob("test", "inexistent") + err = il.DeleteBlobUpload("test", v) So(err, ShouldNotBeNil) }) - Convey("Bad image manifest", func() { - _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb) - So(err, ShouldNotBeNil) + Convey("Modify manifest in-place", func() { + // original blob + v, err := il.NewBlobUpload("replace") + So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) - _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte("bad json")) - So(err, ShouldNotBeNil) + content := []byte("test-data-replace-1") + buf := bytes.NewBuffer(content) + l := buf.Len() + d := godigest.FromBytes(content) + b, err := il.PutBlobChunkStreamed("replace", v, buf) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) + blobDigest1 := strings.Split(d.String(), ":")[1] + So(blobDigest1, ShouldNotBeEmpty) - _, _, _, err = 
il.GetImageManifest("test", d.String()) - So(err, ShouldNotBeNil) - }) + err = il.FinishBlobUpload("replace", v, buf, d.String()) + So(err, ShouldBeNil) + So(b, ShouldEqual, l) - Convey("Good image manifest", func() { - m := ispec.Manifest{ + m := ispec.Manifest{} + m.SchemaVersion = 2 + m = ispec.Manifest{ Config: ispec.Descriptor{ Digest: d, Size: int64(l), @@ -420,691 +528,163 @@ func TestAPIs(t *testing.T) { }, }, } - m.SchemaVersion = 2 - mb, _ = json.Marshal(m) - d := godigest.FromBytes(mb) - _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb) + mb, _ := json.Marshal(m) + d = godigest.FromBytes(mb) + _, err = il.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, mb) So(err, ShouldBeNil) - // same manifest for coverage - _, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb) + _, _, _, err = il.GetImageManifest("replace", d.String()) So(err, ShouldBeNil) - _, _, _, err = il.GetImageManifest("test", d.String()) + // new blob to replace + v, err = il.NewBlobUpload("replace") So(err, ShouldBeNil) + So(v, ShouldNotBeEmpty) - _, err = il.GetIndexContent("inexistent") - So(err, ShouldNotBeNil) - - indexContent, err := il.GetIndexContent("test") + content = []byte("test-data-replace-2") + buf = bytes.NewBuffer(content) + l = buf.Len() + d = godigest.FromBytes(content) + b, err = il.PutBlobChunkStreamed("replace", v, buf) So(err, ShouldBeNil) + So(b, ShouldEqual, l) + blobDigest2 := strings.Split(d.String(), ":")[1] + So(blobDigest2, ShouldNotBeEmpty) - var index ispec.Index - - err = json.Unmarshal(indexContent, &index) + err = il.FinishBlobUpload("replace", v, buf, d.String()) So(err, ShouldBeNil) + So(b, ShouldEqual, l) - So(len(index.Manifests), ShouldEqual, 1) - err = il.DeleteImageManifest("test", "1.0") - So(err, ShouldNotBeNil) - - err = il.DeleteImageManifest("inexistent", "1.0") - So(err, ShouldNotBeNil) - - err = il.DeleteImageManifest("test", d.String()) + m = ispec.Manifest{ + 
Config: ispec.Descriptor{ + Digest: d, + Size: int64(l), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: d, + Size: int64(l), + }, + }, + } + m.SchemaVersion = 2 + mb, _ = json.Marshal(m) + _ = godigest.FromBytes(mb) + _, err = il.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, mb) So(err, ShouldBeNil) + }) - _, _, _, err = il.GetImageManifest("test", d.String()) - So(err, ShouldNotBeNil) + Convey("Locks", func() { + // in parallel, a mix of read and write locks - mainly for coverage + var wg sync.WaitGroup + for i := 0; i < 1000; i++ { + wg.Add(2) + go func() { + defer wg.Done() + il.Lock() + func() {}() + il.Unlock() + }() + go func() { + defer wg.Done() + il.RLock() + func() {}() + il.RUnlock() + }() + } + wg.Wait() }) }) - - err = il.DeleteBlobUpload("test", v) - So(err, ShouldNotBeNil) - }) - - Convey("Modify manifest in-place", func() { - // original blob - v, err := il.NewBlobUpload("replace") - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - - content := []byte("test-data-replace-1") - buf := bytes.NewBuffer(content) - l := buf.Len() - d := godigest.FromBytes(content) - b, err := il.PutBlobChunkStreamed("replace", v, buf) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - blobDigest1 := strings.Split(d.String(), ":")[1] - So(blobDigest1, ShouldNotBeEmpty) - - err = il.FinishBlobUpload("replace", v, buf, d.String()) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - - m := ispec.Manifest{} - m.SchemaVersion = 2 - m = ispec.Manifest{ - Config: ispec.Descriptor{ - Digest: d, - Size: int64(l), - }, - Layers: []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Digest: d, - Size: int64(l), - }, - }, - } - m.SchemaVersion = 2 - mb, _ := json.Marshal(m) - d = godigest.FromBytes(mb) - _, err = il.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, mb) - So(err, ShouldBeNil) - - _, _, _, err = il.GetImageManifest("replace", d.String()) - So(err, 
ShouldBeNil) - - // new blob to replace - v, err = il.NewBlobUpload("replace") - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - - content = []byte("test-data-replace-2") - buf = bytes.NewBuffer(content) - l = buf.Len() - d = godigest.FromBytes(content) - b, err = il.PutBlobChunkStreamed("replace", v, buf) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - blobDigest2 := strings.Split(d.String(), ":")[1] - So(blobDigest2, ShouldNotBeEmpty) - - err = il.FinishBlobUpload("replace", v, buf, d.String()) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - - m = ispec.Manifest{ - Config: ispec.Descriptor{ - Digest: d, - Size: int64(l), - }, - Layers: []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Digest: d, - Size: int64(l), - }, - }, - } - m.SchemaVersion = 2 - mb, _ = json.Marshal(m) - _ = godigest.FromBytes(mb) - _, err = il.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, mb) - So(err, ShouldBeNil) - }) - - Convey("Dedupe", func() { - blobDigest1 := "" - blobDigest2 := "" - - // manifest1 - v, err := il.NewBlobUpload("dedupe1") - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - - content := []byte("test-data3") - buf := bytes.NewBuffer(content) - l := buf.Len() - d := godigest.FromBytes(content) - b, err := il.PutBlobChunkStreamed("dedupe1", v, buf) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - blobDigest1 = strings.Split(d.String(), ":")[1] - So(blobDigest1, ShouldNotBeEmpty) - - err = il.FinishBlobUpload("dedupe1", v, buf, d.String()) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - - _, _, err = il.CheckBlob("dedupe1", d.String()) - So(err, ShouldBeNil) - - _, _, err = il.GetBlob("dedupe1", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip") - So(err, ShouldBeNil) - - m := ispec.Manifest{} - m.SchemaVersion = 2 - m = ispec.Manifest{ - Config: ispec.Descriptor{ - Digest: d, - Size: int64(l), - }, - Layers: []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Digest: d, - Size: 
int64(l), - }, - }, - } - m.SchemaVersion = 2 - mb, _ := json.Marshal(m) - d = godigest.FromBytes(mb) - _, err = il.PutImageManifest("dedupe1", d.String(), ispec.MediaTypeImageManifest, mb) - So(err, ShouldBeNil) - - _, _, _, err = il.GetImageManifest("dedupe1", d.String()) - So(err, ShouldBeNil) - - // manifest2 - v, err = il.NewBlobUpload("dedupe2") - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - - content = []byte("test-data3") - buf = bytes.NewBuffer(content) - l = buf.Len() - d = godigest.FromBytes(content) - b, err = il.PutBlobChunkStreamed("dedupe2", v, buf) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - blobDigest2 = strings.Split(d.String(), ":")[1] - So(blobDigest2, ShouldNotBeEmpty) - - err = il.FinishBlobUpload("dedupe2", v, buf, d.String()) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - - _, _, err = il.CheckBlob("dedupe2", d.String()) - So(err, ShouldBeNil) - - _, _, err = il.GetBlob("dedupe2", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip") - So(err, ShouldBeNil) - - m = ispec.Manifest{} - m.SchemaVersion = 2 - m = ispec.Manifest{ - Config: ispec.Descriptor{ - Digest: d, - Size: int64(l), - }, - Layers: []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Digest: d, - Size: int64(l), - }, - }, - } - m.SchemaVersion = 2 - mb, _ = json.Marshal(m) - d = godigest.FromBytes(mb) - _, err = il.PutImageManifest("dedupe2", "1.0", ispec.MediaTypeImageManifest, mb) - So(err, ShouldBeNil) - - _, _, _, err = il.GetImageManifest("dedupe2", d.String()) - So(err, ShouldBeNil) - - // verify that dedupe with hard links happened - fi1, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest1)) - So(err, ShouldBeNil) - fi2, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest2)) - So(err, ShouldBeNil) - So(os.SameFile(fi1, fi2), ShouldBeTrue) - }) - - Convey("Locks", func() { - // in parallel, a mix of read and write locks - mainly for coverage - var wg sync.WaitGroup - for i := 0; i < 1000; 
i++ { - wg.Add(2) - go func() { - defer wg.Done() - il.Lock() - func() {}() - il.Unlock() - }() - go func() { - defer wg.Done() - il.RLock() - func() {}() - il.RUnlock() - }() - } - wg.Wait() - }) - }) -} - -func TestDedupe(t *testing.T) { - Convey("Dedupe", t, func(c C) { - Convey("Nil ImageStore", func() { - var is storage.ImageStore - So(func() { _ = is.DedupeBlob("", "", "") }, ShouldPanic) - }) - - Convey("Valid ImageStore", func() { - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - - is := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - - So(is.DedupeBlob("", "", ""), ShouldNotBeNil) }) - }) -} - -func TestNegativeCases(t *testing.T) { - Convey("Invalid root dir", t, func(c C) { - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - os.RemoveAll(dir) - - So(storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil) - if os.Geteuid() != 0 { - So(storage.NewImageStore("/deadBEEF", true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil) - } - }) - - Convey("Invalid init repo", t, func(c C) { - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - err = os.Chmod(dir, 0000) // remove all perms - So(err, ShouldBeNil) - if os.Geteuid() != 0 { - err = il.InitRepo("test") - So(err, ShouldNotBeNil) - } - - err = os.Chmod(dir, 0755) - So(err, ShouldBeNil) - - // Init repo should fail if repo is a file. 
- err = ioutil.WriteFile(path.Join(dir, "file-test"), []byte("this is test file"), 0755) // nolint:gosec - So(err, ShouldBeNil) - err = il.InitRepo("file-test") - So(err, ShouldNotBeNil) - - err = os.Mkdir(path.Join(dir, "test-dir"), 0755) - So(err, ShouldBeNil) - - err = il.InitRepo("test-dir") - So(err, ShouldBeNil) - }) - - Convey("Invalid validate repo", t, func(c C) { - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - So(il, ShouldNotBeNil) - So(il.InitRepo("test"), ShouldBeNil) - - err = os.MkdirAll(path.Join(dir, "invalid-test"), 0755) - So(err, ShouldBeNil) - - err = os.Chmod(path.Join(dir, "invalid-test"), 0000) // remove all perms - So(err, ShouldBeNil) - - _, err = il.ValidateRepo("invalid-test") - So(err, ShouldNotBeNil) - So(err, ShouldEqual, errors.ErrRepoNotFound) - - err = os.Chmod(path.Join(dir, "invalid-test"), 0755) // remove all perms - So(err, ShouldBeNil) - - err = ioutil.WriteFile(path.Join(dir, "invalid-test", "blobs"), []byte{}, 0755) // nolint: gosec - So(err, ShouldBeNil) - - err = ioutil.WriteFile(path.Join(dir, "invalid-test", "index.json"), []byte{}, 0755) // nolint: gosec - So(err, ShouldBeNil) - - err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte{}, 0755) // nolint: gosec - So(err, ShouldBeNil) - - isValid, err := il.ValidateRepo("invalid-test") - So(err, ShouldBeNil) - So(isValid, ShouldEqual, false) - - err = os.Remove(path.Join(dir, "invalid-test", "blobs")) - So(err, ShouldBeNil) - - err = os.Mkdir(path.Join(dir, "invalid-test", "blobs"), 0755) - So(err, ShouldBeNil) - - isValid, err = il.ValidateRepo("invalid-test") - So(err, ShouldNotBeNil) - So(isValid, ShouldEqual, false) - - err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte("{}"), 0755) // nolint: gosec - So(err, ShouldBeNil) - - isValid, err = 
il.ValidateRepo("invalid-test") - So(err, ShouldNotBeNil) - So(err, ShouldEqual, errors.ErrRepoBadVersion) - So(isValid, ShouldEqual, false) - - files, err := ioutil.ReadDir(path.Join(dir, "test")) - So(err, ShouldBeNil) - for _, f := range files { - os.Remove(path.Join(dir, "test", f.Name())) - } - _, err = il.ValidateRepo("test") - So(err, ShouldNotBeNil) - os.RemoveAll(path.Join(dir, "test")) - _, err = il.ValidateRepo("test") - So(err, ShouldNotBeNil) - err = os.Chmod(dir, 0000) // remove all perms - So(err, ShouldBeNil) - if os.Geteuid() != 0 { - So(func() { _, _ = il.ValidateRepo("test") }, ShouldPanic) - } - os.RemoveAll(dir) - _, err = il.GetRepositories() - So(err, ShouldNotBeNil) - }) - - Convey("Invalid get image tags", t, func(c C) { - var ilfs storage.ImageStoreFS - _, err := ilfs.GetImageTags("test") - So(err, ShouldNotBeNil) - - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - So(il, ShouldNotBeNil) - So(il.InitRepo("test"), ShouldBeNil) - So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil) - _, err = il.GetImageTags("test") - So(err, ShouldNotBeNil) - So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil) - So(il.InitRepo("test"), ShouldBeNil) - So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600), ShouldBeNil) - _, err = il.GetImageTags("test") - So(err, ShouldNotBeNil) - }) - - Convey("Invalid get image manifest", t, func(c C) { - var ilfs storage.ImageStoreFS - _, _, _, err := ilfs.GetImageManifest("test", "") - So(err, ShouldNotBeNil) - - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - So(il, ShouldNotBeNil) - So(il.InitRepo("test"), ShouldBeNil) - So(os.Chmod(path.Join(dir, "test", "index.json"), 0000), 
ShouldBeNil) - _, _, _, err = il.GetImageManifest("test", "") - So(err, ShouldNotBeNil) - So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil) - _, _, _, err = il.GetImageManifest("test", "") - So(err, ShouldNotBeNil) - So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil) - So(il.InitRepo("test"), ShouldBeNil) - So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600), ShouldBeNil) - _, _, _, err = il.GetImageManifest("test", "") - So(err, ShouldNotBeNil) - }) - - Convey("Invalid new blob upload", t, func(c C) { - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - - il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - So(il, ShouldNotBeNil) - So(il.InitRepo("test"), ShouldBeNil) - - So(os.Chmod(path.Join(dir, "test", ".uploads"), 0000), ShouldBeNil) - _, err = il.NewBlobUpload("test") - So(err, ShouldNotBeNil) - - So(os.Chmod(path.Join(dir, "test"), 0000), ShouldBeNil) - _, err = il.NewBlobUpload("test") - So(err, ShouldNotBeNil) - - So(os.Chmod(path.Join(dir, "test"), 0755), ShouldBeNil) - So(il.InitRepo("test"), ShouldBeNil) - - _, err = il.NewBlobUpload("test") - So(err, ShouldNotBeNil) - - So(os.Chmod(path.Join(dir, "test", ".uploads"), 0755), ShouldBeNil) - v, err := il.NewBlobUpload("test") - So(err, ShouldBeNil) - - So(os.Chmod(path.Join(dir, "test", ".uploads"), 0000), ShouldBeNil) - content := []byte("test-data3") - buf := bytes.NewBuffer(content) - l := buf.Len() - _, err = il.PutBlobChunkStreamed("test", v, buf) - So(err, ShouldNotBeNil) - - _, err = il.PutBlobChunk("test", v, 0, int64(l), buf) - So(err, ShouldNotBeNil) - }) - - Convey("Invalid dedupe scenarios", t, func() { - dir, err := ioutil.TempDir("", "oci-repo-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - - il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}) - v, err := il.NewBlobUpload("dedupe1") - So(err, 
ShouldBeNil) - So(v, ShouldNotBeEmpty) - - content := []byte("test-data3") - buf := bytes.NewBuffer(content) - l := buf.Len() - d := godigest.FromBytes(content) - b, err := il.PutBlobChunkStreamed("dedupe1", v, buf) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - - blobDigest1 := strings.Split(d.String(), ":")[1] - So(blobDigest1, ShouldNotBeEmpty) - - err = il.FinishBlobUpload("dedupe1", v, buf, d.String()) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - - // Create a file at the same place where FinishBlobUpload will create - err = il.InitRepo("dedupe2") - So(err, ShouldBeNil) - - err = os.MkdirAll(path.Join(dir, "dedupe2", "blobs/sha256"), 0755) - So(err, ShouldBeNil) - - err = ioutil.WriteFile(path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1), content, 0755) // nolint: gosec - So(err, ShouldBeNil) - - v, err = il.NewBlobUpload("dedupe2") - So(err, ShouldBeNil) - So(v, ShouldNotBeEmpty) - - content = []byte("test-data3") - buf = bytes.NewBuffer(content) - l = buf.Len() - d = godigest.FromBytes(content) - b, err = il.PutBlobChunkStreamed("dedupe2", v, buf) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - - cmd := exec.Command("sudo", "chattr", "+i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec - _, err = cmd.Output() - if err != nil { - panic(err) - } - - err = il.FinishBlobUpload("dedupe2", v, buf, d.String()) - So(err, ShouldNotBeNil) - So(b, ShouldEqual, l) - - cmd = exec.Command("sudo", "chattr", "-i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec - _, err = cmd.Output() - if err != nil { - panic(err) - } - - err = il.FinishBlobUpload("dedupe2", v, buf, d.String()) - So(err, ShouldBeNil) - So(b, ShouldEqual, l) - }) -} - -func TestHardLink(t *testing.T) { - Convey("Test that ValidateHardLink creates rootDir if it does not exist", t, func() { - var randomDir string - - rand.Seed(time.Now().UnixNano()) - for { - randomLen := rand.Intn(100) - randomDir = "/tmp/" + randSeq(randomLen) - - if _, err := 
os.Stat(randomDir); os.IsNotExist(err) { - break - } - } - defer os.RemoveAll(randomDir) - - err := storage.ValidateHardLink(randomDir) - So(err, ShouldBeNil) - }) - Convey("Test that ValidateHardLink returns error if rootDir is a file", t, func() { - dir, err := ioutil.TempDir("", "storage-hard-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - - filePath := path.Join(dir, "file.txt") - err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0644) //nolint: gosec - if err != nil { - panic(err) - } - - err = storage.ValidateHardLink(filePath) - So(err, ShouldNotBeNil) - }) - Convey("Test if filesystem supports hardlink", t, func() { - dir, err := ioutil.TempDir("", "storage-hard-test") - if err != nil { - panic(err) - } - defer os.RemoveAll(dir) - - err = storage.ValidateHardLink(dir) - So(err, ShouldBeNil) - - err = ioutil.WriteFile(path.Join(dir, "hardtest.txt"), []byte("testing hard link code"), 0644) //nolint: gosec - if err != nil { - panic(err) - } - - err = os.Chmod(dir, 0400) - if err != nil { - panic(err) - } - - err = os.Link(path.Join(dir, "hardtest.txt"), path.Join(dir, "duphardtest.txt")) - So(err, ShouldNotBeNil) - - err = os.Chmod(dir, 0644) - if err != nil { - panic(err) - } - }) + } } func TestStorageHandler(t *testing.T) { - Convey("Test storage handler", t, func() { - // Create temporary directory - firstRootDir, err := ioutil.TempDir("", "util_test") - if err != nil { - panic(err) - } - defer os.RemoveAll(firstRootDir) - - secondRootDir, err := ioutil.TempDir("", "util_test") - if err != nil { - panic(err) - } - defer os.RemoveAll(secondRootDir) - - thirdRootDir, err := ioutil.TempDir("", "util_test") - if err != nil { - panic(err) - } - defer os.RemoveAll(thirdRootDir) - - log := log.NewLogger("debug", "") - - // Create ImageStore - firstStore := storage.NewImageStore(firstRootDir, false, false, log) - - secondStore := storage.NewImageStore(secondRootDir, false, false, log) - - thirdStore := 
storage.NewImageStore(thirdRootDir, false, false, log) - - storeController := storage.StoreController{} - - storeController.DefaultStore = firstStore + for _, testcase := range testCases { + testcase := testcase + t.Run(testcase.testCaseName, func(t *testing.T) { + var firstStore storage.ImageStore + var secondStore storage.ImageStore + var thirdStore storage.ImageStore + var firstRootDir string + var secondRootDir string + var thirdRootDir string + + if testcase.storageType == "s3" { + skipIt(t) + var firstStorageDriver driver.StorageDriver + var secondStorageDriver driver.StorageDriver + var thirdStorageDriver driver.StorageDriver + + firstRootDir = "/util_test1" + firstStorageDriver, firstStore, _ = createObjectsStore(firstRootDir) + defer cleanupStorage(firstStorageDriver, firstRootDir) + + secondRootDir = "/util_test2" + secondStorageDriver, secondStore, _ = createObjectsStore(secondRootDir) + defer cleanupStorage(secondStorageDriver, secondRootDir) + + thirdRootDir = "/util_test3" + thirdStorageDriver, thirdStore, _ = createObjectsStore(thirdRootDir) + defer cleanupStorage(thirdStorageDriver, thirdRootDir) + } else { + // Create temporary directory + var err error + + firstRootDir, err = ioutil.TempDir("", "util_test") + if err != nil { + panic(err) + } + defer os.RemoveAll(firstRootDir) + + secondRootDir, err = ioutil.TempDir("", "util_test") + if err != nil { + panic(err) + } + defer os.RemoveAll(secondRootDir) + + thirdRootDir, err = ioutil.TempDir("", "util_test") + if err != nil { + panic(err) + } + defer os.RemoveAll(thirdRootDir) + + log := log.NewLogger("debug", "") + + // Create ImageStore + firstStore = storage.NewImageStore(firstRootDir, false, false, log) + + secondStore = storage.NewImageStore(secondRootDir, false, false, log) + + thirdStore = storage.NewImageStore(thirdRootDir, false, false, log) + } - subStore := make(map[string]storage.ImageStore) + Convey("Test storage handler", t, func() { + storeController := storage.StoreController{} - 
subStore["/a"] = secondStore - subStore["/b"] = thirdStore + storeController.DefaultStore = firstStore - storeController.SubStore = subStore + subStore := make(map[string]storage.ImageStore) - is := storeController.GetImageStore("zot-x-test") - So(is.RootDir(), ShouldEqual, firstRootDir) + subStore["/a"] = secondStore + subStore["/b"] = thirdStore - is = storeController.GetImageStore("a/zot-a-test") - So(is.RootDir(), ShouldEqual, secondRootDir) + storeController.SubStore = subStore - is = storeController.GetImageStore("b/zot-b-test") - So(is.RootDir(), ShouldEqual, thirdRootDir) + is := storeController.GetImageStore("zot-x-test") + So(is.RootDir(), ShouldEqual, firstRootDir) - is = storeController.GetImageStore("c/zot-c-test") - So(is.RootDir(), ShouldEqual, firstRootDir) - }) -} + is = storeController.GetImageStore("a/zot-a-test") + So(is.RootDir(), ShouldEqual, secondRootDir) -func randSeq(n int) string { - var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + is = storeController.GetImageStore("b/zot-b-test") + So(is.RootDir(), ShouldEqual, thirdRootDir) - b := make([]rune, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] + is = storeController.GetImageStore("c/zot-c-test") + So(is.RootDir(), ShouldEqual, firstRootDir) + }) + }) } - - return string(b) }