diff --git a/pkg/api/controller_test.go b/pkg/api/controller_test.go
index ddbd60e85b..9c78bde978 100644
--- a/pkg/api/controller_test.go
+++ b/pkg/api/controller_test.go
@@ -10650,6 +10650,167 @@ func RunAuthorizationTests(t *testing.T, client *resty.Client, baseURL, user str
 	})
 }
 
+func TestSupportedDigestAlgorithms(t *testing.T) {
+	port := test.GetFreePort()
+	baseURL := test.GetBaseURL(port)
+
+	conf := config.New()
+	conf.HTTP.Port = port
+
+	dir := t.TempDir()
+
+	ctlr := api.NewController(conf)
+	ctlr.Config.Storage.RootDirectory = dir
+	ctlr.Config.Storage.Dedupe = false
+	ctlr.Config.Storage.GC = false
+
+	cm := test.NewControllerManager(ctlr)
+
+	cm.StartAndWait(port)
+	defer cm.StopServer()
+
+	Convey("Test SHA512 single-arch image", t, func() {
+		image := CreateImageWithDigestAlgorithm(godigest.SHA512).
+			RandomLayers(1, 10).DefaultConfig().Build()
+
+		name := "algo-sha512"
+		tag := "singlearch"
+
+		err := UploadImage(image, baseURL, name, tag)
+		So(err, ShouldBeNil)
+
+		client := resty.New()
+
+		// The server picks canonical digests when tags are pushed
+		// See https://github.com/opencontainers/distribution-spec/issues/494
+		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
+		// but there is no way to specify a client preference
+		// so all we can do is verify the correct algorithm is returned
+
+		expectedDigestStr := image.DigestForAlgorithm(godigest.Canonical).String()
+
+		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
+		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
+	})
+
+	Convey("Test SHA384 single-arch image", t, func() {
+		image := CreateImageWithDigestAlgorithm(godigest.SHA384).
+			RandomLayers(1, 10).DefaultConfig().Build()
+
+		name := "algo-sha384"
+		tag := "singlearch"
+
+		err := UploadImage(image, baseURL, name, tag)
+		So(err, ShouldBeNil)
+
+		client := resty.New()
+
+		// The server picks canonical digests when tags are pushed
+		// See https://github.com/opencontainers/distribution-spec/issues/494
+		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
+		// but there is no way to specify a client preference
+		// so all we can do is verify the correct algorithm is returned
+
+		expectedDigestStr := image.DigestForAlgorithm(godigest.Canonical).String()
+
+		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
+		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
+	})
+
+	Convey("Test SHA512 multi-arch image", t, func() {
+		subImage1 := CreateImageWithDigestAlgorithm(godigest.SHA512).RandomLayers(1, 10).
+			DefaultConfig().Build()
+		subImage2 := CreateImageWithDigestAlgorithm(godigest.SHA512).RandomLayers(1, 10).
+			DefaultConfig().Build()
+		multiarch := CreateMultiarchWithDigestAlgorithm(godigest.SHA512).
+			Images([]Image{subImage1, subImage2}).Build()
+
+		name := "algo-sha512"
+		tag := "multiarch"
+
+		err := UploadMultiarchImage(multiarch, baseURL, name, tag)
+		So(err, ShouldBeNil)
+
+		client := resty.New()
+
+		// The server picks canonical digests when tags are pushed
+		// See https://github.com/opencontainers/distribution-spec/issues/494
+		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
+		// but there is no way to specify a client preference
+		// so all we can do is verify the correct algorithm is returned
+		expectedDigestStr := multiarch.DigestForAlgorithm(godigest.Canonical).String()
+
+		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
+		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
+
+		// While the expected multiarch manifest digest always uses the canonical algorithm,
+		// the sub-image manifest digest can use any algorithm
+		verifyReturnedManifestDigest(t, client, baseURL, name,
+			subImage1.ManifestDescriptor.Digest.String(), subImage1.ManifestDescriptor.Digest.String())
+		verifyReturnedManifestDigest(t, client, baseURL, name,
+			subImage2.ManifestDescriptor.Digest.String(), subImage2.ManifestDescriptor.Digest.String())
+	})
+
+	Convey("Test SHA384 multi-arch image", t, func() {
+		subImage1 := CreateImageWithDigestAlgorithm(godigest.SHA384).RandomLayers(1, 10).
+			DefaultConfig().Build()
+		subImage2 := CreateImageWithDigestAlgorithm(godigest.SHA384).RandomLayers(1, 10).
+			DefaultConfig().Build()
+		multiarch := CreateMultiarchWithDigestAlgorithm(godigest.SHA384).
+			Images([]Image{subImage1, subImage2}).Build()
+
+		name := "algo-sha384"
+		tag := "multiarch"
+
+		err := UploadMultiarchImage(multiarch, baseURL, name, tag)
+		So(err, ShouldBeNil)
+
+		client := resty.New()
+
+		// The server picks canonical digests when tags are pushed
+		// See https://github.com/opencontainers/distribution-spec/issues/494
+		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
+		// but there is no way to specify a client preference
+		// so all we can do is verify the correct algorithm is returned
+		expectedDigestStr := multiarch.DigestForAlgorithm(godigest.Canonical).String()
+
+		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
+		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
+
+		// While the expected multiarch manifest digest always uses the canonical algorithm,
+		// the sub-image manifest digest can use any algorithm
+		verifyReturnedManifestDigest(t, client, baseURL, name,
+			subImage1.ManifestDescriptor.Digest.String(), subImage1.ManifestDescriptor.Digest.String())
+		verifyReturnedManifestDigest(t, client, baseURL, name,
+			subImage2.ManifestDescriptor.Digest.String(), subImage2.ManifestDescriptor.Digest.String())
+	})
+}
+
+func verifyReturnedManifestDigest(t *testing.T, client *resty.Client, baseURL, repoName,
+	reference, expectedDigestStr string,
+) {
+	t.Helper()
+
+	t.Logf("Verify Docker-Content-Digest returned for repo %s reference %s is %s",
+		repoName, reference, expectedDigestStr)
+
+	getResponse, err := client.R().Get(fmt.Sprintf("%s/v2/%s/manifests/%s", baseURL, repoName, reference))
+	So(err, ShouldBeNil)
+	So(getResponse, ShouldNotBeNil)
+	So(getResponse.StatusCode(), ShouldEqual, http.StatusOK)
+
+	contentDigestStr := getResponse.Header().Get("Docker-Content-Digest")
+	So(contentDigestStr, ShouldEqual, expectedDigestStr)
+
+	
getResponse, err = client.R().Head(fmt.Sprintf("%s/v2/%s/manifests/%s", baseURL, repoName, reference)) + So(err, ShouldBeNil) + So(getResponse, ShouldNotBeNil) + So(getResponse.StatusCode(), ShouldEqual, http.StatusOK) + + contentDigestStr = getResponse.Header().Get("Docker-Content-Digest") + So(contentDigestStr, ShouldEqual, expectedDigestStr) +} + func getEmptyImageConfig() ([]byte, godigest.Digest) { config := ispec.Image{} diff --git a/pkg/storage/common/common.go b/pkg/storage/common/common.go index 506fbb37bd..65c334ece7 100644 --- a/pkg/storage/common/common.go +++ b/pkg/storage/common/common.go @@ -64,19 +64,19 @@ func GetManifestDescByReference(index ispec.Index, reference string) (ispec.Desc func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaType string, body []byte, log zlog.Logger, -) (godigest.Digest, error) { +) error { // validate the manifest if !IsSupportedMediaType(mediaType) { log.Debug().Interface("actual", mediaType). Msg("bad manifest media type") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } if len(body) == 0 { log.Debug().Int("len", len(body)).Msg("invalid body length") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } switch mediaType { @@ -87,13 +87,13 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy if err := ValidateManifestSchema(body); err != nil { log.Error().Err(err).Msg("failed to validate OCIv1 image manifest schema") - return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) + return zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } if err := json.Unmarshal(body, &manifest); err != nil { log.Error().Err(err).Msg("failed to unmarshal JSON") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } // validate blobs only for known media types @@ -105,7 +105,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy log.Error().Err(err).Str("digest", manifest.Config.Digest.String()). Msg("failed to stat blob due to missing config blob") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } // validate layers - a lightweight check if the blob is present @@ -122,7 +122,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy log.Error().Err(err).Str("digest", layer.Digest.String()). 
Msg("failed to validate manifest due to missing layer blob") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } } } @@ -131,21 +131,21 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy if err := json.Unmarshal(body, &m); err != nil { log.Error().Err(err).Msg("failed to unmarshal JSON") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } case ispec.MediaTypeImageIndex: // validate manifest if err := ValidateImageIndexSchema(body); err != nil { log.Error().Err(err).Msg("failed to validate OCIv1 image index manifest schema") - return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) + return zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } var indexManifest ispec.Index if err := json.Unmarshal(body, &indexManifest); err != nil { log.Error().Err(err).Msg("failed to unmarshal JSON") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } for _, manifest := range indexManifest.Manifests { @@ -153,28 +153,37 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy log.Error().Err(err).Str("digest", manifest.Digest.String()). Msg("failed to stat manifest due to missing manifest blob") - return "", zerr.ErrBadManifest + return zerr.ErrBadManifest } } } - return "", nil + return nil } -func GetAndValidateRequestDigest(body []byte, digestStr string, log zlog.Logger) (godigest.Digest, error) { - bodyDigest := godigest.FromBytes(body) +// Returns the canonical digest or the digest provided by the reference if any +// Per spec, the canonical digest would always be returned to the client in +// request headers, but that does not make sense if the client requested a different digest algorithm +// See https://github.com/opencontainers/distribution-spec/issues/494 +func GetAndValidateRequestDigest(body []byte, reference string, log zlog.Logger) ( + godigest.Digest, error, +) { + expectedDigest, err := godigest.Parse(reference) + if err != nil { + // This is a non-digest reference + return godigest.Canonical.FromBytes(body), err + } + + actualDigest := expectedDigest.Algorithm().FromBytes(body) - d, err := godigest.Parse(digestStr) - if err == nil { - if d.String() != bodyDigest.String() { - log.Error().Str("actual", bodyDigest.String()).Str("expected", d.String()). - Msg("failed to validate manifest digest") + if expectedDigest.String() != actualDigest.String() { + log.Error().Str("actual", actualDigest.String()).Str("expected", expectedDigest.String()). 
+ Msg("failed to validate manifest digest") - return "", zerr.ErrBadManifest - } + return actualDigest, zerr.ErrBadManifest } - return bodyDigest, err + return actualDigest, nil } /* diff --git a/pkg/storage/common/common_test.go b/pkg/storage/common/common_test.go index 768b756ad8..56029db4b5 100644 --- a/pkg/storage/common/common_test.go +++ b/pkg/storage/common/common_test.go @@ -51,6 +51,29 @@ func TestValidateManifest(t *testing.T) { So(err, ShouldBeNil) So(clen, ShouldEqual, len(cblob)) + Convey("bad manifest mediatype", func() { + manifest := ispec.Manifest{} + + body, err := json.Marshal(manifest) + So(err, ShouldBeNil) + + _, _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageConfig, body) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, zerr.ErrBadManifest) + }) + + Convey("empty manifest with bad media type", func() { + _, _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageConfig, []byte("")) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, zerr.ErrBadManifest) + }) + + Convey("empty manifest with correct media type", func() { + _, _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, []byte("")) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, zerr.ErrBadManifest) + }) + Convey("bad manifest schema version", func() { manifest := ispec.Manifest{ Config: ispec.Descriptor{ diff --git a/pkg/storage/gc/gc.go b/pkg/storage/gc/gc.go index a0cfdbb769..7df27b1dde 100644 --- a/pkg/storage/gc/gc.go +++ b/pkg/storage/gc/gc.go @@ -582,11 +582,10 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio gcBlobs := make([]godigest.Digest, 0) - for _, blob := range allBlobs { - digest := godigest.NewDigestFromEncoded(godigest.SHA256, blob) + for _, digest := range allBlobs { if err = digest.Validate(); err != nil { - log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob). - Msg("failed to parse digest") + log.Error().Err(err).Str("module", "gc").Str("repository", repo). + Str("digest", digest.String()).Msg("failed to parse digest") return err } @@ -594,8 +593,8 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio if _, ok := refBlobs[digest.String()]; !ok { canGC, err := isBlobOlderThan(gc.imgStore, repo, digest, delay, log) if err != nil { - log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob). - Msg("failed to determine GC delay") + log.Error().Err(err).Str("module", "gc").Str("repository", repo). 
+ Str("digest", digest.String()).Msg("failed to determine GC delay") return err } diff --git a/pkg/storage/gc/gc_internal_test.go b/pkg/storage/gc/gc_internal_test.go index 6e127b2867..bfd1de3528 100644 --- a/pkg/storage/gc/gc_internal_test.go +++ b/pkg/storage/gc/gc_internal_test.go @@ -440,8 +440,8 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) { GetIndexContentFn: func(repo string) ([]byte, error) { return returnedIndexJSONBuf, nil }, - GetAllBlobsFn: func(repo string) ([]string, error) { - return []string{}, errGC + GetAllBlobsFn: func(repo string) ([]godigest.Digest, error) { + return []godigest.Digest{}, errGC }, } diff --git a/pkg/storage/imagestore/imagestore.go b/pkg/storage/imagestore/imagestore.go index f11ac60d7e..b027c668a0 100644 --- a/pkg/storage/imagestore/imagestore.go +++ b/pkg/storage/imagestore/imagestore.go @@ -2,7 +2,6 @@ package imagestore import ( "context" - "crypto/sha256" "encoding/json" "errors" "fmt" @@ -141,7 +140,7 @@ func (is *ImageStore) initRepo(name string) error { } // create "blobs" subdir - err := is.storeDriver.EnsureDir(path.Join(repoDir, "blobs")) + err := is.storeDriver.EnsureDir(path.Join(repoDir, ispec.ImageBlobsDir)) if err != nil { is.log.Error().Err(err).Str("repository", name).Str("dir", repoDir).Msg("failed to create blobs subdir") @@ -250,7 +249,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) { return false, err } - if filename == "blobs" && !fileInfo.IsDir() { + if filename == ispec.ImageBlobsDir && !fileInfo.IsDir() { return false, nil } @@ -259,7 +258,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) { // check blobs dir exists only for filesystem, in s3 we can't have empty dirs if is.storeDriver.Name() == storageConstants.LocalStorageDriverName { - if !is.storeDriver.DirExists(path.Join(dir, "blobs")) { + if !is.storeDriver.DirExists(path.Join(dir, ispec.ImageBlobsDir)) { return false, nil } } @@ -515,9 +514,9 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli refIsDigest = false } - dig, err := common.ValidateManifest(is, repo, reference, mediaType, body, is.log) + err = common.ValidateManifest(is, repo, reference, mediaType, body, is.log) if err != nil { - return dig, "", err + return mDigest, "", err } index, err := common.GetIndex(is, repo, is.log) @@ -572,11 +571,11 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli } if !updateIndex { - return desc.Digest, subjectDigest, nil + return mDigest, subjectDigest, nil } // write manifest to "blobs" - dir := path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String()) + dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, mDigest.Algorithm().String()) manifestPath := path.Join(dir, mDigest.Encoded()) if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil { @@ -609,7 +608,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli return "", "", err } - return desc.Digest, subjectDigest, nil + return mDigest, subjectDigest, nil } // DeleteImageManifest deletes the image manifest from the repository. 
@@ -696,7 +695,8 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio } if toDelete { - p := path.Join(dir, "blobs", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded()) + p := path.Join(dir, ispec.ImageBlobsDir, manifestDesc.Digest.Algorithm().String(), + manifestDesc.Digest.Encoded()) err = is.storeDriver.Delete(p) if err != nil { @@ -882,7 +882,7 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig return err } - srcDigest, err := getBlobDigest(is, src) + srcDigest, err := getBlobDigest(is, src, dstDigest.Algorithm()) if err != nil { is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob") @@ -896,11 +896,11 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig return zerr.ErrBadBlobDigest } - dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String()) + dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, dstDigest.Algorithm().String()) err = is.storeDriver.EnsureDir(dir) if err != nil { - is.log.Error().Err(err).Str("dir", dir).Msg("failed to create dir") + is.log.Error().Str("directory", dir).Err(err).Msg("failed to create dir") return err } @@ -949,7 +949,10 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi uuid := u.String() src := is.BlobUploadPath(repo, uuid) - digester := sha256.New() + + dstDigestAlgorithm := dstDigest.Algorithm() + + digester := dstDigestAlgorithm.Hash() blobFile, err := is.storeDriver.Writer(src, false) if err != nil { @@ -973,7 +976,7 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi return "", -1, err } - srcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf("%x", digester.Sum(nil))) + srcDigest := godigest.NewDigestFromEncoded(dstDigestAlgorithm, fmt.Sprintf("%x", digester.Sum(nil))) if srcDigest != dstDigest { is.log.Error().Str("srcDigest", srcDigest.String()). Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest") @@ -981,7 +984,7 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi return "", -1, zerr.ErrBadBlobDigest } - dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String()) + dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, dstDigestAlgorithm.String()) _ = is.storeDriver.EnsureDir(dir) var lockLatency time.Time @@ -1136,7 +1139,7 @@ func (is *ImageStore) DeleteBlobUpload(repo, uuid string) error { // BlobPath returns the repository path of a blob. 
func (is *ImageStore) BlobPath(repo string, digest godigest.Digest) string { - return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded()) + return path.Join(is.rootDir, repo, ispec.ImageBlobsDir, digest.Algorithm().String(), digest.Encoded()) } /* @@ -1667,7 +1670,8 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { return nil } -func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) { +func getBlobDigest(imgStore *ImageStore, path string, digestAlgorithm godigest.Algorithm, +) (godigest.Digest, error) { fileReader, err := imgStore.storeDriver.Reader(path, 0) if err != nil { return "", zerr.ErrUploadNotFound @@ -1675,7 +1679,7 @@ func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) { defer fileReader.Close() - digest, err := godigest.FromReader(fileReader) + digest, err := digestAlgorithm.FromReader(fileReader) if err != nil { return "", zerr.ErrBadBlobDigest } @@ -1683,24 +1687,44 @@ func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) { return digest, nil } -func (is *ImageStore) GetAllBlobs(repo string) ([]string, error) { - dir := path.Join(is.rootDir, repo, "blobs", "sha256") +func (is *ImageStore) GetAllBlobs(repo string) ([]godigest.Digest, error) { + blobsDir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir) - files, err := is.storeDriver.List(dir) + ret := []godigest.Digest{} + + algorithmPaths, err := is.storeDriver.List(blobsDir) if err != nil { if errors.As(err, &driver.PathNotFoundError{}) { - is.log.Debug().Msg("empty rootDir") + is.log.Debug().Str("directory", blobsDir).Msg("empty blobs directory") - return []string{}, nil + return ret, nil } - return []string{}, err + return ret, err } - ret := []string{} + for _, algorithmPath := range algorithmPaths { + algorithm := godigest.Algorithm(path.Base(algorithmPath)) - for _, file := range files { - ret = append(ret, filepath.Base(file)) + if !algorithm.Available() { + continue + } + + digestPaths, err := is.storeDriver.List(algorithmPath) + if err != nil { + // algorithmPath was obtained by looking up under the blobs directory + // we are sure it already exists, so PathNotFoundError does not need to be checked + return []godigest.Digest{}, err + } + + for _, file := range digestPaths { + digest := godigest.NewDigestFromEncoded(algorithm, filepath.Base(file)) + ret = append(ret, digest) + } + } + + if len(ret) == 0 { + is.log.Debug().Str("directory", blobsDir).Msg("empty blobs directory") } return ret, nil @@ -1729,14 +1753,24 @@ func (is *ImageStore) GetNextDigestWithBlobPaths(repos []string, lastDigests []g if fileInfo.IsDir() { // skip repositories not found in repos repo := path.Base(fileInfo.Path()) + if !zcommon.Contains(repos, repo) && repo != ispec.ImageBlobsDir { + candidateAlgorithm := godigest.Algorithm(repo) - if !zcommon.Contains(repos, repo) && repo != "blobs" && repo != "sha256" { - return driver.ErrSkipDir + if !candidateAlgorithm.Available() { + return driver.ErrSkipDir + } } } - blobDigest := godigest.NewDigestFromEncoded("sha256", path.Base(fileInfo.Path())) + digestHash := path.Base(fileInfo.Path()) + digestAlgorithm := godigest.Algorithm(path.Base(path.Dir(fileInfo.Path()))) + + blobDigest := godigest.NewDigestFromEncoded(digestAlgorithm, digestHash) if err := blobDigest.Validate(); err != nil { //nolint: nilerr + is.log.Debug().Str("path", fileInfo.Path()).Str("digestHash", digestHash). + Str("digestAlgorithm", digestAlgorithm.String()). 
+ Msg("digest validation failed when walking blob paths") + return nil //nolint: nilerr // ignore files which are not blobs } diff --git a/pkg/storage/local/local_test.go b/pkg/storage/local/local_test.go index adfbbb4aef..905e620d8a 100644 --- a/pkg/storage/local/local_test.go +++ b/pkg/storage/local/local_test.go @@ -2045,7 +2045,8 @@ func TestGarbageCollectForImageStore(t *testing.T) { So(err, ShouldBeNil) manifestDigest := image.ManifestDescriptor.Digest - err = os.Remove(path.Join(dir, repoName, "blobs/sha256", manifestDigest.Encoded())) + err = os.Remove(path.Join(dir, repoName, "blobs", + manifestDigest.Algorithm().String(), manifestDigest.Encoded())) if err != nil { panic(err) } @@ -2240,7 +2241,8 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) { So(err, ShouldBeNil) artifactDigest := godigest.FromBytes(artifactBuf) - err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", artifactDigest.Encoded()), + err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs", + artifactDigest.Algorithm().String(), artifactDigest.Encoded()), artifactBuf, storageConstants.DefaultFilePerms) So(err, ShouldBeNil) @@ -2257,7 +2259,8 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) { So(err, ShouldBeNil) referrerDigest := godigest.FromBytes(referrerBuf) - err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", referrerDigest.Encoded()), + err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs", + artifactDigest.Algorithm().String(), referrerDigest.Encoded()), referrerBuf, storageConstants.DefaultFilePerms) So(err, ShouldBeNil) diff --git a/pkg/storage/s3/s3_test.go b/pkg/storage/s3/s3_test.go index 73a948e173..5edade352c 100644 --- a/pkg/storage/s3/s3_test.go +++ b/pkg/storage/s3/s3_test.go @@ -2286,7 +2286,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) }, @@ -2302,7 +2302,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { Convey("Trigger GetContent error in restoreDedupedBlobs()", t, func() { imgStore := createMockStorage(testDir, tdir, false, &StorageDriverMock{ StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return &FileInfoMock{ SizeFn: func() int64 { return int64(0) @@ -2322,7 +2322,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2330,7 +2330,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2351,7 +2351,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { Convey("Trigger GetContent error in restoreDedupedBlobs()", t, func() { imgStore := createMockStorage(testDir, tdir, false, &StorageDriverMock{ StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == 
fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return &FileInfoMock{ SizeFn: func() int64 { return int64(0) @@ -2371,7 +2371,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2379,7 +2379,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2400,7 +2400,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { Convey("Trigger Stat() error in restoreDedupedBlobs()", t, func() { imgStore := createMockStorage(testDir, tdir, false, &StorageDriverMock{ StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return &FileInfoMock{ SizeFn: func() int64 { return int64(10) @@ -2420,7 +2420,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2428,7 +2428,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2445,7 +2445,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { Convey("Trigger Stat() error in dedupeBlobs()", func() { imgStore := createMockStorage(testDir, t.TempDir(), true, &StorageDriverMock{ StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return &FileInfoMock{ SizeFn: func() int64 { return int64(10) @@ -2465,7 +2465,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2473,7 +2473,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2493,7 +2493,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { tdir := t.TempDir() imgStore := createMockStorage(testDir, tdir, true, &StorageDriverMock{ StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return &FileInfoMock{ SizeFn: func() int64 { return int64(0) @@ -2513,7 +2513,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return 
fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2521,7 +2521,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2544,7 +2544,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { tdir := t.TempDir() imgStore := createMockStorage(testDir, tdir, true, &StorageDriverMock{ StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return &FileInfoMock{ SizeFn: func() int64 { return int64(0) @@ -2564,7 +2564,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2572,7 +2572,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2612,7 +2612,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2620,7 +2620,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2650,7 +2650,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { Convey("Trigger cache errors", t, func() { storageDriverMockIfBranch := &StorageDriverMock{ StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return &FileInfoMock{ SizeFn: func() int64 { return int64(0) @@ -2670,7 +2670,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2678,7 +2678,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2708,7 +2708,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PathFn: func() string { - return fmt.Sprintf("path/to/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) _ = walkFn(&FileInfoMock{ @@ -2716,7 +2716,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return 
false }, PathFn: func() string { - return fmt.Sprintf("path/to/second/%s", validDigest.Encoded()) + return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) }, }) @@ -2749,7 +2749,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) { return false }, PutBlobFn: func(digest godigest.Digest, path string) error { - if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) { + if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) { return errCache } diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index 96bda64eaf..78de3ed877 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -216,6 +216,22 @@ func TestStorageAPIs(t *testing.T) { So(v, ShouldBeEmpty) }) + Convey("Full blob upload unavailable algorithm", func() { + body := []byte("this blob will be hashed using an unavailable hashing algorithm") + buf := bytes.NewBuffer(body) + digest := godigest.Digest("md5:8114c3f59ef9dcf737410e0f4b00a154") + upload, n, err := imgStore.FullBlobUpload("test", buf, digest) + So(err, ShouldEqual, godigest.ErrDigestUnsupported) + So(n, ShouldEqual, -1) + So(upload, ShouldEqual, "") + + // Check no blobs are returned and there are no errors + // if other paths for different algorithms are missing + digests, err := imgStore.GetAllBlobs("test") + So(err, ShouldBeNil) + So(digests, ShouldBeEmpty) + }) + Convey("Full blob upload", func() { body := []byte("this is a blob") buf := bytes.NewBuffer(body) @@ -227,6 +243,51 @@ func TestStorageAPIs(t *testing.T) { err = imgStore.VerifyBlobDigestValue("test", digest) So(err, ShouldBeNil) + + // Check the blob is returned and there are no errors + // if other paths for different algorithms are missing + digests, err := imgStore.GetAllBlobs("test") + So(err, ShouldBeNil) + So(digests, ShouldContain, digest) + So(len(digests), ShouldEqual, 1) + }) + + Convey("Full blob upload sha512", func() { + body := []byte("this blob will be hashed using sha512") + buf := bytes.NewBuffer(body) + digest := godigest.SHA512.FromBytes(body) + upload, n, err := imgStore.FullBlobUpload("test", buf, digest) + So(err, ShouldBeNil) + So(n, ShouldEqual, len(body)) + So(upload, ShouldNotBeEmpty) + + // Check the blob is returned and there are no errors + // if other paths for different algorithms are missing + digests, err := imgStore.GetAllBlobs("test") + So(err, ShouldBeNil) + So(digests, ShouldContain, digest) + // imgStore is reused so look for this digest and + // the ones uploaded by previous tests + So(len(digests), ShouldEqual, 2) + }) + + Convey("Full blob upload sha384", func() { + body := []byte("this blob will be hashed using sha384") + buf := bytes.NewBuffer(body) + digest := godigest.SHA384.FromBytes(body) + upload, n, err := imgStore.FullBlobUpload("test", buf, digest) + So(err, ShouldBeNil) + So(n, ShouldEqual, len(body)) + So(upload, ShouldNotBeEmpty) + + // Check the blob is returned and there are no errors + // if other paths for different algorithms are missing + digests, err := imgStore.GetAllBlobs("test") + So(err, ShouldBeNil) + So(digests, ShouldContain, digest) + // imgStore is reused so look for this digest and + // the ones uploaded by previous tests + So(len(digests), ShouldEqual, 3) }) Convey("New blob upload", func() { diff --git a/pkg/storage/types/types.go b/pkg/storage/types/types.go index 625d5a1e58..02f522a796 100644 --- a/pkg/storage/types/types.go +++ b/pkg/storage/types/types.go @@ -62,7 +62,7 @@ type ImageStore interface { 
//nolint:interfacebloat RunDedupeBlobs(interval time.Duration, sch *scheduler.Scheduler) RunDedupeForDigest(ctx context.Context, digest godigest.Digest, dedupe bool, duplicateBlobs []string) error GetNextDigestWithBlobPaths(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error) - GetAllBlobs(repo string) ([]string, error) + GetAllBlobs(repo string) ([]godigest.Digest, error) PopulateStorageMetrics(interval time.Duration, sch *scheduler.Scheduler) VerifyBlobDigestValue(repo string, digest godigest.Digest) error } diff --git a/pkg/test/image-utils/images.go b/pkg/test/image-utils/images.go index 44bdfc462b..9f555bf23f 100644 --- a/pkg/test/image-utils/images.go +++ b/pkg/test/image-utils/images.go @@ -88,9 +88,10 @@ type ManifestBuilder interface { } type Image struct { - Manifest ispec.Manifest - Config ispec.Image - Layers [][]byte + Manifest ispec.Manifest + Config ispec.Image + Layers [][]byte + digestAlgorithm godigest.Algorithm ConfigDescriptor ispec.Descriptor ManifestDescriptor ispec.Descriptor @@ -108,13 +109,28 @@ func (img *Image) Digest() godigest.Digest { panic("unreachable: ispec.Manifest should always be marshable") } - return godigest.FromBytes(blob) + digestAlgorithm := img.digestAlgorithm + + if digestAlgorithm == "" { + digestAlgorithm = godigest.Canonical + } + + return digestAlgorithm.FromBytes(blob) } func (img *Image) DigestStr() string { return img.Digest().String() } +func (img *Image) DigestForAlgorithm(digestAlgorithm godigest.Algorithm) godigest.Digest { + blob, err := json.Marshal(img.Manifest) + if err != nil { + panic("unreachable: ispec.Manifest should always be marshable") + } + + return digestAlgorithm.FromBytes(blob) +} + func (img *Image) Size() int { size := img.ConfigDescriptor.Size + img.ManifestDescriptor.Size @@ -167,7 +183,15 @@ type Layer struct { // specifying the layers of the image. 
func CreateImageWith() LayerBuilder { // set default values here - return &BaseImageBuilder{} + return &BaseImageBuilder{ + digestAlgorithm: godigest.Canonical, + } +} + +func CreateImageWithDigestAlgorithm(digestAlgorithm godigest.Algorithm) LayerBuilder { + return &BaseImageBuilder{ + digestAlgorithm: digestAlgorithm, + } } func CreateDefaultImage() Image { @@ -223,6 +247,8 @@ type BaseImageBuilder struct { annotations map[string]string subject *ispec.Descriptor artifactType string + + digestAlgorithm godigest.Algorithm } func (ib *BaseImageBuilder) Layers(layers []Layer) ConfigBuilder { @@ -236,7 +262,7 @@ func (ib *BaseImageBuilder) LayerBlobs(layers [][]byte) ConfigBuilder { ib.layers = append(ib.layers, Layer{ Blob: layer, MediaType: ispec.MediaTypeImageLayerGzip, - Digest: godigest.FromBytes(layer), + Digest: ib.digestAlgorithm.FromBytes(layer), }) } @@ -267,7 +293,7 @@ func (ib *BaseImageBuilder) RandomLayers(count, size int) ConfigBuilder { ib.layers = append(ib.layers, Layer{ Blob: layer, MediaType: ispec.MediaTypeImageLayerGzip, - Digest: godigest.FromBytes(layer), + Digest: ib.digestAlgorithm.FromBytes(layer), }) } @@ -290,7 +316,7 @@ func (ib *BaseImageBuilder) VulnerableLayers() VulnerableConfigBuilder { { Blob: layer, MediaType: ispec.MediaTypeImageLayerGzip, - Digest: godigest.FromBytes(layer), + Digest: ib.digestAlgorithm.FromBytes(layer), }, } @@ -309,7 +335,7 @@ func (ib *BaseImageBuilder) ImageConfig(config ispec.Image) ManifestBuilder { MediaType: ispec.MediaTypeImageConfig, Size: int64(len(configBlob)), Data: configBlob, - Digest: godigest.FromBytes(configBlob), + Digest: ib.digestAlgorithm.FromBytes(configBlob), } return ib @@ -351,7 +377,7 @@ func (ib *BaseImageBuilder) CustomConfigBlob(configBlob []byte, mediaType string MediaType: mediaType, Size: int64(len(configBlob)), Data: configBlob, - Digest: godigest.FromBytes(configBlob), + Digest: ib.digestAlgorithm.FromBytes(configBlob), } return ib @@ -372,7 +398,7 @@ func (ib *BaseImageBuilder) RandomConfig() ManifestBuilder { ib.configDescriptor = ispec.Descriptor{ MediaType: ispec.MediaTypeImageConfig, - Digest: godigest.FromBytes(configBlob), + Digest: ib.digestAlgorithm.FromBytes(configBlob), Size: int64(len(configBlob)), Data: configBlob, } @@ -390,7 +416,7 @@ func (ib *BaseImageBuilder) DefaultVulnConfig() ManifestBuilder { vulnConfigDescriptor := ispec.Descriptor{ MediaType: ispec.MediaTypeImageConfig, - Digest: godigest.FromBytes(configBlob), + Digest: ib.digestAlgorithm.FromBytes(configBlob), Size: int64(len(configBlob)), Data: configBlob, } @@ -421,7 +447,7 @@ func (ib *BaseImageBuilder) VulnerableConfig(config ispec.Image) ManifestBuilder vulnConfigDescriptor := ispec.Descriptor{ MediaType: ispec.MediaTypeImageConfig, - Digest: godigest.FromBytes(configBlob), + Digest: ib.digestAlgorithm.FromBytes(configBlob), Size: int64(len(configBlob)), Data: configBlob, } @@ -446,7 +472,7 @@ func (ib *BaseImageBuilder) RandomVulnConfig() ManifestBuilder { vulnConfigDescriptor := ispec.Descriptor{ MediaType: ispec.MediaTypeImageConfig, - Digest: godigest.FromBytes(configBlob), + Digest: ib.digestAlgorithm.FromBytes(configBlob), Size: int64(len(configBlob)), Data: configBlob, } @@ -493,6 +519,7 @@ func (ib *BaseImageBuilder) Build() Image { Subject: ib.subject, Annotations: ib.annotations, }, + digestAlgorithm: ib.digestAlgorithm, } manifestBlob, err := json.Marshal(img.Manifest) @@ -502,7 +529,7 @@ func (ib *BaseImageBuilder) Build() Image { img.ManifestDescriptor = ispec.Descriptor{ MediaType: 
ispec.MediaTypeImageManifest, - Digest: godigest.FromBytes(manifestBlob), + Digest: ib.digestAlgorithm.FromBytes(manifestBlob), Size: int64(len(manifestBlob)), Data: manifestBlob, } diff --git a/pkg/test/image-utils/multiarch.go b/pkg/test/image-utils/multiarch.go index e01e3d6c65..bddb882c76 100644 --- a/pkg/test/image-utils/multiarch.go +++ b/pkg/test/image-utils/multiarch.go @@ -11,8 +11,9 @@ import ( ) type MultiarchImage struct { - Index ispec.Index - Images []Image + Index ispec.Index + Images []Image + digestAlgorithm godigest.Algorithm IndexDescriptor ispec.Descriptor } @@ -23,13 +24,28 @@ func (mi *MultiarchImage) Digest() godigest.Digest { panic("unreachable: ispec.Index should always be marshable") } - return godigest.FromBytes(indexBlob) + digestAlgorithm := mi.digestAlgorithm + + if digestAlgorithm == "" { + digestAlgorithm = godigest.Canonical + } + + return digestAlgorithm.FromBytes(indexBlob) } func (mi *MultiarchImage) DigestStr() string { return mi.Digest().String() } +func (mi *MultiarchImage) DigestForAlgorithm(digestAlgorithm godigest.Algorithm) godigest.Digest { + blob, err := json.Marshal(mi.Index) + if err != nil { + panic("unreachable: ispec.Index should always be marshable") + } + + return digestAlgorithm.FromBytes(blob) +} + func (mi MultiarchImage) AsImageMeta() mTypes.ImageMeta { index := mi.Index @@ -61,7 +77,15 @@ type MultiarchBuilder interface { } func CreateMultiarchWith() ImagesBuilder { - return &BaseMultiarchBuilder{} + return &BaseMultiarchBuilder{ + digestAlgorithm: godigest.Canonical, + } +} + +func CreateMultiarchWithDigestAlgorithm(digestAlgorithm godigest.Algorithm) ImagesBuilder { + return &BaseMultiarchBuilder{ + digestAlgorithm: digestAlgorithm, + } } func CreateRandomMultiarch() MultiarchImage { @@ -85,10 +109,11 @@ func CreateVulnerableMultiarch() MultiarchImage { } type BaseMultiarchBuilder struct { - images []Image - subject *ispec.Descriptor - artifactType string - annotations map[string]string + images []Image + subject *ispec.Descriptor + artifactType string + annotations map[string]string + digestAlgorithm godigest.Algorithm } func (mb *BaseMultiarchBuilder) Images(images []Image) MultiarchBuilder { @@ -154,7 +179,7 @@ func (mb *BaseMultiarchBuilder) Build() MultiarchImage { panic("unreachable: ispec.Index should always be marshable") } - indexDigest := godigest.FromBytes(indexBlob) + indexDigest := mb.digestAlgorithm.FromBytes(indexBlob) return MultiarchImage{ Index: index, diff --git a/pkg/test/image-utils/upload.go b/pkg/test/image-utils/upload.go index 783eeddfee..dcf1a7d626 100644 --- a/pkg/test/image-utils/upload.go +++ b/pkg/test/image-utils/upload.go @@ -21,6 +21,12 @@ var ( ) func UploadImage(img Image, baseURL, repo, ref string) error { + digestAlgorithm := img.digestAlgorithm + + if digestAlgorithm == "" { + digestAlgorithm = godigest.Canonical + } + for _, blob := range img.Layers { resp, err := resty.R().Post(baseURL + "/v2/" + repo + "/blobs/uploads/") if err != nil { @@ -33,7 +39,7 @@ func UploadImage(img Image, baseURL, repo, ref string) error { loc := resp.Header().Get("Location") - digest := godigest.FromBytes(blob).String() + digest := digestAlgorithm.FromBytes(blob).String() resp, err = resty.R(). SetHeader("Content-Length", fmt.Sprintf("%d", len(blob))). 
@@ -63,7 +69,7 @@ func UploadImage(img Image, baseURL, repo, ref string) error { } } - cdigest := godigest.FromBytes(cblob) + cdigest := digestAlgorithm.FromBytes(cblob) if img.Manifest.Config.MediaType == ispec.MediaTypeEmptyJSON || img.Manifest.Config.Digest == ispec.DescriptorEmptyJSON.Digest { @@ -117,14 +123,16 @@ func UploadImage(img Image, baseURL, repo, ref string) error { return ErrPutBlob } - if inject.ErrStatusCode(resp.StatusCode()) != http.StatusCreated { - return ErrPutBlob - } - return err } func UploadImageWithBasicAuth(img Image, baseURL, repo, ref, user, password string) error { + digestAlgorithm := img.digestAlgorithm + + if digestAlgorithm == "" { + digestAlgorithm = godigest.Canonical + } + for _, blob := range img.Layers { resp, err := resty.R(). SetBasicAuth(user, password). @@ -139,7 +147,7 @@ func UploadImageWithBasicAuth(img Image, baseURL, repo, ref, user, password stri loc := resp.Header().Get("Location") - digest := godigest.FromBytes(blob).String() + digest := digestAlgorithm.FromBytes(blob).String() resp, err = resty.R(). SetBasicAuth(user, password). @@ -163,7 +171,7 @@ func UploadImageWithBasicAuth(img Image, baseURL, repo, ref, user, password stri return err } - cdigest := godigest.FromBytes(cblob) + cdigest := digestAlgorithm.FromBytes(cblob) if img.Manifest.Config.MediaType == ispec.MediaTypeEmptyJSON { cblob = ispec.DescriptorEmptyJSON.Data diff --git a/pkg/test/image-utils/write.go b/pkg/test/image-utils/write.go index 9825c18fdd..2bf083e1f8 100644 --- a/pkg/test/image-utils/write.go +++ b/pkg/test/image-utils/write.go @@ -18,9 +18,15 @@ func WriteImageToFileSystem(image Image, repoName, ref string, storeController s return err } + digestAlgorithm := image.digestAlgorithm + + if digestAlgorithm == "" { + digestAlgorithm = godigest.Canonical + } + for _, layerBlob := range image.Layers { layerReader := bytes.NewReader(layerBlob) - layerDigest := godigest.FromBytes(layerBlob) + layerDigest := digestAlgorithm.FromBytes(layerBlob) _, _, err = store.FullBlobUpload(repoName, layerReader, layerDigest) if err != nil { @@ -34,7 +40,7 @@ func WriteImageToFileSystem(image Image, repoName, ref string, storeController s } configReader := bytes.NewReader(configBlob) - configDigest := godigest.FromBytes(configBlob) + configDigest := digestAlgorithm.FromBytes(configBlob) _, _, err = store.FullBlobUpload(repoName, configReader, configDigest) if err != nil { diff --git a/pkg/test/mocks/image_store_mock.go b/pkg/test/mocks/image_store_mock.go index a31220b18c..33e0afda5a 100644 --- a/pkg/test/mocks/image_store_mock.go +++ b/pkg/test/mocks/image_store_mock.go @@ -54,7 +54,7 @@ type MockedImageStore struct { RunDedupeForDigestFn func(ctx context.Context, digest godigest.Digest, dedupe bool, duplicateBlobs []string) error GetNextDigestWithBlobPathsFn func(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error) - GetAllBlobsFn func(repo string) ([]string, error) + GetAllBlobsFn func(repo string) ([]godigest.Digest, error) CleanupRepoFn func(repo string, blobs []godigest.Digest, removeRepo bool) (int, error) PutIndexContentFn func(repo string, index ispec.Index) error PopulateStorageMetricsFn func(interval time.Duration, sch *scheduler.Scheduler) @@ -167,12 +167,12 @@ func (is MockedImageStore) GetImageTags(name string) ([]string, error) { return []string{}, nil } -func (is MockedImageStore) GetAllBlobs(repo string) ([]string, error) { +func (is MockedImageStore) GetAllBlobs(repo string) ([]godigest.Digest, error) { if is.GetAllBlobsFn != nil { 
return is.GetAllBlobsFn(repo) } - return []string{}, nil + return []godigest.Digest{}, nil } func (is MockedImageStore) DeleteImageManifest(name string, reference string, detectCollision bool) error {