fix(digests): do not mandate sha256 as the only algorithm used for hashing blobs

Signed-off-by: Andrei Aaron <aaaron@luxoft.com>
andaaron committed Nov 22, 2023
1 parent 83f287d commit c4c6abd
Showing 5 changed files with 66 additions and 39 deletions.
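
The change relies on go-digest digests being self-describing: each value carries its algorithm as a prefix ("sha256:...", "sha512:..."), so storage code can derive both the hash function and the on-disk location from the digest itself instead of assuming sha256. A minimal sketch of that premise (illustration only, not code from this commit):

package main

import (
    _ "crypto/sha256" // register sha256 hash implementation
    _ "crypto/sha512" // register sha384 and sha512 hash implementations
    "fmt"

    godigest "github.com/opencontainers/go-digest"
)

func main() {
    data := []byte("some blob content")

    // The same content can be addressed by any registered algorithm;
    // the algorithm travels with the digest value.
    for _, alg := range []godigest.Algorithm{godigest.SHA256, godigest.SHA384, godigest.SHA512} {
        d := alg.FromBytes(data)
        fmt.Println(d.Algorithm(), d.Encoded())
    }
}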
11 changes: 5 additions & 6 deletions pkg/storage/gc/gc.go
@@ -571,20 +571,19 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio

gcBlobs := make([]godigest.Digest, 0)

- for _, blob := range allBlobs {
- digest := godigest.NewDigestFromEncoded(godigest.SHA256, blob)
+ for _, digest := range allBlobs {
if err = digest.Validate(); err != nil {
- log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
- Msg("unable to parse digest")
+ log.Error().Err(err).Str("module", "gc").Str("repository", repo).
+ Str("digest", digest.String()).Msg("unable to parse digest")

return err
}

if _, ok := refBlobs[digest.String()]; !ok {
canGC, err := isBlobOlderThan(gc.imgStore, repo, digest, delay, log)
if err != nil {
- log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
- Msg("unable to determine GC delay")
+ log.Error().Err(err).Str("module", "gc").Str("repository", repo).
+ Str("digest", digest.String()).Msg("unable to determine GC delay")

return err
}
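
For context, a standalone sketch (not from the repository) of the Validate() behaviour the loop above relies on: a digest whose algorithm is registered with go-digest passes, anything else is rejected, so dropping the hard-coded SHA256 parsing does not loosen the check.

package main

import (
    _ "crypto/sha512" // register sha384 and sha512 hash implementations
    "fmt"

    godigest "github.com/opencontainers/go-digest"
)

func main() {
    ok := godigest.SHA384.FromString("layer bytes") // well-formed sha384 digest
    fmt.Println(ok.Validate())                      // <nil>

    bad := godigest.Digest("md5:d41d8cd98f00b204e9800998ecf8427e") // md5 is not registered
    fmt.Println(bad.Validate())                     // non-nil error
}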
4 changes: 2 additions & 2 deletions pkg/storage/gc/gc_internal_test.go
@@ -404,8 +404,8 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
GetIndexContentFn: func(repo string) ([]byte, error) {
return returnedIndexJSONBuf, nil
},
- GetAllBlobsFn: func(repo string) ([]string, error) {
- return []string{}, errGC
+ GetAllBlobsFn: func(repo string) ([]godigest.Digest, error) {
+ return []godigest.Digest{}, errGC
},
}

82 changes: 55 additions & 27 deletions pkg/storage/imagestore/imagestore.go
@@ -2,7 +2,6 @@ package imagestore

import (
"bytes"
- "crypto/sha256"
"encoding/json"
"errors"
"fmt"
@@ -141,7 +140,7 @@ func (is *ImageStore) initRepo(name string) error {
}

// create "blobs" subdir
- err := is.storeDriver.EnsureDir(path.Join(repoDir, "blobs"))
+ err := is.storeDriver.EnsureDir(path.Join(repoDir, ispec.ImageBlobsDir))
if err != nil {
is.log.Error().Err(err).Msg("error creating blobs subdir")

@@ -250,7 +249,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) {
return false, err
}

- if filename == "blobs" && !fileInfo.IsDir() {
+ if filename == ispec.ImageBlobsDir && !fileInfo.IsDir() {
return false, nil
}

@@ -259,7 +258,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) {

// check blobs dir exists only for filesystem, in s3 we can't have empty dirs
if is.storeDriver.Name() == storageConstants.LocalStorageDriverName {
- if !is.storeDriver.DirExists(path.Join(dir, "blobs")) {
+ if !is.storeDriver.DirExists(path.Join(dir, ispec.ImageBlobsDir)) {
return false, nil
}
}
@@ -576,7 +575,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
}

// write manifest to "blobs"
- dir := path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
+ dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, mDigest.Algorithm().String())
manifestPath := path.Join(dir, mDigest.Encoded())

if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil {
@@ -695,7 +694,8 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio
}

if toDelete {
- p := path.Join(dir, "blobs", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())
+ p := path.Join(dir, ispec.ImageBlobsDir, manifestDesc.Digest.Algorithm().String(),
+ manifestDesc.Digest.Encoded())

err = is.storeDriver.Delete(p)
if err != nil {
@@ -895,11 +895,11 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig
return zerr.ErrBadBlobDigest
}

- dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
+ dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, dstDigest.Algorithm().String())

err = is.storeDriver.EnsureDir(dir)
if err != nil {
- is.log.Error().Err(err).Msg("error creating blobs/sha256 dir")
+ is.log.Error().Str("directory", dir).Err(err).Msg("error creating blobs directory")

return err
}
@@ -948,7 +948,16 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi

uuid := u.String()
src := is.BlobUploadPath(repo, uuid)
- digester := sha256.New()
+
+ dstDigestAlgorithm := dstDigest.Algorithm()
+ if !dstDigestAlgorithm.Available() {
+ is.log.Error().Str("dstDigest", dstDigest.String()).
+ Msg("expected digest algorithm is not supported")
+
+ return "", -1, zerr.ErrBadBlobDigest
+ }
+
+ digester := dstDigestAlgorithm.Hash()
buf := new(bytes.Buffer)

_, err = buf.ReadFrom(body)
@@ -972,15 +981,15 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
return "", -1, err
}

- srcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf("%x", digester.Sum(nil)))
+ srcDigest := godigest.NewDigestFromEncoded(dstDigestAlgorithm, fmt.Sprintf("%x", digester.Sum(nil)))
if srcDigest != dstDigest {
is.log.Error().Str("srcDigest", srcDigest.String()).
Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")

return "", -1, zerr.ErrBadBlobDigest
}

- dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
+ dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, dstDigestAlgorithm.String())
_ = is.storeDriver.EnsureDir(dir)

var lockLatency time.Time
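
The two FullBlobUpload hunks above replace the fixed sha256 digester with one derived from the client-supplied digest. A condensed sketch of the same pattern (illustration only; verify is a made-up helper, not zot code):

package main

import (
    _ "crypto/sha512" // register sha384 and sha512 hash implementations
    "fmt"

    godigest "github.com/opencontainers/go-digest"
)

// verify hashes body with the algorithm carried by the expected digest and
// compares the result against it.
func verify(body []byte, dstDigest godigest.Digest) error {
    alg := dstDigest.Algorithm()
    if !alg.Available() {
        return fmt.Errorf("unsupported digest algorithm: %s", alg)
    }

    digester := alg.Hash()
    _, _ = digester.Write(body) // hash.Hash writes never fail

    srcDigest := godigest.NewDigestFromEncoded(alg, fmt.Sprintf("%x", digester.Sum(nil)))
    if srcDigest != dstDigest {
        return fmt.Errorf("digest mismatch: got %s, want %s", srcDigest, dstDigest)
    }

    return nil
}

func main() {
    body := []byte("blob payload")
    fmt.Println(verify(body, godigest.SHA512.FromBytes(body))) // <nil>
}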
@@ -1128,7 +1137,7 @@ func (is *ImageStore) DeleteBlobUpload(repo, uuid string) error {

// BlobPath returns the repository path of a blob.
func (is *ImageStore) BlobPath(repo string, digest godigest.Digest) string {
- return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded())
+ return path.Join(is.rootDir, repo, ispec.ImageBlobsDir, digest.Algorithm().String(), digest.Encoded())
}

/*
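
With the digest carrying its own algorithm, BlobPath above lays blobs out as blobs/<algorithm>/<encoded>, matching the OCI image layout, instead of hard-coding blobs/sha256. A small usage sketch of the same construction (the root directory and repository name are made up):

package main

import (
    _ "crypto/sha512" // register sha384 and sha512 hash implementations
    "fmt"
    "path"

    godigest "github.com/opencontainers/go-digest"
    ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    d := godigest.SHA512.FromString("example layer")

    // e.g. /var/lib/registry/myproject/app/blobs/sha512/<128 hex chars>
    blobPath := path.Join("/var/lib/registry", "myproject/app",
        ispec.ImageBlobsDir, d.Algorithm().String(), d.Encoded())
    fmt.Println(blobPath)
}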
@@ -1704,24 +1713,37 @@ func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) {
return digest, nil
}

- func (is *ImageStore) GetAllBlobs(repo string) ([]string, error) {
- dir := path.Join(is.rootDir, repo, "blobs", "sha256")
+ func (is *ImageStore) GetAllBlobs(repo string) ([]godigest.Digest, error) {
+ blobsDir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir)

- files, err := is.storeDriver.List(dir)
- if err != nil {
- if errors.As(err, &driver.PathNotFoundError{}) {
- is.log.Debug().Msg("empty rootDir")
-
- return []string{}, nil
- }
-
- return []string{}, err
- }
-
- ret := []string{}
-
- for _, file := range files {
- ret = append(ret, filepath.Base(file))
- }
+ algorithms := []godigest.Algorithm{
+ godigest.SHA256,
+ godigest.SHA384,
+ godigest.SHA512,
+ }
+
+ ret := []godigest.Digest{}
+
+ for _, algorithm := range algorithms {
+ dir := path.Join(blobsDir, algorithm.String())
+
+ files, err := is.storeDriver.List(dir)
+ if err != nil {
+ if errors.As(err, &driver.PathNotFoundError{}) {
+ continue
+ }
+
+ return []godigest.Digest{}, err
+ }
+
+ for _, file := range files {
+ digest := godigest.NewDigestFromEncoded(algorithm, filepath.Base(file))
+ ret = append(ret, digest)
+ }
+ }
+
+ if len(ret) == 0 {
+ is.log.Debug().Str("directory", blobsDir).Msg("empty blobs directory")
+ }

return ret, nil
@@ -1750,13 +1772,19 @@ func (is *ImageStore) GetNextDigestWithBlobPaths(repos []string, lastDigests []g
if fileInfo.IsDir() {
// skip repositories not found in repos
repo := path.Base(fileInfo.Path())
-
- if !zcommon.Contains(repos, repo) && repo != "blobs" && repo != "sha256" {
+ if !zcommon.Contains(repos, repo) &&
+ repo != ispec.ImageBlobsDir &&
+ repo != godigest.SHA256.String() &&
+ repo != godigest.SHA384.String() &&
+ repo != godigest.SHA512.String() {
return driver.ErrSkipDir
}
}

- blobDigest := godigest.NewDigestFromEncoded("sha256", path.Base(fileInfo.Path()))
+ digestHash := path.Base(fileInfo.Path())
+ digestAlgorithm := godigest.Algorithm(path.Base(path.Dir(fileInfo.Path())))
+
+ blobDigest := godigest.NewDigestFromEncoded(digestAlgorithm, digestHash)
if err := blobDigest.Validate(); err != nil { //nolint: nilerr
return nil //nolint: nilerr // ignore files which are not blobs
}
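
Going the other way, the walker above now recovers the algorithm from the parent directory of each blob file rather than assuming sha256. A standalone sketch of that derivation (digestFromBlobPath is a hypothetical helper, not part of the commit):

package main

import (
    _ "crypto/sha512" // register sha384 and sha512 hash implementations
    "fmt"
    "path"

    godigest "github.com/opencontainers/go-digest"
)

// digestFromBlobPath rebuilds a typed digest from a .../blobs/<algorithm>/<encoded>
// path and rejects files that are not blobs.
func digestFromBlobPath(blobPath string) (godigest.Digest, error) {
    encoded := path.Base(blobPath)
    algorithm := godigest.Algorithm(path.Base(path.Dir(blobPath)))

    digest := godigest.NewDigestFromEncoded(algorithm, encoded)
    if err := digest.Validate(); err != nil {
        return "", err // e.g. index.json or oci-layout, not a blob
    }

    return digest, nil
}

func main() {
    d := godigest.SHA384.FromString("example")
    p := path.Join("/var/lib/registry/myrepo/blobs", d.Algorithm().String(), d.Encoded())

    fmt.Println(digestFromBlobPath(p)) // prints the rebuilt digest and <nil>
}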
2 changes: 1 addition & 1 deletion pkg/storage/types/types.go
@@ -61,7 +61,7 @@ type ImageStore interface { //nolint:interfacebloat
RunDedupeBlobs(interval time.Duration, sch *scheduler.Scheduler)
RunDedupeForDigest(digest godigest.Digest, dedupe bool, duplicateBlobs []string) error
GetNextDigestWithBlobPaths(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error)
- GetAllBlobs(repo string) ([]string, error)
+ GetAllBlobs(repo string) ([]godigest.Digest, error)
PopulateStorageMetrics(interval time.Duration, sch *scheduler.Scheduler)
}

6 changes: 3 additions & 3 deletions pkg/test/mocks/image_store_mock.go
@@ -52,7 +52,7 @@ type MockedImageStore struct {
RunDedupeBlobsFn func(interval time.Duration, sch *scheduler.Scheduler)
RunDedupeForDigestFn func(digest godigest.Digest, dedupe bool, duplicateBlobs []string) error
GetNextDigestWithBlobPathsFn func(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error)
- GetAllBlobsFn func(repo string) ([]string, error)
+ GetAllBlobsFn func(repo string) ([]godigest.Digest, error)
CleanupRepoFn func(repo string, blobs []godigest.Digest, removeRepo bool) (int, error)
PutIndexContentFn func(repo string, index ispec.Index) error
PopulateStorageMetricsFn func(interval time.Duration, sch *scheduler.Scheduler)
@@ -164,12 +164,12 @@ func (is MockedImageStore) GetImageTags(name string) ([]string, error) {
return []string{}, nil
}

- func (is MockedImageStore) GetAllBlobs(repo string) ([]string, error) {
+ func (is MockedImageStore) GetAllBlobs(repo string) ([]godigest.Digest, error) {
if is.GetAllBlobsFn != nil {
return is.GetAllBlobsFn(repo)
}

- return []string{}, nil
+ return []godigest.Digest{}, nil
}

func (is MockedImageStore) DeleteImageManifest(name string, reference string, detectCollision bool) error {
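
A hypothetical test snippet (not part of this commit) showing how the updated mock can hand digests of mixed algorithms to code under test; it assumes the usual testing, godigest, crypto hash and zot mocks imports:

func TestGetAllBlobsAnyAlgorithm(t *testing.T) {
    imgStore := mocks.MockedImageStore{
        GetAllBlobsFn: func(repo string) ([]godigest.Digest, error) {
            return []godigest.Digest{
                godigest.SHA256.FromString("blob-a"),
                godigest.SHA512.FromString("blob-b"),
            }, nil
        },
    }

    blobs, err := imgStore.GetAllBlobs("myrepo")
    if err != nil || len(blobs) != 2 {
        t.Fatalf("unexpected result: %v %v", blobs, err)
    }
}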
