Merge pull request distribution#556 from stevvooe/remove-uuid-dependency
Replace uuid dependency with internal library
stevvooe committed May 26, 2015
2 parents 6019605 + f8c0086 commit 5f553b3
Showing 4 changed files with 14 additions and 14 deletions.
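
Before the per-file hunks, it helps to see the shape of the API the new call sites assume. The internal package itself is not part of this commit view; the sketch below is inferred from the usage in the diff (a Generate() that returns a value with a String() method, and a Parse() that returns an error) and is only an approximation of github.com/docker/distribution/uuid. The version-4 layout, the validation regexp, and panicking on entropy failure are assumptions, not confirmed behavior of the real package.

// Minimal sketch of an internal uuid package, inferred from the call sites
// in the diff below. Details (version-4 generation, validation regexp,
// panic on entropy failure) are assumptions.
package uuid

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"regexp"
	"strings"
)

// UUID holds the 16 raw bytes of a version 4 (random) UUID.
type UUID [16]byte

var format = regexp.MustCompile(
	`^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$`)

// Generate returns a new random UUID.
func Generate() UUID {
	var u UUID
	if _, err := rand.Read(u[:]); err != nil {
		panic(err) // assumption: treat entropy failure as fatal
	}
	u[6] = (u[6] & 0x0f) | 0x40 // version 4
	u[8] = (u[8] & 0x3f) | 0x80 // RFC 4122 variant
	return u
}

// Parse reads a UUID from its canonical string form. Unlike go-uuid's Parse,
// which returned a nil-able value, malformed input is reported as an error.
func Parse(s string) (UUID, error) {
	var u UUID
	if !format.MatchString(s) {
		return u, fmt.Errorf("invalid uuid: %q", s)
	}
	b, err := hex.DecodeString(strings.Replace(s, "-", "", -1))
	if err != nil {
		return u, err
	}
	copy(u[:], b)
	return u, nil
}

// String renders the UUID in canonical 8-4-4-4-12 form.
func (u UUID) String() string {
	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:16])
}

With that shape in mind, every hunk below is either the import swap or one of two call-site substitutions: uuid.New() becomes uuid.Generate().String(), and uuid.Parse() moves from a nil check to an error check.
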
8 changes: 4 additions & 4 deletions docs/client/repository_test.go
@@ -12,7 +12,7 @@ import (
"testing"
"time"

"code.google.com/p/go-uuid/uuid"
"github.com/docker/distribution/uuid"

"github.com/docker/distribution"
"github.com/docker/distribution/context"
@@ -141,7 +141,7 @@ func TestBlobUploadChunked(t *testing.T) {
b1[513:1024],
}
repo := "test.example.com/uploadrepo"
- uuids := []string{uuid.New()}
+ uuids := []string{uuid.Generate().String()}
m = append(m, testutil.RequestResponseMapping{
Request: testutil.Request{
Method: "POST",
@@ -159,7 +159,7 @@ func TestBlobUploadChunked(t *testing.T) {
})
offset := 0
for i, chunk := range chunks {
- uuids = append(uuids, uuid.New())
+ uuids = append(uuids, uuid.Generate().String())
newOffset := offset + len(chunk)
m = append(m, testutil.RequestResponseMapping{
Request: testutil.Request{
@@ -256,7 +256,7 @@ func TestBlobUploadMonolithic(t *testing.T) {
dgst, b1 := newRandomBlob(1024)
var m testutil.RequestResponseMap
repo := "test.example.com/uploadrepo"
- uploadID := uuid.New()
+ uploadID := uuid.Generate().String()
m = append(m, testutil.RequestResponseMapping{
Request: testutil.Request{
Method: "POST",
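
Every change in repository_test.go is the same mechanical substitution: go-uuid's uuid.New() returns the identifier directly as a string, while the internal package's Generate() returns a UUID value, so call sites that need a string add an explicit .String(). In outline, using the identifiers from the hunks above:

// go-uuid: New() already yields a string.
uploadID := uuid.New()

// internal package: Generate() yields a UUID value; render it where a string is needed.
uploadID := uuid.Generate().String()
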
4 changes: 2 additions & 2 deletions docs/storage/linkedblobstore.go
@@ -4,11 +4,11 @@ import (
"net/http"
"time"

"code.google.com/p/go-uuid/uuid"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/uuid"
)

// linkedBlobStore provides a full BlobService that namespaces the blobs to a
@@ -85,7 +85,7 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte)
func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) {
context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer")

- uuid := uuid.New()
+ uuid := uuid.Generate().String()
startedAt := time.Now().UTC()

path, err := lbs.blobStore.pm.path(uploadDataPathSpec{
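
The substitution in linkedBlobStore.Create has one wrinkle worth noting: the result is assigned to a local variable named uuid, which shadows the imported uuid package for the rest of the function. A small self-contained sketch of that effect (not code from this commit):

package main

import (
	"fmt"

	"github.com/docker/distribution/uuid"
)

func main() {
	// As in linkedBlobStore.Create above, the right-hand side still refers to
	// the package; afterwards the local string shadows it in this scope.
	uuid := uuid.Generate().String()
	fmt.Println(uuid)

	// A further call like uuid.Generate() would not compile here, because
	// uuid now names a string, not the package.
}
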
6 changes: 3 additions & 3 deletions docs/storage/purgeuploads.go
@@ -5,10 +5,10 @@ import (
"strings"
"time"

"code.google.com/p/go-uuid/uuid"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storageDriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/uuid"
)

// uploadData stored the location of temporary files created during a layer upload
@@ -116,8 +116,8 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv
func uUIDFromPath(path string) (string, bool) {
components := strings.Split(path, "/")
for i := len(components) - 1; i >= 0; i-- {
- if uuid := uuid.Parse(components[i]); uuid != nil {
- return uuid.String(), i == len(components)-1
+ if u, err := uuid.Parse(components[i]); err == nil {
+ return u.String(), i == len(components)-1
}
}
return "", false
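
The one place where the new API changes control flow rather than just spelling is uUIDFromPath: go-uuid's Parse returned a nil-able value, while the internal package reports malformed input through an error, so the guard flips from uuid != nil to err == nil. A self-contained sketch of the updated helper under that assumption (the example path is illustrative, not the registry's exact upload layout):

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/uuid"
)

// uUIDFromPath walks the path components from the end and returns the first
// one that parses as a UUID, plus whether it was the final component.
func uUIDFromPath(path string) (string, bool) {
	components := strings.Split(path, "/")
	for i := len(components) - 1; i >= 0; i-- {
		if u, err := uuid.Parse(components[i]); err == nil {
			return u.String(), i == len(components)-1
		}
	}
	return "", false
}

func main() {
	id, ok := uUIDFromPath("repositories/test-repo/_uploads/" + uuid.Generate().String())
	fmt.Println(id, ok) // prints the generated id and true
}
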
10 changes: 5 additions & 5 deletions docs/storage/purgeuploads_test.go
@@ -6,10 +6,10 @@ import (
"testing"
"time"

"code.google.com/p/go-uuid/uuid"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/uuid"
)

var pm = defaultPathMapper
@@ -18,7 +18,7 @@ func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.
d := inmemory.New()
ctx := context.Background()
for i := 0; i < numUploads; i++ {
- addUploads(ctx, t, d, uuid.New(), repoName, startedAt)
+ addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt)
}
return d, ctx
}
@@ -73,7 +73,7 @@ func TestPurgeAll(t *testing.T) {
fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo)

// Ensure > 1 repos are purged
- addUploads(ctx, t, fs, uuid.New(), "test-repo2", oneHourAgo)
+ addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo)
uploadCount++

deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
@@ -95,7 +95,7 @@ func TestPurgeSome(t *testing.T) {
newUploadCount := 4

for i := 0; i < newUploadCount; i++ {
- addUploads(ctx, t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour))
+ addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour))
}

deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
@@ -115,7 +115,7 @@ func TestPurgeOnlyUploads(t *testing.T) {

// Create a directory tree outside _uploads and ensure
// these files aren't deleted.
- dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.New()})
+ dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()})
if err != nil {
t.Fatalf(err.Error())
}
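
The purge tests change only in how they mint upload ids; the helper signatures stay the same. A hedged sketch of how they compose after this commit, written in the same package and using only the calls visible in the hunks above (the type of the deleted return value and the one-deletion-per-upload count are assumptions):

// Sketch only: helper behavior is inferred from the hunks above, not from
// the repository's actual test file.
func TestPurgeSketch(t *testing.T) {
	oneHourAgo := time.Now().Add(-1 * time.Hour)

	// Seed five stale uploads in one repository via the test helper.
	fs, ctx := testUploadFS(t, 5, "test-repo", oneHourAgo)

	// Add a sixth upload in a second repository, keyed by a freshly generated id.
	addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo)

	// Purge everything that started before "now".
	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
	if len(errs) != 0 {
		t.Errorf("unexpected errors: %v", errs)
	}
	if len(deleted) != 6 { // assumption: one deleted entry per upload
		t.Errorf("expected 6 deletions, got %d", len(deleted))
	}
}
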
