From 1ff0f8c416206fbd7f9bf80bda0c3a7c516a6fa1 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Tue, 28 Jun 2016 22:48:06 -0700 Subject: [PATCH] unify KeyMemoryStore and KeyFileStore in trustmanager Signed-off-by: David Lawrence (github: endophage) --- .gitignore | 2 + client/backwards_compatibility_test.go | 4 +- client/client.go | 20 +- client/client_test.go | 12 +- client/client_update_test.go | 32 +- client/delegations.go | 2 +- client/helpers.go | 4 +- cmd/notary/delegations.go | 3 +- cmd/notary/delegations_test.go | 14 +- cmd/notary/integration_test.go | 22 +- cmd/notary/keys_test.go | 15 +- cmd/notary/prettyprint_test.go | 7 +- cryptoservice/certificate.go | 4 +- cryptoservice/certificate_test.go | 3 +- cryptoservice/crypto_service.go | 7 +- cryptoservice/crypto_service_test.go | 11 +- server/handlers/default_test.go | 2 +- server/integration_test.go | 4 +- server/server_test.go | 14 +- signer/client/signer_trust_test.go | 3 +- signer/keydbstore/keydbstore_test.go | 13 +- {tuf/store => storage}/errors.go | 13 +- storage/filestore.go | 205 ++++++++ {trustmanager => storage}/filestore_test.go | 206 ++++++-- {tuf/store => storage}/httpstore.go | 23 +- {tuf/store => storage}/httpstore_test.go | 127 +---- storage/interface.go | 8 - {tuf/store => storage}/interfaces.go | 22 +- {tuf/store => storage}/memorystore.go | 75 +-- {tuf/store => storage}/memorystore_test.go | 24 +- {tuf/store => storage}/offlinestore.go | 32 +- {tuf/store => storage}/offlinestore_test.go | 12 +- {tuf/store => storage}/store_test.go | 20 +- trustmanager/filestore.go | 150 ------ trustmanager/interfaces.go | 86 ++++ trustmanager/keyfilestore.go | 476 ------------------ trustmanager/keystore.go | 373 ++++++++++++-- ...{keyfilestore_test.go => keystore_test.go} | 29 +- trustmanager/memorystore.go | 54 -- trustmanager/store.go | 42 -- trustmanager/yubikey/yubikeystore.go | 3 +- trustmanager/yubikey/yubikeystore_test.go | 7 +- trustpinning/certs.go | 12 +- trustpinning/certs_test.go | 17 +- 
trustpinning/trustpin.go | 7 +- tuf/client/client.go | 14 +- tuf/signed/ed25519.go | 3 +- tuf/signed/sign_test.go | 7 +- tuf/signed/verifiers_test.go | 35 ++ tuf/store/filestore.go | 102 ---- tuf/store/filestore_test.go | 138 ----- tuf/testutils/corrupt_memorystore.go | 20 +- tuf/testutils/interfaces/cryptoservice.go | 14 +- tuf/testutils/repo.go | 5 +- tuf/testutils/swizzler.go | 60 +-- tuf/testutils/swizzler_test.go | 52 +- tuf/tuf_test.go | 11 +- tuf/utils/util.go | 109 ---- tuf/utils/util_test.go | 91 ---- tuf/utils/utils.go | 88 ++++ tuf/utils/utils_test.go | 82 +++ .../x509utils.go => tuf/utils/x509.go | 212 ++++---- .../utils/x509_test.go | 12 +- utils/configuration_test.go | 6 +- 64 files changed, 1496 insertions(+), 1786 deletions(-) rename {tuf/store => storage}/errors.go (59%) create mode 100644 storage/filestore.go rename {trustmanager => storage}/filestore_test.go (55%) rename {tuf/store => storage}/httpstore.go (92%) rename {tuf/store => storage}/httpstore_test.go (56%) delete mode 100644 storage/interface.go rename {tuf/store => storage}/interfaces.go (61%) rename {tuf/store => storage}/memorystore.go (53%) rename {tuf/store => storage}/memorystore_test.go (72%) rename {tuf/store => storage}/offlinestore.go (52%) rename {tuf/store => storage}/offlinestore_test.go (74%) rename {tuf/store => storage}/store_test.go (66%) delete mode 100644 trustmanager/filestore.go create mode 100644 trustmanager/interfaces.go delete mode 100644 trustmanager/keyfilestore.go rename trustmanager/{keyfilestore_test.go => keystore_test.go} (97%) delete mode 100644 trustmanager/memorystore.go delete mode 100644 trustmanager/store.go delete mode 100644 tuf/store/filestore.go delete mode 100644 tuf/store/filestore_test.go delete mode 100644 tuf/utils/util.go delete mode 100644 tuf/utils/util_test.go rename trustmanager/x509utils.go => tuf/utils/x509.go (97%) rename trustmanager/x509utils_test.go => tuf/utils/x509_test.go (94%) diff --git a/.gitignore b/.gitignore index 
8439935e6e..7cf4a771e7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +/.vscode /cmd/notary-server/notary-server /cmd/notary-server/local.config.json /cmd/notary-signer/local.config.json @@ -8,4 +9,5 @@ cross *.swp .idea *.iml +*.test coverage.out diff --git a/client/backwards_compatibility_test.go b/client/backwards_compatibility_test.go index de10b1286c..08e65e4790 100644 --- a/client/backwards_compatibility_test.go +++ b/client/backwards_compatibility_test.go @@ -13,9 +13,9 @@ import ( "time" "github.com/docker/notary/passphrase" + store "github.com/docker/notary/storage" "github.com/docker/notary/trustpinning" "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/store" "github.com/stretchr/testify/require" ) @@ -101,7 +101,7 @@ func Test0Dot1RepoFormat(t *testing.T) { // delete the timestamp metadata, since the server will ignore the uploaded // one and try to create a new one from scratch, which will be the wrong version - require.NoError(t, repo.fileStore.RemoveMeta(data.CanonicalTimestampRole)) + require.NoError(t, repo.fileStore.Remove(data.CanonicalTimestampRole)) // rotate the timestamp key, since the server doesn't have that one err = repo.RotateKey(data.CanonicalTimestampRole, true) diff --git a/client/client.go b/client/client.go index eababee600..4b1550212c 100644 --- a/client/client.go +++ b/client/client.go @@ -16,13 +16,13 @@ import ( "github.com/docker/notary" "github.com/docker/notary/client/changelist" "github.com/docker/notary/cryptoservice" + store "github.com/docker/notary/storage" "github.com/docker/notary/trustmanager" "github.com/docker/notary/trustpinning" "github.com/docker/notary/tuf" tufclient "github.com/docker/notary/tuf/client" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/utils" ) @@ -159,7 +159,7 @@ func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) { return nil, err } - x509PublicKey 
:= trustmanager.CertToKey(cert) + x509PublicKey := utils.CertToKey(cert) if x509PublicKey == nil { return nil, fmt.Errorf( "cannot use regenerated certificate: format %s", cert.PublicKeyAlgorithm) @@ -668,7 +668,7 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error { return err } - return remote.SetMultiMeta(updatedFiles) + return remote.SetMulti(updatedFiles) } // bootstrapRepo loads the repository from the local file system (i.e. @@ -682,7 +682,7 @@ func (r *NotaryRepository) bootstrapRepo() error { logrus.Debugf("Loading trusted collection.") for _, role := range data.BaseRoles { - jsonBytes, err := r.fileStore.GetMeta(role, store.NoSizeLimit) + jsonBytes, err := r.fileStore.GetSized(role, store.NoSizeLimit) if err != nil { if _, ok := err.(store.ErrMetaNotFound); ok && // server snapshots are supported, and server timestamp management @@ -714,7 +714,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error { if err != nil { return err } - err = r.fileStore.SetMeta(data.CanonicalRootRole, rootJSON) + err = r.fileStore.Set(data.CanonicalRootRole, rootJSON) if err != nil { return err } @@ -735,7 +735,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error { for role, blob := range targetsToSave { parentDir := filepath.Dir(role) os.MkdirAll(parentDir, 0755) - r.fileStore.SetMeta(role, blob) + r.fileStore.Set(role, blob) } if ignoreSnapshot { @@ -747,7 +747,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error { return err } - return r.fileStore.SetMeta(data.CanonicalSnapshotRole, snapshotJSON) + return r.fileStore.Set(data.CanonicalSnapshotRole, snapshotJSON) } // returns a properly constructed ErrRepositoryNotExist error based on this @@ -817,7 +817,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl // during update which will cause us to download a new root and perform a rotation. 
// If we have an old root, and it's valid, then we overwrite the newBuilder to be one // preloaded with the old root or one which uses the old root for trust bootstrapping. - if rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, store.NoSizeLimit); err == nil { + if rootJSON, err := r.fileStore.GetSized(data.CanonicalRootRole, store.NoSizeLimit); err == nil { // if we can't load the cached root, fail hard because that is how we pin trust if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil { return nil, err @@ -844,7 +844,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl // if remote store successfully set up, try and get root from remote // We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB) - tmpJSON, err := remote.GetMeta(data.CanonicalRootRole, store.NoSizeLimit) + tmpJSON, err := remote.GetSized(data.CanonicalRootRole, store.NoSizeLimit) if err != nil { // we didn't have a root in cache and were unable to load one from // the server. Nothing we can do but error. 
@@ -857,7 +857,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl return nil, err } - err = r.fileStore.SetMeta(data.CanonicalRootRole, tmpJSON) + err = r.fileStore.Set(data.CanonicalRootRole, tmpJSON) if err != nil { // if we can't write cache we should still continue, just log error logrus.Errorf("could not save root to cache: %s", err.Error()) diff --git a/client/client_test.go b/client/client_test.go index 6daffaafa2..001a82c8b2 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -29,11 +29,11 @@ import ( "github.com/docker/notary/passphrase" "github.com/docker/notary/server" "github.com/docker/notary/server/storage" + store "github.com/docker/notary/storage" "github.com/docker/notary/trustmanager" "github.com/docker/notary/trustpinning" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/utils" "github.com/docker/notary/tuf/validation" ) @@ -102,7 +102,7 @@ func simpleTestServer(t *testing.T, roles ...string) ( mux := http.NewServeMux() for _, role := range roles { - key, err := trustmanager.GenerateECDSAKey(rand.Reader) + key, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) keys[role] = key @@ -1514,7 +1514,7 @@ func testValidateRootKey(t *testing.T, rootType string) { for _, keyid := range keyids { key, ok := decodedRoot.Keys[keyid] require.True(t, ok, "key id not found in keys") - _, err := trustmanager.LoadCertFromPEM(key.Public()) + _, err := utils.LoadCertFromPEM(key.Public()) require.NoError(t, err, "key is not a valid cert") } } @@ -1932,7 +1932,7 @@ func testPublishBadMetadata(t *testing.T, roleName string, repo *NotaryRepositor addTarget(t, repo, "v1", "../fixtures/intermediate-ca.crt") // readable, but corrupt file - repo.fileStore.SetMeta(roleName, []byte("this isn't JSON")) + repo.fileStore.Set(roleName, []byte("this isn't JSON")) err := repo.Publish() if succeeds { require.NoError(t, err) 
@@ -2029,7 +2029,7 @@ func createKey(t *testing.T, repo *NotaryRepository, role string, x509 bool) dat privKey, role, start, start.AddDate(1, 0, 0), ) require.NoError(t, err) - return data.NewECDSAx509PublicKey(trustmanager.CertToPEM(cert)) + return data.NewECDSAx509PublicKey(utils.CertToPEM(cert)) } return key } @@ -2145,7 +2145,7 @@ func testPublishTargetsDelegationScopeFailIfNoKeys(t *testing.T, clearCache bool // generate a key that isn't in the cryptoservice, so we can't sign this // one - aPrivKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + aPrivKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "error generating key that is not in our cryptoservice") aPubKey := data.PublicKeyFromPrivate(aPrivKey) diff --git a/client/client_update_test.go b/client/client_update_test.go index ef9c3c0edd..94c2d96f47 100644 --- a/client/client_update_test.go +++ b/client/client_update_test.go @@ -17,10 +17,10 @@ import ( "github.com/docker/go/canonical/json" "github.com/docker/notary" "github.com/docker/notary/passphrase" + store "github.com/docker/notary/storage" "github.com/docker/notary/trustpinning" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/testutils" "github.com/gorilla/mux" "github.com/stretchr/testify/require" @@ -66,7 +66,7 @@ func readOnlyServer(t *testing.T, cache store.MetadataStore, notFoundStatus int, m := mux.NewRouter() handler := func(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) - metaBytes, err := cache.GetMeta(vars["role"], store.NoSizeLimit) + metaBytes, err := cache.GetSized(vars["role"], store.NoSizeLimit) if _, ok := err.(store.ErrMetaNotFound); ok { w.WriteHeader(notFoundStatus) } else { @@ -84,11 +84,11 @@ type unwritableStore struct { roleToNotWrite string } -func (u *unwritableStore) SetMeta(role string, serverMeta []byte) error { +func (u *unwritableStore) Set(role string, serverMeta []byte) error { if role 
== u.roleToNotWrite { return fmt.Errorf("Non-writable") } - return u.MetadataStore.SetMeta(role, serverMeta) + return u.MetadataStore.Set(role, serverMeta) } // Update can succeed even if we cannot write any metadata to the repo (assuming @@ -111,7 +111,7 @@ func TestUpdateSucceedsEvenIfCannotWriteNewRepo(t *testing.T) { require.NoError(t, err) for r, expected := range serverMeta { - actual, err := repo.fileStore.GetMeta(r, store.NoSizeLimit) + actual, err := repo.fileStore.GetSized(r, store.NoSizeLimit) if r == role { require.Error(t, err) require.IsType(t, store.ErrMetaNotFound{}, err, @@ -158,7 +158,7 @@ func TestUpdateSucceedsEvenIfCannotWriteExistingRepo(t *testing.T) { require.NoError(t, err) for r, expected := range serverMeta { - actual, err := repo.fileStore.GetMeta(r, store.NoSizeLimit) + actual, err := repo.fileStore.GetSized(r, store.NoSizeLimit) require.NoError(t, err, "problem getting repo metadata for %s", r) if role == r { require.False(t, bytes.Equal(expected, actual), @@ -244,12 +244,12 @@ func TestUpdateReplacesCorruptOrMissingMetadata(t *testing.T) { require.Error(t, err, "%s for %s: expected to error when bootstrapping root", text, role) // revert our original metadata for role := range origMeta { - require.NoError(t, repo.fileStore.SetMeta(role, origMeta[role])) + require.NoError(t, repo.fileStore.Set(role, origMeta[role])) } } else { require.NoError(t, err) for r, expected := range serverMeta { - actual, err := repo.fileStore.GetMeta(r, store.NoSizeLimit) + actual, err := repo.fileStore.GetSized(r, store.NoSizeLimit) require.NoError(t, err, "problem getting repo metadata for %s", role) require.True(t, bytes.Equal(expected, actual), "%s for %s: expected to recover after update", text, role) @@ -298,7 +298,7 @@ func TestUpdateFailsIfServerRootKeyChangedWithoutMultiSign(t *testing.T) { text, messItUp := expt.desc, expt.swizzle for _, forWrite := range []bool{true, false} { require.NoError(t, messItUp(repoSwizzler, data.CanonicalRootRole), "could 
not fuzz root (%s)", text) - messedUpMeta, err := repo.fileStore.GetMeta(data.CanonicalRootRole, store.NoSizeLimit) + messedUpMeta, err := repo.fileStore.GetSized(data.CanonicalRootRole, store.NoSizeLimit) if _, ok := err.(store.ErrMetaNotFound); ok { // one of the ways to mess up is to delete metadata @@ -307,7 +307,7 @@ func TestUpdateFailsIfServerRootKeyChangedWithoutMultiSign(t *testing.T) { require.NoError(t, err) // revert our original metadata for role := range origMeta { - require.NoError(t, repo.fileStore.SetMeta(role, origMeta[role])) + require.NoError(t, repo.fileStore.Set(role, origMeta[role])) } } else { @@ -321,7 +321,7 @@ func TestUpdateFailsIfServerRootKeyChangedWithoutMultiSign(t *testing.T) { // same because it has failed to update. for role, expected := range origMeta { if role != data.CanonicalTimestampRole && role != data.CanonicalSnapshotRole { - actual, err := repo.fileStore.GetMeta(role, store.NoSizeLimit) + actual, err := repo.fileStore.GetSized(role, store.NoSizeLimit) require.NoError(t, err, "problem getting repo metadata for %s", role) if role == data.CanonicalRootRole { @@ -336,7 +336,7 @@ func TestUpdateFailsIfServerRootKeyChangedWithoutMultiSign(t *testing.T) { // revert our original root metadata require.NoError(t, - repo.fileStore.SetMeta(data.CanonicalRootRole, origMeta[data.CanonicalRootRole])) + repo.fileStore.Set(data.CanonicalRootRole, origMeta[data.CanonicalRootRole])) } } } @@ -967,7 +967,7 @@ func waysToMessUpServerNonRootPerRole(t *testing.T) map[string][]swizzleExpectat keyIDs = append(keyIDs, k) } // add the keys from root too - rootMeta, err := s.MetadataCache.GetMeta(data.CanonicalRootRole, store.NoSizeLimit) + rootMeta, err := s.MetadataCache.GetSized(data.CanonicalRootRole, store.NoSizeLimit) require.NoError(t, err) signedRoot := &data.SignedRoot{} @@ -1349,7 +1349,7 @@ func signSerializeAndUpdateRoot(t *testing.T, signedRoot data.SignedRoot, require.NoError(t, signed.Sign(serverSwizzler.CryptoService, signedObj, 
keys, len(keys), nil)) rootBytes, err := json.Marshal(signedObj) require.NoError(t, err) - require.NoError(t, serverSwizzler.MetadataCache.SetMeta(data.CanonicalRootRole, rootBytes)) + require.NoError(t, serverSwizzler.MetadataCache.Set(data.CanonicalRootRole, rootBytes)) // update the hashes on both snapshot and timestamp require.NoError(t, serverSwizzler.UpdateSnapshotHashes()) @@ -1374,7 +1374,7 @@ func TestValidateRootRotationWithOldRole(t *testing.T) { // --- key is saved, but doesn't matter at all for rotation if we're already on // --- the root metadata with the 3 keys) - rootBytes, err := serverSwizzler.MetadataCache.GetMeta(data.CanonicalRootRole, store.NoSizeLimit) + rootBytes, err := serverSwizzler.MetadataCache.GetSized(data.CanonicalRootRole, store.NoSizeLimit) require.NoError(t, err) signedRoot := data.SignedRoot{} require.NoError(t, json.Unmarshal(rootBytes, &signedRoot)) @@ -1626,7 +1626,7 @@ func TestRootOnDiskTrustPinning(t *testing.T) { defer os.RemoveAll(repo.baseDir) repo.trustPinning = restrictiveTrustPinning // put root on disk - require.NoError(t, repo.fileStore.SetMeta(data.CanonicalRootRole, meta[data.CanonicalRootRole])) + require.NoError(t, repo.fileStore.Set(data.CanonicalRootRole, meta[data.CanonicalRootRole])) require.NoError(t, repo.Update(false)) } diff --git a/client/delegations.go b/client/delegations.go index 5925466e2a..5fbee5af2b 100644 --- a/client/delegations.go +++ b/client/delegations.go @@ -8,8 +8,8 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/notary" "github.com/docker/notary/client/changelist" + store "github.com/docker/notary/storage" "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/utils" ) diff --git a/client/helpers.go b/client/helpers.go index bcd3291caa..734fe61072 100644 --- a/client/helpers.go +++ b/client/helpers.go @@ -9,9 +9,9 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/notary/client/changelist" - tuf 
"github.com/docker/notary/tuf" + store "github.com/docker/notary/storage" + "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/utils" ) diff --git a/cmd/notary/delegations.go b/cmd/notary/delegations.go index a1ebcd8bb6..c3cbe8679e 100644 --- a/cmd/notary/delegations.go +++ b/cmd/notary/delegations.go @@ -7,7 +7,6 @@ import ( "github.com/docker/notary" notaryclient "github.com/docker/notary/client" - "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" "github.com/spf13/cobra" @@ -234,7 +233,7 @@ func (d *delegationCommander) delegationAdd(cmd *cobra.Command, args []string) e } // Parse PEM bytes into type PublicKey - pubKey, err := trustmanager.ParsePEMPublicKey(pubKeyBytes) + pubKey, err := utils.ParsePEMPublicKey(pubKeyBytes) if err != nil { return fmt.Errorf("unable to parse valid public key certificate from PEM file %s: %v", pubKeyPath, err) } diff --git a/cmd/notary/delegations_test.go b/cmd/notary/delegations_test.go index e80f5ca460..399d9dcfbd 100644 --- a/cmd/notary/delegations_test.go +++ b/cmd/notary/delegations_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/docker/notary/cryptoservice" - "github.com/docker/notary/trustmanager" + "github.com/docker/notary/tuf/utils" "github.com/spf13/viper" "github.com/stretchr/testify/require" ) @@ -35,7 +35,7 @@ func TestAddInvalidDelegationName(t *testing.T) { tempFile, err := ioutil.TempFile("/tmp", "pemfile") require.NoError(t, err) cert, _, err := generateValidTestCert() - _, err = tempFile.Write(trustmanager.CertToPEM(cert)) + _, err = tempFile.Write(utils.CertToPEM(cert)) require.NoError(t, err) tempFile.Close() defer os.Remove(tempFile.Name()) @@ -56,7 +56,7 @@ func TestAddInvalidDelegationCert(t *testing.T) { tempFile, err := ioutil.TempFile("/tmp", "pemfile") require.NoError(t, err) cert, _, err := generateExpiredTestCert() - _, err = 
tempFile.Write(trustmanager.CertToPEM(cert)) + _, err = tempFile.Write(utils.CertToPEM(cert)) require.NoError(t, err) tempFile.Close() defer os.Remove(tempFile.Name()) @@ -77,7 +77,7 @@ func TestAddInvalidShortPubkeyCert(t *testing.T) { tempFile, err := ioutil.TempFile("/tmp", "pemfile") require.NoError(t, err) cert, _, err := generateShortRSAKeyTestCert() - _, err = tempFile.Write(trustmanager.CertToPEM(cert)) + _, err = tempFile.Write(utils.CertToPEM(cert)) require.NoError(t, err) tempFile.Close() defer os.Remove(tempFile.Name()) @@ -142,7 +142,7 @@ func TestRemoveInvalidNumArgs(t *testing.T) { } func generateValidTestCert() (*x509.Certificate, string, error) { - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) if err != nil { return nil, "", err } @@ -157,7 +157,7 @@ func generateValidTestCert() (*x509.Certificate, string, error) { } func generateExpiredTestCert() (*x509.Certificate, string, error) { - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) if err != nil { return nil, "", err } @@ -174,7 +174,7 @@ func generateExpiredTestCert() (*x509.Certificate, string, error) { func generateShortRSAKeyTestCert() (*x509.Certificate, string, error) { // 1024 bits is too short - privKey, err := trustmanager.GenerateRSAKey(rand.Reader, 1024) + privKey, err := utils.GenerateRSAKey(rand.Reader, 1024) if err != nil { return nil, "", err } diff --git a/cmd/notary/integration_test.go b/cmd/notary/integration_test.go index e66d7536ac..3337865767 100644 --- a/cmd/notary/integration_test.go +++ b/cmd/notary/integration_test.go @@ -315,19 +315,19 @@ func TestClientDelegationsInteraction(t *testing.T) { tempFile, err := ioutil.TempFile("", "pemfile") require.NoError(t, err) - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) startTime := time.Now() endTime := startTime.AddDate(10, 0, 0) cert, 
err := cryptoservice.GenerateCertificate(privKey, "gun", startTime, endTime) require.NoError(t, err) - _, err = tempFile.Write(trustmanager.CertToPEM(cert)) + _, err = tempFile.Write(utils.CertToPEM(cert)) require.NoError(t, err) tempFile.Close() defer os.Remove(tempFile.Name()) rawPubBytes, _ := ioutil.ReadFile(tempFile.Name()) - parsedPubKey, _ := trustmanager.ParsePEMPublicKey(rawPubBytes) + parsedPubKey, _ := utils.ParsePEMPublicKey(rawPubBytes) keyID, err := utils.CanonicalKeyID(parsedPubKey) require.NoError(t, err) @@ -403,20 +403,20 @@ func TestClientDelegationsInteraction(t *testing.T) { tempFile2, err := ioutil.TempFile("", "pemfile2") require.NoError(t, err) - privKey, err = trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err = utils.GenerateECDSAKey(rand.Reader) startTime = time.Now() endTime = startTime.AddDate(10, 0, 0) cert, err = cryptoservice.GenerateCertificate(privKey, "gun", startTime, endTime) require.NoError(t, err) - _, err = tempFile2.Write(trustmanager.CertToPEM(cert)) + _, err = tempFile2.Write(utils.CertToPEM(cert)) require.NoError(t, err) require.NoError(t, err) tempFile2.Close() defer os.Remove(tempFile2.Name()) rawPubBytes2, _ := ioutil.ReadFile(tempFile2.Name()) - parsedPubKey2, _ := trustmanager.ParsePEMPublicKey(rawPubBytes2) + parsedPubKey2, _ := utils.ParsePEMPublicKey(rawPubBytes2) keyID2, err := utils.CanonicalKeyID(parsedPubKey2) require.NoError(t, err) @@ -686,24 +686,24 @@ func TestClientDelegationsPublishing(t *testing.T) { tempFile, err := ioutil.TempFile("", "pemfile") require.NoError(t, err) - privKey, err := trustmanager.GenerateRSAKey(rand.Reader, 2048) + privKey, err := utils.GenerateRSAKey(rand.Reader, 2048) require.NoError(t, err) - privKeyBytesNoRole, err := trustmanager.KeyToPEM(privKey, "") + privKeyBytesNoRole, err := utils.KeyToPEM(privKey, "") require.NoError(t, err) - privKeyBytesWithRole, err := trustmanager.KeyToPEM(privKey, "user") + privKeyBytesWithRole, err := utils.KeyToPEM(privKey, "user") 
require.NoError(t, err) startTime := time.Now() endTime := startTime.AddDate(10, 0, 0) cert, err := cryptoservice.GenerateCertificate(privKey, "gun", startTime, endTime) require.NoError(t, err) - _, err = tempFile.Write(trustmanager.CertToPEM(cert)) + _, err = tempFile.Write(utils.CertToPEM(cert)) require.NoError(t, err) tempFile.Close() defer os.Remove(tempFile.Name()) rawPubBytes, _ := ioutil.ReadFile(tempFile.Name()) - parsedPubKey, _ := trustmanager.ParsePEMPublicKey(rawPubBytes) + parsedPubKey, _ := utils.ParsePEMPublicKey(rawPubBytes) canonicalKeyID, err := utils.CanonicalKeyID(parsedPubKey) require.NoError(t, err) diff --git a/cmd/notary/keys_test.go b/cmd/notary/keys_test.go index 0d5768743f..89d51c5f58 100644 --- a/cmd/notary/keys_test.go +++ b/cmd/notary/keys_test.go @@ -24,6 +24,7 @@ import ( "github.com/docker/notary/trustmanager" "github.com/docker/notary/trustpinning" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/stretchr/testify/require" @@ -50,7 +51,7 @@ func TestRemoveOneKeyAbort(t *testing.T) { nos := []string{"no", "NO", "AAAARGH", " N "} store := trustmanager.NewKeyMemoryStore(ret) - key, err := trustmanager.GenerateED25519Key(rand.Reader) + key, err := utils.GenerateED25519Key(rand.Reader) require.NoError(t, err) err = store.AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole, Gun: ""}, key) require.NoError(t, err) @@ -82,7 +83,7 @@ func TestRemoveOneKeyConfirm(t *testing.T) { for _, yesAnswer := range yesses { store := trustmanager.NewKeyMemoryStore(ret) - key, err := trustmanager.GenerateED25519Key(rand.Reader) + key, err := utils.GenerateED25519Key(rand.Reader) require.NoError(t, err) err = store.AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole, Gun: ""}, key) require.NoError(t, err) @@ -110,7 +111,7 @@ func TestRemoveMultikeysInvalidInput(t *testing.T) { setUp(t) in := bytes.NewBuffer([]byte("notanumber\n9999\n-3\n0")) - key, err := 
trustmanager.GenerateED25519Key(rand.Reader) + key, err := utils.GenerateED25519Key(rand.Reader) require.NoError(t, err) stores := []trustmanager.KeyStore{ @@ -159,7 +160,7 @@ func TestRemoveMultikeysAbortChoice(t *testing.T) { setUp(t) in := bytes.NewBuffer([]byte("1\nn\n")) - key, err := trustmanager.GenerateED25519Key(rand.Reader) + key, err := utils.GenerateED25519Key(rand.Reader) require.NoError(t, err) stores := []trustmanager.KeyStore{ @@ -198,7 +199,7 @@ func TestRemoveMultikeysRemoveOnlyChosenKey(t *testing.T) { setUp(t) in := bytes.NewBuffer([]byte("1\ny\n")) - key, err := trustmanager.GenerateED25519Key(rand.Reader) + key, err := utils.GenerateED25519Key(rand.Reader) require.NoError(t, err) stores := []trustmanager.KeyStore{ @@ -535,11 +536,11 @@ func TestChangeKeyPassphraseNonexistentID(t *testing.T) { func generateTempTestKeyFile(t *testing.T, role string) string { setUp(t) - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) if err != nil { return "" } - keyBytes, err := trustmanager.KeyToPEM(privKey, role) + keyBytes, err := utils.KeyToPEM(privKey, role) require.NoError(t, err) tempPrivFile, err := ioutil.TempFile("/tmp", "privfile") diff --git a/cmd/notary/prettyprint_test.go b/cmd/notary/prettyprint_test.go index 4cde238919..9eec0c64cf 100644 --- a/cmd/notary/prettyprint_test.go +++ b/cmd/notary/prettyprint_test.go @@ -15,6 +15,7 @@ import ( "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" ) @@ -56,7 +57,7 @@ func TestKeyInfoSorter(t *testing.T) { } type otherMemoryStore struct { - trustmanager.KeyMemoryStore + trustmanager.GenericKeyStore } func (l *otherMemoryStore) Name() string { @@ -85,14 +86,14 @@ func TestPrettyPrintRootAndSigningKeys(t *testing.T) { ret := passphrase.ConstantRetriever("pass") keyStores := []trustmanager.KeyStore{ 
trustmanager.NewKeyMemoryStore(ret), - &otherMemoryStore{KeyMemoryStore: *trustmanager.NewKeyMemoryStore(ret)}, + &otherMemoryStore{GenericKeyStore: *trustmanager.NewKeyMemoryStore(ret)}, } longNameShortened := "..." + strings.Repeat("z", 37) keys := make([]data.PrivateKey, 4) for i := 0; i < 4; i++ { - key, err := trustmanager.GenerateED25519Key(rand.Reader) + key, err := utils.GenerateED25519Key(rand.Reader) require.NoError(t, err) keys[i] = key } diff --git a/cryptoservice/certificate.go b/cryptoservice/certificate.go index ff6f41b473..805a169af7 100644 --- a/cryptoservice/certificate.go +++ b/cryptoservice/certificate.go @@ -7,8 +7,8 @@ import ( "fmt" "time" - "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" ) // GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval @@ -22,7 +22,7 @@ func GenerateCertificate(rootKey data.PrivateKey, gun string, startTime, endTime } func generateCertificate(signer crypto.Signer, gun string, startTime, endTime time.Time) (*x509.Certificate, error) { - template, err := trustmanager.NewCertificate(gun, startTime, endTime) + template, err := utils.NewCertificate(gun, startTime, endTime) if err != nil { return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err) } diff --git a/cryptoservice/certificate_test.go b/cryptoservice/certificate_test.go index 00ef2653fd..d7395bd02c 100644 --- a/cryptoservice/certificate_test.go +++ b/cryptoservice/certificate_test.go @@ -8,11 +8,12 @@ import ( "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" ) func TestGenerateCertificate(t *testing.T) { - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate key") keyStore := 
trustmanager.NewKeyMemoryStore(passphraseRetriever) diff --git a/cryptoservice/crypto_service.go b/cryptoservice/crypto_service.go index 7c32ba7201..04e14e1d0b 100644 --- a/cryptoservice/crypto_service.go +++ b/cryptoservice/crypto_service.go @@ -7,6 +7,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" ) const ( @@ -31,17 +32,17 @@ func (cs *CryptoService) Create(role, gun, algorithm string) (data.PublicKey, er switch algorithm { case data.RSAKey: - privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaKeySize) + privKey, err = utils.GenerateRSAKey(rand.Reader, rsaKeySize) if err != nil { return nil, fmt.Errorf("failed to generate RSA key: %v", err) } case data.ECDSAKey: - privKey, err = trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err = utils.GenerateECDSAKey(rand.Reader) if err != nil { return nil, fmt.Errorf("failed to generate EC key: %v", err) } case data.ED25519Key: - privKey, err = trustmanager.GenerateED25519Key(rand.Reader) + privKey, err = utils.GenerateED25519Key(rand.Reader) if err != nil { return nil, fmt.Errorf("failed to generate ED25519 key: %v", err) } diff --git a/cryptoservice/crypto_service_test.go b/cryptoservice/crypto_service_test.go index c0958b0ceb..b8426a5880 100644 --- a/cryptoservice/crypto_service_test.go +++ b/cryptoservice/crypto_service_test.go @@ -16,6 +16,7 @@ import ( "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/testutils/interfaces" + "github.com/docker/notary/tuf/utils" ) var algoToSigType = map[string]data.SigAlgorithm{ @@ -130,7 +131,7 @@ func (c CryptoServiceTester) TestSignWithKey(t *testing.T) { func (c CryptoServiceTester) TestSignNoMatchingKeys(t *testing.T) { cryptoService := c.cryptoServiceFactory() - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, 
c.errorMsg("error creating key")) // Test Sign @@ -144,7 +145,7 @@ func (c CryptoServiceTester) TestGetPrivateKeyMultipleKeystores(t *testing.T) { cryptoService.keyStores = append(cryptoService.keyStores, trustmanager.NewKeyMemoryStore(passphraseRetriever)) - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, c.errorMsg("error creating key")) for _, store := range cryptoService.keyStores { @@ -234,7 +235,7 @@ func (c CryptoServiceTester) TestRemoveFromMultipleKeystores(t *testing.T) { cryptoService.keyStores = append(cryptoService.keyStores, trustmanager.NewKeyMemoryStore(passphraseRetriever)) - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, c.errorMsg("error creating key")) for _, store := range cryptoService.keyStores { @@ -264,7 +265,7 @@ func (c CryptoServiceTester) TestListFromMultipleKeystores(t *testing.T) { expectedKeysIDs := make(map[string]bool) // just want to be able to index by key for i := 0; i < 3; i++ { - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, c.errorMsg("error creating key")) expectedKeysIDs[privKey.ID()] = true @@ -308,7 +309,7 @@ func (c CryptoServiceTester) TestAddKey(t *testing.T) { cryptoService.keyStores = append(cryptoService.keyStores, trustmanager.NewKeyMemoryStore(passphraseRetriever)) - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) // Add the key to the targets role diff --git a/server/handlers/default_test.go b/server/handlers/default_test.go index 06824a64d0..29646a737b 100644 --- a/server/handlers/default_test.go +++ b/server/handlers/default_test.go @@ -16,9 +16,9 @@ import ( "github.com/docker/distribution/registry/api/errcode" "github.com/docker/notary/server/errors" 
"github.com/docker/notary/server/storage" + store "github.com/docker/notary/storage" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/validation" "github.com/docker/notary/tuf/testutils" diff --git a/server/integration_test.go b/server/integration_test.go index 29eb6f3ef5..051030cded 100644 --- a/server/integration_test.go +++ b/server/integration_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/docker/notary/server/storage" + store "github.com/docker/notary/storage" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/testutils" "github.com/docker/notary/tuf/validation" "github.com/stretchr/testify/require" @@ -45,7 +45,7 @@ func TestValidationErrorFormat(t *testing.T) { // No snapshot is passed, and the server doesn't have the snapshot key, // so ErrBadHierarchy - err = client.SetMultiMeta(map[string][]byte{ + err = client.SetMulti(map[string][]byte{ data.CanonicalRootRole: rs, data.CanonicalTargetsRole: rt, }) diff --git a/server/server_test.go b/server/server_test.go index 0cb9deb141..ea117f3990 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -16,9 +16,9 @@ import ( _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/notary" "github.com/docker/notary/server/storage" + store "github.com/docker/notary/storage" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/testutils" tufutils "github.com/docker/notary/tuf/utils" "github.com/docker/notary/utils" @@ -77,12 +77,12 @@ func TestRepoPrefixMatches(t *testing.T) { require.NoError(t, err) // uploading is cool - require.NoError(t, uploader.SetMultiMeta(meta)) + require.NoError(t, uploader.SetMulti(meta)) // getting is cool - _, err = uploader.GetMeta(data.CanonicalSnapshotRole, notary.MaxDownloadSize) + 
_, err = uploader.GetSized(data.CanonicalSnapshotRole, notary.MaxDownloadSize) require.NoError(t, err) - _, err = uploader.GetMeta( + _, err = uploader.GetSized( tufutils.ConsistentName(data.CanonicalSnapshotRole, snChecksumBytes[:]), notary.MaxDownloadSize) require.NoError(t, err) @@ -117,7 +117,7 @@ func TestRepoPrefixDoesNotMatch(t *testing.T) { uploader, err := store.NewHTTPStore(url, "", "json", "key", http.DefaultTransport) require.NoError(t, err) - require.Error(t, uploader.SetMultiMeta(meta)) + require.Error(t, uploader.SetMulti(meta)) // update the storage so we don't fail just because the metadata is missing for _, roleName := range data.BaseRoles { @@ -128,10 +128,10 @@ func TestRepoPrefixDoesNotMatch(t *testing.T) { })) } - _, err = uploader.GetMeta(data.CanonicalSnapshotRole, notary.MaxDownloadSize) + _, err = uploader.GetSized(data.CanonicalSnapshotRole, notary.MaxDownloadSize) require.Error(t, err) - _, err = uploader.GetMeta( + _, err = uploader.GetSized( tufutils.ConsistentName(data.CanonicalSnapshotRole, snChecksumBytes[:]), notary.MaxDownloadSize) require.Error(t, err) diff --git a/signer/client/signer_trust_test.go b/signer/client/signer_trust_test.go index 45168a0b23..5f0193ff07 100644 --- a/signer/client/signer_trust_test.go +++ b/signer/client/signer_trust_test.go @@ -19,6 +19,7 @@ import ( "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/testutils/interfaces" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -109,7 +110,7 @@ func TestHealthCheckConnectionDied(t *testing.T) { var ret = passphrase.ConstantRetriever("pass") func TestGetPrivateKeyAndSignWithExistingKey(t *testing.T) { - key, err := trustmanager.GenerateECDSAKey(rand.Reader) + key, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate key") store := trustmanager.NewKeyMemoryStore(ret) diff --git a/signer/keydbstore/keydbstore_test.go 
b/signer/keydbstore/keydbstore_test.go index c5eb3c0992..ac6433fe45 100644 --- a/signer/keydbstore/keydbstore_test.go +++ b/signer/keydbstore/keydbstore_test.go @@ -10,6 +10,7 @@ import ( "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/jinzhu/gorm" _ "github.com/mattn/go-sqlite3" "github.com/stretchr/testify/require" @@ -73,7 +74,7 @@ func TestNewKeyDBStorePropagatesDBError(t *testing.T) { // Creating a key, on succcess, populates the cache. func TestCreateSuccessPopulatesCache(t *testing.T) { - testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + testKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) tmpFilename := initializeDB(t) @@ -92,7 +93,7 @@ func TestCreateSuccessPopulatesCache(t *testing.T) { // Getting a key, on succcess, populates the cache. func TestGetSuccessPopulatesCache(t *testing.T) { - testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + testKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) tmpFilename := initializeDB(t) @@ -112,10 +113,10 @@ func TestGetSuccessPopulatesCache(t *testing.T) { } func TestDoubleCreate(t *testing.T) { - testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + testKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) - anotherTestKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + anotherTestKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) tmpFilename := initializeDB(t) @@ -139,7 +140,7 @@ func TestDoubleCreate(t *testing.T) { } func TestCreateDelete(t *testing.T) { - testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + testKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) tmpFilename := initializeDB(t) @@ -163,7 +164,7 @@ func TestCreateDelete(t *testing.T) { } func TestKeyRotation(t *testing.T) { - testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + testKey, err := 
utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) tmpFilename := initializeDB(t) diff --git a/tuf/store/errors.go b/storage/errors.go similarity index 59% rename from tuf/store/errors.go rename to storage/errors.go index a7f63d6bbd..2c7b876497 100644 --- a/tuf/store/errors.go +++ b/storage/errors.go @@ -1,6 +1,15 @@ -package store +package storage -import "fmt" +import ( + "errors" + "fmt" +) + +var ( + // ErrPathOutsideStore indicates that the returned path would be + // outside the store + ErrPathOutsideStore = errors.New("path outside file store") +) // ErrMetaNotFound indicates we did not find a particular piece // of metadata in the store diff --git a/storage/filestore.go b/storage/filestore.go new file mode 100644 index 0000000000..f4fbd8f612 --- /dev/null +++ b/storage/filestore.go @@ -0,0 +1,205 @@ +package storage + +import ( + "fmt" + "github.com/docker/notary" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" +) + +// NewFilesystemStore creates a new store in a directory tree +func NewFilesystemStore(baseDir, subDir, extension string) (*FilesystemStore, error) { + baseDir = path.Join(baseDir, subDir) + + return NewFileStore(baseDir, extension, notary.PrivKeyPerms) +} + +// NewFileStore creates a fully configurable file store +func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*FilesystemStore, error) { + baseDir = filepath.Clean(baseDir) + if err := createDirectory(baseDir, perms); err != nil { + return nil, err + } + if !strings.HasPrefix(fileExt, ".") { + fileExt = "." 
+ fileExt + } + + return &FilesystemStore{ + baseDir: baseDir, + ext: fileExt, + perms: perms, + }, nil +} + +// NewSimpleFileStore is a convenience wrapper to create a world readable, +// owner writeable filestore +func NewSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) { + return NewFileStore(baseDir, fileExt, notary.PubCertPerms) +} + +// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable +// _only_ filestore +func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) { + return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms) +} + +// FilesystemStore is a store in a locally accessible directory +type FilesystemStore struct { + baseDir string + ext string + perms os.FileMode +} + +func (f *FilesystemStore) getPath(name string) (string, error) { + fileName := fmt.Sprintf("%s%s", name, f.ext) + fullPath := filepath.Join(f.baseDir, fileName) + + if !strings.HasPrefix(fullPath, f.baseDir) { + return "", ErrPathOutsideStore + } + return fullPath, nil +} + +// GetSized returns the meta for the given name (a role) up to size bytes +// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a +// predefined threshold "notary.MaxDownloadSize". +func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) { + p, err := f.getPath(name) + if err != nil { + return nil, err + } + meta, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + err = ErrMetaNotFound{Resource: name} + } + return nil, err + } + if size == NoSizeLimit { + size = notary.MaxDownloadSize + } + // Only return up to size bytes + if int64(len(meta)) < size { + return meta, nil + } + return meta[:size], nil +} + +// Get returns the meta for the given name. 
+func (f *FilesystemStore) Get(name string) ([]byte, error) { + p, err := f.getPath(name) + if err != nil { + return nil, err + } + meta, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + err = ErrMetaNotFound{Resource: name} + } + return nil, err + } + return meta, nil +} + +// SetMulti sets the metadata for multiple roles in one operation +func (f *FilesystemStore) SetMulti(metas map[string][]byte) error { + for role, blob := range metas { + err := f.Set(role, blob) + if err != nil { + return err + } + } + return nil +} + +// Set sets the meta for a single role +func (f *FilesystemStore) Set(name string, meta []byte) error { + fp, err := f.getPath(name) + if err != nil { + return err + } + + // Ensures the parent directories of the file we are about to write exist + err = os.MkdirAll(filepath.Dir(fp), f.perms) + if err != nil { + return err + } + + // if something already exists, just delete it and re-write it + os.RemoveAll(fp) + + // Write the file to disk + if err = ioutil.WriteFile(fp, meta, f.perms); err != nil { + return err + } + return nil +} + +// RemoveAll clears the existing filestore by removing its base directory +func (f *FilesystemStore) RemoveAll() error { + return os.RemoveAll(f.baseDir) +} + +// Remove removes the metadata for a single role - if the metadata doesn't +// exist, no error is returned +func (f *FilesystemStore) Remove(name string) error { + p, err := f.getPath(name) + if err != nil { + return err + } + return os.RemoveAll(p) // RemoveAll succeeds if path doesn't exist +} + +// Location returns a human readable name for the storage location +func (f FilesystemStore) Location() string { + return f.baseDir +} + +// ListFiles returns a list of all the filenames that can be used with Get* +// to retrieve content from this filestore +func (f FilesystemStore) ListFiles() []string { + files := make([]string, 0, 0) + filepath.Walk(f.baseDir, func(fp string, fi os.FileInfo, err error) error { + // If there are errors, 
ignore this particular file + if err != nil { + return nil + } + // Ignore if it is a directory + if fi.IsDir() { + return nil + } + + // If this is a symlink, ignore it + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + return nil + } + + // Only allow matches that end with our certificate extension (e.g. *.crt) + matched, _ := filepath.Match("*"+f.ext, fi.Name()) + + if matched { + // Find the relative path for this file relative to the base path. + fp, err = filepath.Rel(f.baseDir, fp) + if err != nil { + return err + } + trimmed := strings.TrimSuffix(fp, f.ext) + files = append(files, trimmed) + } + return nil + }) + return files +} + +// createDirectory receives a string of the path to a directory. +// It does not support passing files, so the caller has to remove +// the filename by doing filepath.Dir(full_path_to_file) +func createDirectory(dir string, perms os.FileMode) error { + // This prevents someone passing /path/to/dir and 'dir' not being created + // If two '//' exist, MkdirAll deals it with correctly + dir = dir + "/" + return os.MkdirAll(dir, perms) +} diff --git a/trustmanager/filestore_test.go b/storage/filestore_test.go similarity index 55% rename from trustmanager/filestore_test.go rename to storage/filestore_test.go index d9ae35c549..845d24cc2e 100644 --- a/trustmanager/filestore_test.go +++ b/storage/filestore_test.go @@ -1,16 +1,148 @@ -package trustmanager +package storage import ( - "crypto/rand" - "fmt" - "github.com/stretchr/testify/require" "io/ioutil" "os" + "path" "path/filepath" - "strconv" "testing" + + "crypto/rand" + "fmt" + "github.com/docker/notary" + "github.com/stretchr/testify/require" + "strconv" ) +const testDir = "/tmp/testFilesystemStore/" + +func TestNewFilesystemStore(t *testing.T) { + _, err := NewFilesystemStore(testDir, "metadata", "json") + require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) + defer os.RemoveAll(testDir) + + info, err := os.Stat(path.Join(testDir, "metadata")) + 
require.Nil(t, err, "Error attempting to stat metadata dir: %v", err) + require.NotNil(t, info, "Nil FileInfo from stat on metadata dir") + require.True(t, 0700&info.Mode() != 0, "Metadata directory is not writable") +} + +func TestSet(t *testing.T) { + s, err := NewFilesystemStore(testDir, "metadata", "json") + require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) + defer os.RemoveAll(testDir) + + testContent := []byte("test data") + + err = s.Set("testMeta", testContent) + require.Nil(t, err, "SetMeta returned unexpected error: %v", err) + + content, err := ioutil.ReadFile(path.Join(testDir, "metadata", "testMeta.json")) + require.Nil(t, err, "Error reading file: %v", err) + require.Equal(t, testContent, content, "Content written to file was corrupted.") +} + +func TestSetWithNoParentDirectory(t *testing.T) { + s, err := NewFilesystemStore(testDir, "metadata", "json") + require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) + defer os.RemoveAll(testDir) + + testContent := []byte("test data") + + err = s.Set("noexist/"+"testMeta", testContent) + require.Nil(t, err, "SetMeta returned unexpected error: %v", err) + + content, err := ioutil.ReadFile(path.Join(testDir, "metadata", "noexist/testMeta.json")) + require.Nil(t, err, "Error reading file: %v", err) + require.Equal(t, testContent, content, "Content written to file was corrupted.") +} + +// if something already existed there, remove it first and write a new file +func TestSetRemovesExistingFileBeforeWriting(t *testing.T) { + s, err := NewFilesystemStore(testDir, "metadata", "json") + require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) + defer os.RemoveAll(testDir) + + // make a directory where we want metadata to go + os.Mkdir(filepath.Join(testDir, "metadata", "root.json"), 0700) + + testContent := []byte("test data") + err = s.Set("root", testContent) + require.NoError(t, err, "SetMeta returned unexpected 
error: %v", err) + + content, err := ioutil.ReadFile(path.Join(testDir, "metadata", "root.json")) + require.NoError(t, err, "Error reading file: %v", err) + require.Equal(t, testContent, content, "Content written to file was corrupted.") +} + +func TestGetSized(t *testing.T) { + s, err := NewFilesystemStore(testDir, "metadata", "json") + require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) + defer os.RemoveAll(testDir) + + testContent := []byte("test data") + + ioutil.WriteFile(path.Join(testDir, "metadata", "testMeta.json"), testContent, 0600) + + content, err := s.GetSized("testMeta", int64(len(testContent))) + require.Nil(t, err, "GetMeta returned unexpected error: %v", err) + + require.Equal(t, testContent, content, "Content read from file was corrupted.") + + // Check that NoSizeLimit size reads everything + content, err = s.GetSized("testMeta", NoSizeLimit) + require.Nil(t, err, "GetMeta returned unexpected error: %v", err) + + require.Equal(t, testContent, content, "Content read from file was corrupted.") + + // Check that we return only up to size bytes + content, err = s.GetSized("testMeta", 4) + require.Nil(t, err, "GetMeta returned unexpected error: %v", err) + + require.Equal(t, []byte("test"), content, "Content read from file was corrupted.") +} + +func TestGetSizedSet(t *testing.T) { + s, err := NewFilesystemStore(testDir, "metadata", "json") + require.NoError(t, err, "Initializing FilesystemStore returned unexpected error", err) + defer os.RemoveAll(testDir) + + testGetSetMeta(t, func() MetadataStore { return s }) +} + +func TestRemove(t *testing.T) { + s, err := NewFilesystemStore(testDir, "metadata", "json") + require.NoError(t, err, "Initializing FilesystemStore returned unexpected error", err) + defer os.RemoveAll(testDir) + + testRemove(t, func() MetadataStore { return s }) +} + +func TestRemoveAll(t *testing.T) { + s, err := NewFilesystemStore(testDir, "metadata", "json") + require.Nil(t, err, "Initializing 
FilesystemStore returned unexpected error: %v", err) + defer os.RemoveAll(testDir) + + testContent := []byte("test data") + + // Write some files in metadata and targets dirs + metaPath := path.Join(testDir, "metadata", "testMeta.json") + ioutil.WriteFile(metaPath, testContent, 0600) + + // Remove all + err = s.RemoveAll() + require.Nil(t, err, "Removing all from FilesystemStore returned unexpected error: %v", err) + + // Test that files no longer exist + _, err = ioutil.ReadFile(metaPath) + require.True(t, os.IsNotExist(err)) + + // Removing the empty filestore returns nil + require.Nil(t, s.RemoveAll()) +} + +// Tests originally from Trustmanager ensuring the FilesystemStore satisfies the +// necessary behaviour func TestAddFile(t *testing.T) { testData := []byte("This test data should be part of the file.") testName := "docker.com/notary/certificate" @@ -26,14 +158,14 @@ func TestAddFile(t *testing.T) { expectedFilePath := filepath.Join(tempBaseDir, testName+testExt) // Create our SimpleFileStore - store := &SimpleFileStore{ + store := &FilesystemStore{ baseDir: tempBaseDir, - fileExt: testExt, + ext: testExt, perms: perms, } - // Call the Add function - err = store.Add(testName, testData) + // Call the Set function + err = store.Set(testName, testData) require.NoError(t, err) // Check to see if file exists @@ -59,9 +191,9 @@ func TestRemoveFile(t *testing.T) { require.NoError(t, err) // Create our SimpleFileStore - store := &SimpleFileStore{ + store := &FilesystemStore{ baseDir: tempBaseDir, - fileExt: testExt, + ext: testExt, perms: perms, } @@ -95,9 +227,9 @@ func TestListFiles(t *testing.T) { } // Create our SimpleFileStore - store := &SimpleFileStore{ + store := &FilesystemStore{ baseDir: tempBaseDir, - fileExt: testExt, + ext: testExt, perms: perms, } @@ -111,19 +243,19 @@ func TestGetPath(t *testing.T) { perms := os.FileMode(0755) // Create our SimpleFileStore - store := &SimpleFileStore{ + store := &FilesystemStore{ baseDir: "", - fileExt: testExt, + 
ext: testExt, perms: perms, } firstPath := "diogomonica.com/openvpn/0xdeadbeef.crt" secondPath := "/docker.io/testing-dashes/@#$%^&().crt" - result, err := store.GetPath("diogomonica.com/openvpn/0xdeadbeef") + result, err := store.getPath("diogomonica.com/openvpn/0xdeadbeef") require.Equal(t, firstPath, result, "unexpected error from GetPath: %v", err) - result, err = store.GetPath("/docker.io/testing-dashes/@#$%^&()") + result, err = store.getPath("/docker.io/testing-dashes/@#$%^&()") require.Equal(t, secondPath, result, "unexpected error from GetPath: %v", err) } @@ -131,46 +263,46 @@ func TestGetPathProtection(t *testing.T) { testExt := ".crt" perms := os.FileMode(0755) - // Create our SimpleFileStore - store := &SimpleFileStore{ + // Create our FilesystemStore + store := &FilesystemStore{ baseDir: "/path/to/filestore/", - fileExt: testExt, + ext: testExt, perms: perms, } // Should deny requests for paths outside the filestore - _, err := store.GetPath("../../etc/passwd") + _, err := store.getPath("../../etc/passwd") require.Error(t, err) require.Equal(t, ErrPathOutsideStore, err) - _, err = store.GetPath("private/../../../etc/passwd") + _, err = store.getPath("private/../../../etc/passwd") require.Error(t, err) require.Equal(t, ErrPathOutsideStore, err) // Convoluted paths should work as long as they end up inside the store expected := "/path/to/filestore/filename.crt" - result, err := store.GetPath("private/../../filestore/./filename") + result, err := store.getPath("private/../../filestore/./filename") require.NoError(t, err) require.Equal(t, expected, result) // Repeat tests with a relative baseDir - relStore := &SimpleFileStore{ + relStore := &FilesystemStore{ baseDir: "relative/file/path", - fileExt: testExt, + ext: testExt, perms: perms, } // Should deny requests for paths outside the filestore - _, err = relStore.GetPath("../../etc/passwd") + _, err = relStore.getPath("../../etc/passwd") require.Error(t, err) require.Equal(t, ErrPathOutsideStore, err) - 
_, err = relStore.GetPath("private/../../../etc/passwd") + _, err = relStore.getPath("private/../../../etc/passwd") require.Error(t, err) require.Equal(t, ErrPathOutsideStore, err) // Convoluted paths should work as long as they end up inside the store expected = "relative/file/path/filename.crt" - result, err = relStore.GetPath("private/../../path/./filename") + result, err = relStore.getPath("private/../../path/./filename") require.NoError(t, err) require.Equal(t, expected, result) } @@ -191,10 +323,10 @@ func TestGetData(t *testing.T) { expectedData, err := generateRandomFile(expectedFilePath, perms) require.NoError(t, err) - // Create our SimpleFileStore - store := &SimpleFileStore{ + // Create our FilesystemStore + store := &FilesystemStore{ baseDir: tempBaseDir, - fileExt: testExt, + ext: testExt, perms: perms, } testData, err := store.Get(testName) @@ -213,7 +345,7 @@ func TestCreateDirectory(t *testing.T) { dirPath := filepath.Join(tempBaseDir, testDir) // Call createDirectory - createDirectory(dirPath, visible) + createDirectory(dirPath, notary.PubCertPerms) // Check to see if file exists fi, err := os.Stat(dirPath) @@ -237,7 +369,7 @@ func TestCreatePrivateDirectory(t *testing.T) { dirPath := filepath.Join(tempBaseDir, testDir) // Call createDirectory - createDirectory(dirPath, private) + createDirectory(dirPath, notary.PrivKeyPerms) // Check to see if file exists fi, err := os.Stat(dirPath) @@ -297,10 +429,10 @@ func TestFileStoreConsistency(t *testing.T) { file2Path := "path/file2" file3Path := "long/path/file3" - for _, s := range []Storage{s, s2} { - s.Add(file1Path, file1Data) - s.Add(file2Path, file2Data) - s.Add(file3Path, file3Data) + for _, s := range []*FilesystemStore{s, s2} { + s.Set(file1Path, file1Data) + s.Set(file2Path, file2Data) + s.Set(file3Path, file3Data) paths := map[string][]byte{ file1Path: file1Data, diff --git a/tuf/store/httpstore.go b/storage/httpstore.go similarity index 92% rename from tuf/store/httpstore.go rename to 
storage/httpstore.go index a925786616..5c4f3870cc 100644 --- a/tuf/store/httpstore.go +++ b/storage/httpstore.go @@ -8,7 +8,7 @@ // If writing your own server, please have a look at // github.com/docker/distribution/registry/api/errcode -package store +package storage import ( "bytes" @@ -136,12 +136,12 @@ func translateStatusToError(resp *http.Response, resource string) error { } } -// GetMeta downloads the named meta file with the given size. A short body +// GetSized downloads the named meta file with the given size. A short body // is acceptable because in the case of timestamp.json, the size is a cap, // not an exact length. // If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a // predefined threshold "notary.MaxDownloadSize". -func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) { +func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) { url, err := s.buildMetaURL(name) if err != nil { return nil, err @@ -174,8 +174,8 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) { return body, nil } -// SetMeta uploads a piece of TUF metadata to the server -func (s HTTPStore) SetMeta(name string, blob []byte) error { +// Set uploads a piece of TUF metadata to the server +func (s HTTPStore) Set(name string, blob []byte) error { url, err := s.buildMetaURL("") if err != nil { return err @@ -192,9 +192,9 @@ func (s HTTPStore) SetMeta(name string, blob []byte) error { return translateStatusToError(resp, "POST "+name) } -// RemoveMeta always fails, because we should never be able to delete metadata +// Remove always fails, because we should never be able to delete metadata // remotely -func (s HTTPStore) RemoveMeta(name string) error { +func (s HTTPStore) Remove(name string) error { return ErrInvalidOperation{msg: "cannot delete metadata"} } @@ -222,10 +222,10 @@ func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request return req, nil } -// SetMultiMeta does a single batch 
upload of multiple pieces of TUF metadata. +// SetMulti does a single batch upload of multiple pieces of TUF metadata. // This should be preferred for updating a remote server as it enable the server // to remain consistent, either accepting or rejecting the complete update. -func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error { +func (s HTTPStore) SetMulti(metas map[string][]byte) error { url, err := s.buildMetaURL("") if err != nil { return err @@ -295,3 +295,8 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) { } return body, nil } + +// Location returns a human readable name for the storage location +func (s HTTPStore) Location() string { + return s.baseURL.String() +} diff --git a/tuf/store/httpstore_test.go b/storage/httpstore_test.go similarity index 56% rename from tuf/store/httpstore_test.go rename to storage/httpstore_test.go index 705a6a09cc..9000af57e8 100644 --- a/tuf/store/httpstore_test.go +++ b/storage/httpstore_test.go @@ -1,9 +1,7 @@ -package store +package storage import ( "bytes" - "encoding/base64" - "encoding/hex" "fmt" "io" "io/ioutil" @@ -14,7 +12,6 @@ import ( "github.com/docker/go/canonical/json" "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/validation" "github.com/stretchr/testify/require" ) @@ -29,7 +26,7 @@ func (rt *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) return http.DefaultClient.Do(req) } -func TestHTTPStoreGetMeta(t *testing.T) { +func TestHTTPStoreGetSized(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(testRoot)) } @@ -42,40 +39,13 @@ func TestHTTPStoreGetMeta(t *testing.T) { "key", &http.Transport{}, ) - if err != nil { - t.Fatal(err) - } - j, err := store.GetMeta("root", 4801) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + j, err := store.GetSized("root", 4801) + require.NoError(t, err) + require.Equal(t, testRoot, string(j)) p := &data.Signed{} err = 
json.Unmarshal(j, p) - if err != nil { - t.Fatal(err) - } - rootKey, err := base64.StdEncoding.DecodeString(testRootKey) require.NoError(t, err) - k := data.NewPublicKey("ecdsa-x509", rootKey) - - sigBytes := p.Signatures[0].Signature - if err != nil { - t.Fatal(err) - } - var decoded map[string]interface{} - if err := json.Unmarshal(*p.Signed, &decoded); err != nil { - t.Fatal(err) - } - msg, err := json.MarshalCanonical(decoded) - if err != nil { - t.Fatal(err) - } - method := p.Signatures[0].Method - err = signed.Verifiers[method].Verify(k, sigBytes, msg) - if err != nil { - t.Fatal(err) - } - } // Test that passing -1 to httpstore's GetMeta will return all content @@ -92,39 +62,13 @@ func TestHTTPStoreGetAllMeta(t *testing.T) { "key", &http.Transport{}, ) - if err != nil { - t.Fatal(err) - } - j, err := store.GetMeta("root", NoSizeLimit) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + j, err := store.GetSized("root", NoSizeLimit) + require.NoError(t, err) + require.Equal(t, testRoot, string(j)) p := &data.Signed{} err = json.Unmarshal(j, p) - if err != nil { - t.Fatal(err) - } - rootKey, err := base64.StdEncoding.DecodeString(testRootKey) require.NoError(t, err) - k := data.NewPublicKey("ecdsa-x509", rootKey) - - sigBytes := p.Signatures[0].Signature - if err != nil { - t.Fatal(err) - } - var decoded map[string]interface{} - if err := json.Unmarshal(*p.Signed, &decoded); err != nil { - t.Fatal(err) - } - msg, err := json.MarshalCanonical(decoded) - if err != nil { - t.Fatal(err) - } - method := p.Signatures[0].Method - err = signed.Verifiers[method].Verify(k, sigBytes, msg) - if err != nil { - t.Fatal(err) - } } func TestSetMultiMeta(t *testing.T) { @@ -135,9 +79,7 @@ func TestSetMultiMeta(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) { reader, err := r.MultipartReader() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) updates := make(map[string][]byte) for { part, err := reader.NextPart() @@ -146,9 +88,7 
@@ func TestSetMultiMeta(t *testing.T) { } role := strings.TrimSuffix(part.FileName(), ".json") updates[role], err = ioutil.ReadAll(part) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } rd, rok := updates["root"] require.True(t, rok) @@ -162,46 +102,9 @@ func TestSetMultiMeta(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(handler)) defer server.Close() store, err := NewHTTPStore(server.URL, "metadata", "json", "key", http.DefaultTransport) - if err != nil { - t.Fatal(err) - } - - store.SetMultiMeta(metas) -} - -func TestPyCryptoRSAPSSCompat(t *testing.T) { - pubPem := "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAnKuXZeefa2LmgxaL5NsM\nzKOHNe+x/nL6ik+lDBCTV6OdcwAhHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5\nVSCuRJ53UronENl6lsa5mFKP8StYLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDf\nBEPIRp28ev/NViwGOEkBu2UAbwCIdnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK\n6pdzJXlhr9yap3UpgQ/iO9JtoEYB2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq\n3xmN4p+R4VGzfdQN+8Kl/IPjqWB535twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrM\nBI8ztvPiogz+MvXb8WvarZ6TMTh8ifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X\n7sXoaqszEtXdq5ef5zKVxkiyIQZcbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj\n1ANMFPxDQpHudCLxwCzjCb+sVa20HBRPTnzo8LSZkI6jAgMBAAE=\n-----END PUBLIC KEY-----" - //privPem := "-----BEGIN RSA PRIVATE 
KEY-----\nMIIG4wIBAAKCAYEAnKuXZeefa2LmgxaL5NsMzKOHNe+x/nL6ik+lDBCTV6OdcwAh\nHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5VSCuRJ53UronENl6lsa5mFKP8StY\nLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDfBEPIRp28ev/NViwGOEkBu2UAbwCI\ndnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK6pdzJXlhr9yap3UpgQ/iO9JtoEYB\n2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq3xmN4p+R4VGzfdQN+8Kl/IPjqWB5\n35twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrMBI8ztvPiogz+MvXb8WvarZ6TMTh8\nifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X7sXoaqszEtXdq5ef5zKVxkiyIQZc\nbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj1ANMFPxDQpHudCLxwCzjCb+sVa20\nHBRPTnzo8LSZkI6jAgMBAAECggGAdzyI7z/HLt2IfoAsXDLynNRgVYZluzgawiU3\ngeUjnnGhpSKWERXJC2IWDPBk0YOGgcnQxErNTdfXiFZ/xfRlSgqjVwob2lRe4w4B\npLr+CZXcgznv1VrPUvdolOSp3R2Mahfn7u0qVDUQ/g8jWVI6KW7FACmQhzQkPM8o\ntLGrpcmK+PA465uaHKtYccEB02ILqrK8v++tknv7eIZczrsSKlS1h/HHjSaidYxP\n2DAUiF7wnChrwwQEvuEUHhwVgQcoDMBoow0zwHdbFiFO2ZT54H2oiJWLhpR/x6RK\ngM1seqoPH2sYErPJACMcYsMtF4Tx7b5c4WSj3vDCGb+jeqnNS6nFC3aMnv75mUS2\nYDPU1heJFd8pNHVf0RDejLZZUiJSnXf3vpOxt9Xv2+4He0jeMfLV7zX0mO2Ni3MJ\nx6PiVy4xerHImOuuHzSla5crOq2ECiAxd1wEOFDRD2LRHzfhpk1ghiA5xA1qwc7Z\neRnkVfoy6PPZ4lZakZTm0p8YCQURAoHBAMUIC/7vnayLae7POmgy+np/ty7iMfyd\nV1eO6LTO21KAaGGlhaY26WD/5LcG2FUgc5jKKahprGrmiNLzLUeQPckJmuijSEVM\nl/4DlRvCo867l7fLaVqYzsQBBdeGIFNiT+FBOd8atff87ZBEfH/rXbDi7METD/VR\n4TdblnCsKYAXEJUdkw3IK7SUGERiQZIwKXrH/Map4ibDrljJ71iCgEureU0DBwcg\nwLftmjGMISoLscdRxeubX5uf/yxtHBJeRwKBwQDLjzHhb4gNGdBHUl4hZPAGCq1V\nLX/GpfoOVObW64Lud+tI6N9GNua5/vWduL7MWWOzDTMZysganhKwsJCY5SqAA9p0\nb6ohusf9i1nUnOa2F2j+weuYPXrTYm+ZrESBBdaEJPuj3R5YHVujrBA9Xe0kVOe3\nne151A+0xJOI3tX9CttIaQAsXR7cMDinkDITw6i7X4olRMPCSixHLW97cDsVDRGt\necO1d4dP3OGscN+vKCoL6tDKDotzWHYPwjH47sUCgcEAoVI8WCiipbKkMnaTsNsE\ngKXvO0DSgq3k5HjLCbdQldUzIbgfnH7bSKNcBYtiNxjR7OihgRW8qO5GWsnmafCs\n1dy6a/2835id3cnbHRaZflvUFhVDFn2E1bCsstFLyFn3Y0w/cO9yzC/X5sZcVXRF\nit3R0Selakv3JZckru4XMJwx5JWJYMBjIIAc+miknWg3niL+UT6pPun65xG3mXWI\nS+yC7c4rw+dKQ44UMLs2MDHRBoxqi8T0W/x9NkfDszpjAoHAclH7S4ZdvC3RIR0L\nLGoJuvroGbwx1JiGdOINuooNwGuswge2zTIsJi0gN/H3hcB2E6rIFiYid4BrMrwW\nmSeq1LZVS6
siu0qw4p4OVy+/CmjfWKQD8j4k6u6PipiK6IMk1JYIlSCr2AS04JjT\njgNgGVVtxVt2cUM9huIXkXjEaRZdzK7boA60NCkIyGJdHWh3LLQdW4zg/A64C0lj\nIMoJBGuQkAKgfRuh7KI6Q6Qom7BM3OCFXdUJUEBQHc2MTyeZAoHAJdBQGBn1RFZ+\nn75AnbTMZJ6Twp2fVjzWUz/+rnXFlo87ynA18MR2BzaDST4Bvda29UBFGb32Mux9\nOHukqLgIE5jDuqWjy4B5eCoxZf/OvwlgXkX9+gprGR3axn/PZBFPbFB4ZmjbWLzn\nbocn7FJCXf+Cm0cMmv1jIIxej19MUU/duq9iq4RkHY2LG+KrSEQIUVmImCftXdN3\n/qNP5JetY0eH6C+KRc8JqDB0nvbqZNOgYXOfYXo/5Gk8XIHTFihm\n-----END RSA PRIVATE KEY-----" - testStr := "The quick brown fox jumps over the lazy dog." - sigHex := "4e05ee9e435653549ac4eddbc43e1a6868636e8ea6dbec2564435afcb0de47e0824cddbd88776ddb20728c53ecc90b5d543d5c37575fda8bd0317025fc07de62ee8084b1a75203b1a23d1ef4ac285da3d1fc63317d5b2cf1aafa3e522acedd366ccd5fe4a7f02a42922237426ca3dc154c57408638b9bfaf0d0213855d4e9ee621db204151bcb13d4dbb18f930ec601469c992c84b14e9e0b6f91ac9517bb3b749dd117e1cbac2e4acb0e549f44558a2005898a226d5b6c8b9291d7abae0d9e0a16858b89662a085f74a202deb867acab792bdbd2c36731217caea8b17bd210c29b890472f11e5afdd1dd7b69004db070e04201778f2c49f5758643881403d45a58d08f51b5c63910c6185892f0b590f191d760b669eff2464456f130239bba94acf54a0cb98f6939ff84ae26a37f9b890be259d9b5d636f6eb367b53e895227d7d79a3a88afd6d28c198ee80f6527437c5fbf63accb81709925c4e03d1c9eaee86f58e4bd1c669d6af042dbd412de0d13b98b1111e2fadbe34b45de52125e9a" - k := data.NewPublicKey(data.RSAKey, []byte(pubPem)) - - sigBytes, err := hex.DecodeString(sigHex) - if err != nil { - t.Fatal(err) - } - v := signed.RSAPyCryptoVerifier{} - err = v.Verify(k, sigBytes, []byte(testStr)) - if err != nil { - t.Fatal(err) - } -} - -func TestPyNaCled25519Compat(t *testing.T) { - pubHex := "846612b43cef909a0e4ea9c818379bca4723a2020619f95e7a0ccc6f0850b7dc" - //privHex := "bf3cdb9b2a664b0460e6755cb689ffca15b6e294f79f9f1fcf90b52e5b063a76" - testStr := "The quick brown fox jumps over the lazy dog." 
- sigHex := "166e7013e48f26dccb4e68fe4cf558d1cd3af902f8395534336a7f8b4c56588694aa3ac671767246298a59d5ef4224f02c854f41bfcfe70241db4be1546d6a00" - - pub, _ := hex.DecodeString(pubHex) - k := data.NewPublicKey(data.ED25519Key, pub) - - sigBytes, _ := hex.DecodeString(sigHex) + require.NoError(t, err) - err := signed.Verifiers[data.EDDSASignature].Verify(k, sigBytes, []byte(testStr)) - if err != nil { - t.Fatal(err) - } + store.SetMulti(metas) } func testErrorCode(t *testing.T, errorCode int, errType error) { @@ -220,7 +123,7 @@ func testErrorCode(t *testing.T, errorCode int, errType error) { ) require.NoError(t, err) - _, err = store.GetMeta("root", 4801) + _, err = store.GetSized("root", 4801) require.Error(t, err) require.IsType(t, errType, err, fmt.Sprintf("%d should translate to %v", errorCode, errType)) diff --git a/storage/interface.go b/storage/interface.go deleted file mode 100644 index 2951e248fe..0000000000 --- a/storage/interface.go +++ /dev/null @@ -1,8 +0,0 @@ -package storage - -// Bootstrapper is a thing that can set itself up -type Bootstrapper interface { - // Bootstrap instructs a configured Bootstrapper to perform - // its setup operations. 
- Bootstrap() error -} diff --git a/tuf/store/interfaces.go b/storage/interfaces.go similarity index 61% rename from tuf/store/interfaces.go rename to storage/interfaces.go index 8ccec34831..f7813247ef 100644 --- a/tuf/store/interfaces.go +++ b/storage/interfaces.go @@ -1,4 +1,4 @@ -package store +package storage // NoSizeLimit is represented as -1 for arguments to GetMeta const NoSizeLimit int64 = -1 @@ -6,11 +6,11 @@ const NoSizeLimit int64 = -1 // MetadataStore must be implemented by anything that intends to interact // with a store of TUF files type MetadataStore interface { - GetMeta(name string, size int64) ([]byte, error) - SetMeta(name string, blob []byte) error - SetMultiMeta(map[string][]byte) error + GetSized(name string, size int64) ([]byte, error) + Set(name string, blob []byte) error + SetMulti(map[string][]byte) error RemoveAll() error - RemoveMeta(name string) error + Remove(name string) error } // PublicKeyStore must be implemented by a key service @@ -18,14 +18,16 @@ type PublicKeyStore interface { GetKey(role string) ([]byte, error) } -// LocalStore represents a local TUF sture -type LocalStore interface { - MetadataStore -} - // RemoteStore is similar to LocalStore with the added expectation that it should // provide a way to download targets once located type RemoteStore interface { MetadataStore PublicKeyStore } + +// Bootstrapper is a thing that can set itself up +type Bootstrapper interface { + // Bootstrap instructs a configured Bootstrapper to perform + // its setup operations. 
+ Bootstrap() error +} diff --git a/tuf/store/memorystore.go b/storage/memorystore.go similarity index 53% rename from tuf/store/memorystore.go rename to storage/memorystore.go index 77c03c7198..8a2ade54da 100644 --- a/tuf/store/memorystore.go +++ b/storage/memorystore.go @@ -1,50 +1,46 @@ -package store +package storage import ( "crypto/sha256" - "fmt" "github.com/docker/notary" - "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" ) // NewMemoryStore returns a MetadataStore that operates entirely in memory. // Very useful for testing -func NewMemoryStore(meta map[string][]byte) *MemoryStore { +func NewMemoryStore(initial map[string][]byte) *MemoryStore { var consistent = make(map[string][]byte) - if meta == nil { - meta = make(map[string][]byte) + if initial == nil { + initial = make(map[string][]byte) } else { // add all seed meta to consistent - for name, data := range meta { + for name, data := range initial { checksum := sha256.Sum256(data) path := utils.ConsistentName(name, checksum[:]) consistent[path] = data } } return &MemoryStore{ - meta: meta, + data: initial, consistent: consistent, - keys: make(map[string][]data.PrivateKey), } } // MemoryStore implements a mock RemoteStore entirely in memory. // For testing purposes only. type MemoryStore struct { - meta map[string][]byte + data map[string][]byte consistent map[string][]byte - keys map[string][]data.PrivateKey } -// GetMeta returns up to size bytes of data references by name. +// GetSized returns up to size bytes of data references by name. 
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a // predefined threshold "notary.MaxDownloadSize", as we will always know the // size for everything but a timestamp and sometimes a root, // neither of which should be exceptionally large -func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) { - d, ok := m.meta[name] +func (m MemoryStore) GetSized(name string, size int64) ([]byte, error) { + d, ok := m.data[name] if ok { if size == NoSizeLimit { size = notary.MaxDownloadSize @@ -64,9 +60,20 @@ func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) { return nil, ErrMetaNotFound{Resource: name} } -// SetMeta sets the metadata value for the given name -func (m *MemoryStore) SetMeta(name string, meta []byte) error { - m.meta[name] = meta +// Get returns the data associated with name +func (m MemoryStore) Get(name string) ([]byte, error) { + if d, ok := m.data[name]; ok { + return d, nil + } + if d, ok := m.consistent[name]; ok { + return d, nil + } + return nil, ErrMetaNotFound{Resource: name} +} + +// Set sets the metadata value for the given name +func (m *MemoryStore) Set(name string, meta []byte) error { + m.data[name] = meta checksum := sha256.Sum256(meta) path := utils.ConsistentName(name, checksum[:]) @@ -74,34 +81,44 @@ func (m *MemoryStore) SetMeta(name string, meta []byte) error { return nil } -// SetMultiMeta sets multiple pieces of metadata for multiple names +// SetMulti sets multiple pieces of metadata for multiple names // in a single operation. 
-func (m *MemoryStore) SetMultiMeta(metas map[string][]byte) error { +func (m *MemoryStore) SetMulti(metas map[string][]byte) error { for role, blob := range metas { - m.SetMeta(role, blob) + m.Set(role, blob) } return nil } -// RemoveMeta removes the metadata for a single role - if the metadata doesn't +// Remove removes the metadata for a single role - if the metadata doesn't // exist, no error is returned -func (m *MemoryStore) RemoveMeta(name string) error { - if meta, ok := m.meta[name]; ok { +func (m *MemoryStore) Remove(name string) error { + if meta, ok := m.data[name]; ok { checksum := sha256.Sum256(meta) path := utils.ConsistentName(name, checksum[:]) - delete(m.meta, name) + delete(m.data, name) delete(m.consistent, path) } return nil } -// GetKey returns the public key for the given role -func (m *MemoryStore) GetKey(role string) ([]byte, error) { - return nil, fmt.Errorf("GetKey is not implemented for the MemoryStore") -} - // RemoveAll clears the existing memory store by setting this store as new empty one func (m *MemoryStore) RemoveAll() error { *m = *NewMemoryStore(nil) return nil } + +// Location provides a human readable name for the storage location +func (m MemoryStore) Location() string { + return "memory" +} + +// ListFiles returns a list of all files. The names returned should be +// usable with Get directly, with no modification. 
+func (m *MemoryStore) ListFiles() []string { + names := make([]string, 0, len(m.data)) + for n := range m.data { + names = append(names, n) + } + return names +} diff --git a/tuf/store/memorystore_test.go b/storage/memorystore_test.go similarity index 72% rename from tuf/store/memorystore_test.go rename to storage/memorystore_test.go index 716bb7bc2d..3f5903010e 100644 --- a/tuf/store/memorystore_test.go +++ b/storage/memorystore_test.go @@ -1,4 +1,4 @@ -package store +package storage import ( "crypto/sha256" @@ -12,7 +12,7 @@ func TestMemoryStoreMetadataOperations(t *testing.T) { s := NewMemoryStore(nil) // GetMeta of a non-existent metadata fails - _, err := s.GetMeta("nonexistent", 0) + _, err := s.GetSized("nonexistent", 0) require.Error(t, err) require.IsType(t, ErrMetaNotFound{}, err) @@ -23,19 +23,19 @@ func TestMemoryStoreMetadataOperations(t *testing.T) { shasum := sha256.Sum256(metaContent) invalidShasum := sha256.Sum256([]byte{}) - require.NoError(t, s.SetMeta("exists", metaContent)) - require.NoError(t, s.SetMultiMeta(map[string][]byte{"multi1": metaContent, "multi2": metaContent})) + require.NoError(t, s.Set("exists", metaContent)) + require.NoError(t, s.SetMulti(map[string][]byte{"multi1": metaContent, "multi2": metaContent})) for _, metaName := range []string{"exists", "multi1", "multi2"} { - meta, err := s.GetMeta(metaName, metaSize) + meta, err := s.GetSized(metaName, metaSize) require.NoError(t, err) require.Equal(t, metaContent, meta) - meta, err = s.GetMeta(utils.ConsistentName(metaName, shasum[:]), metaSize) + meta, err = s.GetSized(utils.ConsistentName(metaName, shasum[:]), metaSize) require.NoError(t, err) require.Equal(t, metaContent, meta) - _, err = s.GetMeta(utils.ConsistentName(metaName, invalidShasum[:]), metaSize) + _, err = s.GetSized(utils.ConsistentName(metaName, invalidShasum[:]), metaSize) require.Error(t, err) require.IsType(t, ErrMetaNotFound{}, err) } @@ -44,7 +44,7 @@ func TestMemoryStoreMetadataOperations(t *testing.T) { err 
= s.RemoveAll() require.NoError(t, err) - _, err = s.GetMeta("exists", 0) + _, err = s.GetSized("exists", 0) require.Error(t, err) require.IsType(t, ErrMetaNotFound{}, err) } @@ -54,22 +54,22 @@ func TestMemoryStoreGetMetaSize(t *testing.T) { s := NewMemoryStore(map[string][]byte{"content": content}) // we can get partial size - meta, err := s.GetMeta("content", 3) + meta, err := s.GetSized("content", 3) require.NoError(t, err) require.Equal(t, []byte("con"), meta) // we can get zero size - meta, err = s.GetMeta("content", 0) + meta, err = s.GetSized("content", 0) require.NoError(t, err) require.Equal(t, []byte{}, meta) // we can get the whole thing by passing NoSizeLimit (-1) - meta, err = s.GetMeta("content", NoSizeLimit) + meta, err = s.GetSized("content", NoSizeLimit) require.NoError(t, err) require.Equal(t, content, meta) // a size much larger than the actual length will return the whole thing - meta, err = s.GetMeta("content", 8000) + meta, err = s.GetSized("content", 8000) require.NoError(t, err) require.Equal(t, content, meta) } diff --git a/tuf/store/offlinestore.go b/storage/offlinestore.go similarity index 52% rename from tuf/store/offlinestore.go rename to storage/offlinestore.go index b0f057b2b8..a9433c1ad3 100644 --- a/tuf/store/offlinestore.go +++ b/storage/offlinestore.go @@ -1,8 +1,4 @@ -package store - -import ( - "io" -) +package storage // ErrOffline is used to indicate we are operating offline type ErrOffline struct{} @@ -17,23 +13,23 @@ var err = ErrOffline{} // returns ErrOffline for every operation type OfflineStore struct{} -// GetMeta returns ErrOffline -func (es OfflineStore) GetMeta(name string, size int64) ([]byte, error) { +// GetSized returns ErrOffline +func (es OfflineStore) GetSized(name string, size int64) ([]byte, error) { return nil, err } -// SetMeta returns ErrOffline -func (es OfflineStore) SetMeta(name string, blob []byte) error { +// Set returns ErrOffline +func (es OfflineStore) Set(name string, blob []byte) error { return 
err } -// SetMultiMeta returns ErrOffline -func (es OfflineStore) SetMultiMeta(map[string][]byte) error { +// SetMulti returns ErrOffline +func (es OfflineStore) SetMulti(map[string][]byte) error { return err } -// RemoveMeta returns ErrOffline -func (es OfflineStore) RemoveMeta(name string) error { +// Remove returns ErrOffline +func (es OfflineStore) Remove(name string) error { return err } @@ -42,12 +38,12 @@ func (es OfflineStore) GetKey(role string) ([]byte, error) { return nil, err } -// GetTarget returns ErrOffline -func (es OfflineStore) GetTarget(path string) (io.ReadCloser, error) { - return nil, err -} - // RemoveAll return ErrOffline func (es OfflineStore) RemoveAll() error { return err } + +// Location returns a human readable name for the storage location +func (es OfflineStore) Location() string { + return "offline" +} diff --git a/tuf/store/offlinestore_test.go b/storage/offlinestore_test.go similarity index 74% rename from tuf/store/offlinestore_test.go rename to storage/offlinestore_test.go index 66de915e78..659211e422 100644 --- a/tuf/store/offlinestore_test.go +++ b/storage/offlinestore_test.go @@ -1,4 +1,4 @@ -package store +package storage import ( "testing" @@ -8,15 +8,15 @@ import ( func TestOfflineStore(t *testing.T) { s := OfflineStore{} - _, err := s.GetMeta("", 0) + _, err := s.GetSized("", 0) require.Error(t, err) require.IsType(t, ErrOffline{}, err) - err = s.SetMeta("", nil) + err = s.Set("", nil) require.Error(t, err) require.IsType(t, ErrOffline{}, err) - err = s.SetMultiMeta(nil) + err = s.SetMulti(nil) require.Error(t, err) require.IsType(t, ErrOffline{}, err) @@ -24,10 +24,6 @@ func TestOfflineStore(t *testing.T) { require.Error(t, err) require.IsType(t, ErrOffline{}, err) - _, err = s.GetTarget("") - require.Error(t, err) - require.IsType(t, ErrOffline{}, err) - err = s.RemoveAll() require.Error(t, err) require.IsType(t, ErrOffline{}, err) diff --git a/tuf/store/store_test.go b/storage/store_test.go similarity index 66% rename 
from tuf/store/store_test.go rename to storage/store_test.go index 6ce9be12d0..a1dc6b309b 100644 --- a/tuf/store/store_test.go +++ b/storage/store_test.go @@ -1,4 +1,4 @@ -package store +package storage import ( "testing" @@ -11,32 +11,32 @@ type storeFactory func() MetadataStore // Verifies that the metadata store can get and set metadata func testGetSetMeta(t *testing.T, factory storeFactory) { s := factory() - metaBytes, err := s.GetMeta("root", 300) + metaBytes, err := s.GetSized("root", 300) require.Error(t, err) require.Nil(t, metaBytes) require.IsType(t, ErrMetaNotFound{}, err) content := []byte("root bytes") - require.NoError(t, s.SetMeta("root", content)) + require.NoError(t, s.Set("root", content)) - metaBytes, err = s.GetMeta("root", 300) + metaBytes, err = s.GetSized("root", 300) require.NoError(t, err) require.Equal(t, content, metaBytes) } // Verifies that the metadata store can delete metadata -func testRemoveMeta(t *testing.T, factory storeFactory) { +func testRemove(t *testing.T, factory storeFactory) { s := factory() - require.NoError(t, s.SetMeta("root", []byte("test data"))) + require.NoError(t, s.Set("root", []byte("test data"))) - require.NoError(t, s.RemoveMeta("root")) - _, err := s.GetMeta("root", 300) + require.NoError(t, s.Remove("root")) + _, err := s.GetSized("root", 300) require.Error(t, err) require.IsType(t, ErrMetaNotFound{}, err) // delete metadata should be successful even if the metadata doesn't exist - require.NoError(t, s.RemoveMeta("root")) + require.NoError(t, s.Remove("root")) } func TestMemoryStoreMetadata(t *testing.T) { @@ -45,5 +45,5 @@ func TestMemoryStoreMetadata(t *testing.T) { } testGetSetMeta(t, factory) - testRemoveMeta(t, factory) + testRemove(t, factory) } diff --git a/trustmanager/filestore.go b/trustmanager/filestore.go deleted file mode 100644 index 7927413a13..0000000000 --- a/trustmanager/filestore.go +++ /dev/null @@ -1,150 +0,0 @@ -package trustmanager - -import ( - "fmt" - "io/ioutil" - "os" - 
"path/filepath" - "strings" -) - -// SimpleFileStore implements FileStore -type SimpleFileStore struct { - baseDir string - fileExt string - perms os.FileMode -} - -// NewFileStore creates a fully configurable file store -func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*SimpleFileStore, error) { - baseDir = filepath.Clean(baseDir) - if err := createDirectory(baseDir, perms); err != nil { - return nil, err - } - if !strings.HasPrefix(fileExt, ".") { - fileExt = "." + fileExt - } - - return &SimpleFileStore{ - baseDir: baseDir, - fileExt: fileExt, - perms: perms, - }, nil -} - -// NewSimpleFileStore is a convenience wrapper to create a world readable, -// owner writeable filestore -func NewSimpleFileStore(baseDir, fileExt string) (*SimpleFileStore, error) { - return NewFileStore(baseDir, fileExt, visible) -} - -// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable -// _only_ filestore -func NewPrivateSimpleFileStore(baseDir, fileExt string) (*SimpleFileStore, error) { - return NewFileStore(baseDir, fileExt, private) -} - -// Add writes data to a file with a given name -func (f *SimpleFileStore) Add(name string, data []byte) error { - filePath, err := f.GetPath(name) - if err != nil { - return err - } - createDirectory(filepath.Dir(filePath), f.perms) - return ioutil.WriteFile(filePath, data, f.perms) -} - -// Remove removes a file identified by name -func (f *SimpleFileStore) Remove(name string) error { - // Attempt to remove - filePath, err := f.GetPath(name) - if err != nil { - return err - } - return os.Remove(filePath) -} - -// Get returns the data given a file name -func (f *SimpleFileStore) Get(name string) ([]byte, error) { - filePath, err := f.GetPath(name) - if err != nil { - return nil, err - } - data, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } - - return data, nil -} - -// GetPath returns the full final path of a file with a given name -func (f *SimpleFileStore) GetPath(name string) 
(string, error) { - fileName := f.genFileName(name) - fullPath := filepath.Clean(filepath.Join(f.baseDir, fileName)) - - if !strings.HasPrefix(fullPath, f.baseDir) { - return "", ErrPathOutsideStore - } - return fullPath, nil -} - -// ListFiles lists all the files inside of a store -func (f *SimpleFileStore) ListFiles() []string { - return f.list(f.baseDir) -} - -// list lists all the files in a directory given a full path. Ignores symlinks. -func (f *SimpleFileStore) list(path string) []string { - files := make([]string, 0, 0) - filepath.Walk(path, func(fp string, fi os.FileInfo, err error) error { - // If there are errors, ignore this particular file - if err != nil { - return nil - } - // Ignore if it is a directory - if fi.IsDir() { - return nil - } - - // If this is a symlink, ignore it - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - return nil - } - - // Only allow matches that end with our certificate extension (e.g. *.crt) - matched, _ := filepath.Match("*"+f.fileExt, fi.Name()) - - if matched { - // Find the relative path for this file relative to the base path. - fp, err = filepath.Rel(path, fp) - if err != nil { - return err - } - trimmed := strings.TrimSuffix(fp, f.fileExt) - files = append(files, trimmed) - } - return nil - }) - return files -} - -// genFileName returns the name using the right extension -func (f *SimpleFileStore) genFileName(name string) string { - return fmt.Sprintf("%s%s", name, f.fileExt) -} - -// BaseDir returns the base directory of the filestore -func (f *SimpleFileStore) BaseDir() string { - return f.baseDir -} - -// createDirectory receives a string of the path to a directory. 
-// It does not support passing files, so the caller has to remove -// the filename by doing filepath.Dir(full_path_to_file) -func createDirectory(dir string, perms os.FileMode) error { - // This prevents someone passing /path/to/dir and 'dir' not being created - // If two '//' exist, MkdirAll deals it with correctly - dir = dir + "/" - return os.MkdirAll(dir, perms) -} diff --git a/trustmanager/interfaces.go b/trustmanager/interfaces.go new file mode 100644 index 0000000000..2611d436a5 --- /dev/null +++ b/trustmanager/interfaces.go @@ -0,0 +1,86 @@ +package trustmanager + +import ( + "fmt" + + "github.com/docker/notary/tuf/data" +) + +// Storage implements the bare bones primitives (no hierarchy) +type Storage interface { + // Set writes a file to the specified location, returning an error if this + // is not possible (reasons may include permissions errors). The path is cleaned + // before being made absolute against the store's base dir. + Set(fileName string, data []byte) error + + // Remove deletes a file from the store relative to the store's base directory. + // The path is cleaned before being made absolute to ensure no path traversal + // outside the base directory is possible. + Remove(fileName string) error + + // Get returns the file content found at fileName relative to the base directory + // of the file store. The path is cleaned before being made absolute to ensure + // path traversal outside the store is not possible. If the file is not found + // an error to that effect is returned. + Get(fileName string) ([]byte, error) + + // ListFiles returns a list of paths relative to the base directory of the + // filestore. Any of these paths must be retrievable via the + // Storage.Get method. 
+ ListFiles() []string + + // Location returns a human readable name indicating where the implementer + // is storing keys + Location() string +} + +// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key +type ErrAttemptsExceeded struct{} + +// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key +func (err ErrAttemptsExceeded) Error() string { + return "maximum number of passphrase attempts exceeded" +} + +// ErrPasswordInvalid is returned when signing fails. It could also mean the signing +// key file was corrupted, but we have no way to distinguish. +type ErrPasswordInvalid struct{} + +// ErrPasswordInvalid is returned when signing fails. It could also mean the signing +// key file was corrupted, but we have no way to distinguish. +func (err ErrPasswordInvalid) Error() string { + return "password invalid, operation has failed." +} + +// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. +type ErrKeyNotFound struct { + KeyID string +} + +// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. +func (err ErrKeyNotFound) Error() string { + return fmt.Sprintf("signing key not found: %s", err.KeyID) +} + +const ( + keyExtension = "key" +) + +// KeyStore is a generic interface for private key storage +type KeyStore interface { + // AddKey adds a key to the KeyStore, and if the key already exists, + // succeeds. Otherwise, returns an error if it cannot add. + AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error + // Should fail with ErrKeyNotFound if the keystore is operating normally + // and knows that it does not store the requested key. 
+ GetKey(keyID string) (data.PrivateKey, string, error) + GetKeyInfo(keyID string) (KeyInfo, error) + ListKeys() map[string]KeyInfo + RemoveKey(keyID string) error + Name() string +} + +type cachedKey struct { + alias string + key data.PrivateKey +} diff --git a/trustmanager/keyfilestore.go b/trustmanager/keyfilestore.go deleted file mode 100644 index c82af3e861..0000000000 --- a/trustmanager/keyfilestore.go +++ /dev/null @@ -1,476 +0,0 @@ -package trustmanager - -import ( - "encoding/pem" - "fmt" - "path/filepath" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - "github.com/docker/notary/tuf/data" -) - -type keyInfoMap map[string]KeyInfo - -// KeyFileStore persists and manages private keys on disk -type KeyFileStore struct { - sync.Mutex - SimpleFileStore - notary.PassRetriever - cachedKeys map[string]*cachedKey - keyInfoMap -} - -// KeyMemoryStore manages private keys in memory -type KeyMemoryStore struct { - sync.Mutex - MemoryFileStore - notary.PassRetriever - cachedKeys map[string]*cachedKey - keyInfoMap -} - -// KeyInfo stores the role, path, and gun for a corresponding private key ID -// It is assumed that each private key ID is unique -type KeyInfo struct { - Gun string - Role string -} - -// NewKeyFileStore returns a new KeyFileStore creating a private directory to -// hold the keys. 
-func NewKeyFileStore(baseDir string, passphraseRetriever notary.PassRetriever) (*KeyFileStore, error) { - baseDir = filepath.Join(baseDir, notary.PrivDir) - fileStore, err := NewPrivateSimpleFileStore(baseDir, keyExtension) - if err != nil { - return nil, err - } - cachedKeys := make(map[string]*cachedKey) - keyInfoMap := make(keyInfoMap) - - keyStore := &KeyFileStore{SimpleFileStore: *fileStore, - PassRetriever: passphraseRetriever, - cachedKeys: cachedKeys, - keyInfoMap: keyInfoMap, - } - - // Load this keystore's ID --> gun/role map - keyStore.loadKeyInfo() - return keyStore, nil -} - -func generateKeyInfoMap(s Storage) map[string]KeyInfo { - keyInfoMap := make(map[string]KeyInfo) - for _, keyPath := range s.ListFiles() { - d, err := s.Get(keyPath) - if err != nil { - logrus.Error(err) - continue - } - keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath) - if err != nil { - logrus.Error(err) - continue - } - keyInfoMap[keyID] = keyInfo - } - return keyInfoMap -} - -// Attempts to infer the keyID, role, and GUN from the specified key path. 
-// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key -func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) { - var keyID, role, gun string - keyID = filepath.Base(keyPath) - underscoreIndex := strings.LastIndex(keyID, "_") - - // This is the legacy KEYID_ROLE filename - // The keyID is the first part of the keyname - // The keyRole is the second part of the keyname - // in a key named abcde_root, abcde is the keyID and root is the KeyAlias - if underscoreIndex != -1 { - role = keyID[underscoreIndex+1:] - keyID = keyID[:underscoreIndex] - } - - if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") { - return keyID, data.CanonicalRootRole, "" - } - - keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/") - gun = getGunFromFullID(keyPath) - return keyID, role, gun -} - -func getGunFromFullID(fullKeyID string) string { - keyGun := filepath.Dir(fullKeyID) - // If the gun is empty, Dir will return . - if keyGun == "." 
{ - keyGun = "" - } - return keyGun -} - -func (s *KeyFileStore) loadKeyInfo() { - s.keyInfoMap = generateKeyInfoMap(s) -} - -func (s *KeyMemoryStore) loadKeyInfo() { - s.keyInfoMap = generateKeyInfoMap(s) -} - -// GetKeyInfo returns the corresponding gun and role key info for a keyID -func (s *KeyFileStore) GetKeyInfo(keyID string) (KeyInfo, error) { - if info, ok := s.keyInfoMap[keyID]; ok { - return info, nil - } - return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID) -} - -// GetKeyInfo returns the corresponding gun and role key info for a keyID -func (s *KeyMemoryStore) GetKeyInfo(keyID string) (KeyInfo, error) { - if info, ok := s.keyInfoMap[keyID]; ok { - return info, nil - } - return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID) -} - -// Name returns a user friendly name for the location this store -// keeps its data -func (s *KeyFileStore) Name() string { - return fmt.Sprintf("file (%s)", s.SimpleFileStore.BaseDir()) -} - -// AddKey stores the contents of a PEM-encoded private key as a PEM block -func (s *KeyFileStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error { - s.Lock() - defer s.Unlock() - if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) { - keyInfo.Gun = "" - } - err := addKey(s, s.PassRetriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey) - if err != nil { - return err - } - s.keyInfoMap[privKey.ID()] = keyInfo - return nil -} - -// GetKey returns the PrivateKey given a KeyID -func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) { - s.Lock() - defer s.Unlock() - // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds - if keyInfo, ok := s.keyInfoMap[name]; ok { - name = filepath.Join(keyInfo.Gun, name) - } - return getKey(s, s.PassRetriever, s.cachedKeys, name) -} - -// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by 
returning a copy of the keyInfoMap -func (s *KeyFileStore) ListKeys() map[string]KeyInfo { - return copyKeyInfoMap(s.keyInfoMap) -} - -// RemoveKey removes the key from the keyfilestore -func (s *KeyFileStore) RemoveKey(keyID string) error { - s.Lock() - defer s.Unlock() - // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds - if keyInfo, ok := s.keyInfoMap[keyID]; ok { - keyID = filepath.Join(keyInfo.Gun, keyID) - } - err := removeKey(s, s.cachedKeys, keyID) - if err != nil { - return err - } - // Remove this key from our keyInfo map if we removed from our filesystem - delete(s.keyInfoMap, filepath.Base(keyID)) - return nil -} - -// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory -func NewKeyMemoryStore(passphraseRetriever notary.PassRetriever) *KeyMemoryStore { - memStore := NewMemoryFileStore() - cachedKeys := make(map[string]*cachedKey) - - keyInfoMap := make(keyInfoMap) - - keyStore := &KeyMemoryStore{ - MemoryFileStore: *memStore, - PassRetriever: passphraseRetriever, - cachedKeys: cachedKeys, - keyInfoMap: keyInfoMap, - } - - // Load this keystore's ID --> gun/role map - keyStore.loadKeyInfo() - return keyStore -} - -// Name returns a user friendly name for the location this store -// keeps its data -func (s *KeyMemoryStore) Name() string { - return "memory" -} - -// AddKey stores the contents of a PEM-encoded private key as a PEM block -func (s *KeyMemoryStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error { - s.Lock() - defer s.Unlock() - if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) { - keyInfo.Gun = "" - } - err := addKey(s, s.PassRetriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey) - if err != nil { - return err - } - s.keyInfoMap[privKey.ID()] = keyInfo - return nil -} - -// GetKey returns the PrivateKey given a KeyID -func (s *KeyMemoryStore) GetKey(name string) 
(data.PrivateKey, string, error) { - s.Lock() - defer s.Unlock() - // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds - if keyInfo, ok := s.keyInfoMap[name]; ok { - name = filepath.Join(keyInfo.Gun, name) - } - return getKey(s, s.PassRetriever, s.cachedKeys, name) -} - -// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap -func (s *KeyMemoryStore) ListKeys() map[string]KeyInfo { - return copyKeyInfoMap(s.keyInfoMap) -} - -// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap -func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo { - copyMap := make(map[string]KeyInfo) - for keyID, keyInfo := range keyInfoMap { - copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun} - } - return copyMap -} - -// RemoveKey removes the key from the keystore -func (s *KeyMemoryStore) RemoveKey(keyID string) error { - s.Lock() - defer s.Unlock() - // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds - if keyInfo, ok := s.keyInfoMap[keyID]; ok { - keyID = filepath.Join(keyInfo.Gun, keyID) - } - err := removeKey(s, s.cachedKeys, keyID) - if err != nil { - return err - } - // Remove this key from our keyInfo map if we removed from our filesystem - delete(s.keyInfoMap, filepath.Base(keyID)) - return nil -} - -// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key -func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) { - keyID, role, gun := inferKeyInfoFromKeyPath(filename) - if role == "" { - block, _ := pem.Decode(pemBytes) - if block == nil { - return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename) - } - if keyRole, ok := block.Headers["role"]; ok { - role = keyRole - } - } - return keyID, KeyInfo{Gun: gun, Role: role}, nil -} - -func addKey(s Storage, passphraseRetriever notary.PassRetriever, cachedKeys 
map[string]*cachedKey, name, role string, privKey data.PrivateKey) error { - - var ( - chosenPassphrase string - giveup bool - err error - ) - - for attempts := 0; ; attempts++ { - chosenPassphrase, giveup, err = passphraseRetriever(name, role, true, attempts) - if err != nil { - continue - } - if giveup { - return ErrAttemptsExceeded{} - } - if attempts > 10 { - return ErrAttemptsExceeded{} - } - break - } - - return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, role, privKey) -} - -// getKeyRole finds the role for the given keyID. It attempts to look -// both in the newer format PEM headers, and also in the legacy filename -// format. It returns: the role, whether it was found in the legacy format -// (true == legacy), and an error -func getKeyRole(s Storage, keyID string) (string, bool, error) { - name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) - - for _, file := range s.ListFiles() { - filename := filepath.Base(file) - - if strings.HasPrefix(filename, name) { - d, err := s.Get(file) - if err != nil { - return "", false, err - } - block, _ := pem.Decode(d) - if block != nil { - if role, ok := block.Headers["role"]; ok { - return role, false, nil - } - } - - role := strings.TrimPrefix(filename, name+"_") - return role, true, nil - } - } - - return "", false, ErrKeyNotFound{KeyID: keyID} -} - -// GetKey returns the PrivateKey given a KeyID -func getKey(s Storage, passphraseRetriever notary.PassRetriever, cachedKeys map[string]*cachedKey, name string) (data.PrivateKey, string, error) { - cachedKeyEntry, ok := cachedKeys[name] - if ok { - return cachedKeyEntry.key, cachedKeyEntry.alias, nil - } - - keyBytes, keyAlias, err := getRawKey(s, name) - if err != nil { - return nil, "", err - } - - // See if the key is encrypted. 
If its encrypted we'll fail to parse the private key - privKey, err := ParsePEMPrivateKey(keyBytes, "") - if err != nil { - privKey, _, err = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias)) - if err != nil { - return nil, "", err - } - } - cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey} - return privKey, keyAlias, nil -} - -// RemoveKey removes the key from the keyfilestore -func removeKey(s Storage, cachedKeys map[string]*cachedKey, name string) error { - role, legacy, err := getKeyRole(s, name) - if err != nil { - return err - } - - delete(cachedKeys, name) - - if legacy { - name = name + "_" + role - } - - // being in a subdirectory is for backwards compatibliity - err = s.Remove(filepath.Join(getSubdir(role), name)) - if err != nil { - return err - } - return nil -} - -// Assumes 2 subdirectories, 1 containing root keys and 1 containing TUF keys -func getSubdir(alias string) string { - if alias == data.CanonicalRootRole { - return notary.RootKeysSubdir - } - return notary.NonRootKeysSubdir -} - -// Given a key ID, gets the bytes and alias belonging to that key if the key -// exists -func getRawKey(s Storage, name string) ([]byte, string, error) { - role, legacy, err := getKeyRole(s, name) - if err != nil { - return nil, "", err - } - - if legacy { - name = name + "_" + role - } - - var keyBytes []byte - keyBytes, err = s.Get(filepath.Join(getSubdir(role), name)) - if err != nil { - return nil, "", err - } - return keyBytes, role, nil -} - -// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes. 
-// Returns the password and private key -func GetPasswdDecryptBytes(passphraseRetriever notary.PassRetriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) { - var ( - passwd string - retErr error - privKey data.PrivateKey - ) - for attempts := 0; ; attempts++ { - var ( - giveup bool - err error - ) - passwd, giveup, err = passphraseRetriever(name, alias, false, attempts) - // Check if the passphrase retriever got an error or if it is telling us to give up - if giveup || err != nil { - return nil, "", ErrPasswordInvalid{} - } - if attempts > 10 { - return nil, "", ErrAttemptsExceeded{} - } - - // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase - privKey, err = ParsePEMPrivateKey(pemBytes, passwd) - if err != nil { - retErr = ErrPasswordInvalid{} - } else { - // We managed to parse the PrivateKey. We've succeeded! - retErr = nil - break - } - } - if retErr != nil { - return nil, "", retErr - } - return privKey, passwd, nil -} - -func encryptAndAddKey(s Storage, passwd string, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error { - - var ( - pemPrivKey []byte - err error - ) - - if passwd != "" { - pemPrivKey, err = EncryptPrivateKey(privKey, role, passwd) - } else { - pemPrivKey, err = KeyToPEM(privKey, role) - } - - if err != nil { - return err - } - - cachedKeys[name] = &cachedKey{alias: role, key: privKey} - return s.Add(filepath.Join(getSubdir(role), name), pemPrivKey) -} diff --git a/trustmanager/keystore.go b/trustmanager/keystore.go index e620aa4537..c57d28f44c 100644 --- a/trustmanager/keystore.go +++ b/trustmanager/keystore.go @@ -1,58 +1,355 @@ package trustmanager import ( + "encoding/pem" "fmt" + "path/filepath" + "strings" + "sync" + "github.com/Sirupsen/logrus" + "github.com/docker/notary" + store "github.com/docker/notary/storage" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" ) -// ErrAttemptsExceeded is returned when too many attempts have 
been made to decrypt a key -type ErrAttemptsExceeded struct{} +type keyInfoMap map[string]KeyInfo -// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key -func (err ErrAttemptsExceeded) Error() string { - return "maximum number of passphrase attempts exceeded" +// KeyInfo stores the role, path, and gun for a corresponding private key ID +// It is assumed that each private key ID is unique +type KeyInfo struct { + Gun string + Role string } -// ErrPasswordInvalid is returned when signing fails. It could also mean the signing -// key file was corrupted, but we have no way to distinguish. -type ErrPasswordInvalid struct{} +// GenericKeyStore is a wrapper for Storage instances that provides +// translation between the []byte form and Public/PrivateKey objects +type GenericKeyStore struct { + store Storage + sync.Mutex + notary.PassRetriever + cachedKeys map[string]*cachedKey + keyInfoMap +} -// ErrPasswordInvalid is returned when signing fails. It could also mean the signing -// key file was corrupted, but we have no way to distinguish. -func (err ErrPasswordInvalid) Error() string { - return "password invalid, operation has failed." +// NewKeyFileStore returns a new KeyFileStore creating a private directory to +// hold the keys. +func NewKeyFileStore(baseDir string, p notary.PassRetriever) (*GenericKeyStore, error) { + baseDir = filepath.Join(baseDir, notary.PrivDir) + fileStore, err := store.NewPrivateSimpleFileStore(baseDir, keyExtension) + if err != nil { + return nil, err + } + return NewGenericKeyStore(fileStore, p), nil } -// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. 
-type ErrKeyNotFound struct { - KeyID string +// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory +func NewKeyMemoryStore(p notary.PassRetriever) *GenericKeyStore { + memStore := store.NewMemoryStore(nil) + return NewGenericKeyStore(memStore, p) } -// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. -func (err ErrKeyNotFound) Error() string { - return fmt.Sprintf("signing key not found: %s", err.KeyID) +// NewGenericKeyStore creates a GenericKeyStore wrapping the provided +// Storage instance, using the PassRetriever to enc/decrypt keys +func NewGenericKeyStore(s Storage, p notary.PassRetriever) *GenericKeyStore { + ks := GenericKeyStore{ + store: s, + PassRetriever: p, + cachedKeys: make(map[string]*cachedKey), + keyInfoMap: make(keyInfoMap), + } + ks.loadKeyInfo() + return &ks } -const ( - keyExtension = "key" -) +func generateKeyInfoMap(s Storage) map[string]KeyInfo { + keyInfoMap := make(map[string]KeyInfo) + for _, keyPath := range s.ListFiles() { + d, err := s.Get(keyPath) + if err != nil { + logrus.Error(err) + continue + } + keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath) + if err != nil { + logrus.Error(err) + continue + } + keyInfoMap[keyID] = keyInfo + } + return keyInfoMap +} + +// Attempts to infer the keyID, role, and GUN from the specified key path. 
+// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key +func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) { + var keyID, role, gun string + keyID = filepath.Base(keyPath) + underscoreIndex := strings.LastIndex(keyID, "_") + + // This is the legacy KEYID_ROLE filename + // The keyID is the first part of the keyname + // The keyRole is the second part of the keyname + // in a key named abcde_root, abcde is the keyID and root is the KeyAlias + if underscoreIndex != -1 { + role = keyID[underscoreIndex+1:] + keyID = keyID[:underscoreIndex] + } + + if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") { + return keyID, data.CanonicalRootRole, "" + } + + keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/") + gun = getGunFromFullID(keyPath) + return keyID, role, gun +} + +func getGunFromFullID(fullKeyID string) string { + keyGun := filepath.Dir(fullKeyID) + // If the gun is empty, Dir will return . + if keyGun == "." 
{ + keyGun = "" + } + return keyGun +} + +func (s *GenericKeyStore) loadKeyInfo() { + s.keyInfoMap = generateKeyInfoMap(s.store) +} + +// GetKeyInfo returns the corresponding gun and role key info for a keyID +func (s *GenericKeyStore) GetKeyInfo(keyID string) (KeyInfo, error) { + if info, ok := s.keyInfoMap[keyID]; ok { + return info, nil + } + return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID) +} + +// AddKey stores the contents of a PEM-encoded private key as a PEM block +func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error { + var ( + chosenPassphrase string + giveup bool + err error + pemPrivKey []byte + ) + s.Lock() + defer s.Unlock() + if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) { + keyInfo.Gun = "" + } + name := filepath.Join(keyInfo.Gun, privKey.ID()) + for attempts := 0; ; attempts++ { + chosenPassphrase, giveup, err = s.PassRetriever(name, keyInfo.Role, true, attempts) + if err != nil { + continue + } + if giveup || attempts > 10 { + return ErrAttemptsExceeded{} + } + break + } + + if chosenPassphrase != "" { + pemPrivKey, err = utils.EncryptPrivateKey(privKey, keyInfo.Role, chosenPassphrase) + } else { + pemPrivKey, err = utils.KeyToPEM(privKey, keyInfo.Role) + } + + if err != nil { + return err + } + + s.cachedKeys[name] = &cachedKey{alias: keyInfo.Role, key: privKey} + err = s.store.Set(filepath.Join(getSubdir(keyInfo.Role), name), pemPrivKey) + if err != nil { + return err + } + s.keyInfoMap[privKey.ID()] = keyInfo + return nil +} + +// GetKey returns the PrivateKey given a KeyID +func (s *GenericKeyStore) GetKey(name string) (data.PrivateKey, string, error) { + s.Lock() + defer s.Unlock() + // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds + if keyInfo, ok := s.keyInfoMap[name]; ok { + name = filepath.Join(keyInfo.Gun, name) + } + + cachedKeyEntry, ok := s.cachedKeys[name] + if ok { + return 
cachedKeyEntry.key, cachedKeyEntry.alias, nil + } + + keyAlias, legacy, err := getKeyRole(s.store, name) + if err != nil { + return nil, "", err + } + + if legacy { + name = name + "_" + keyAlias + } + + keyBytes, err := s.store.Get(filepath.Join(getSubdir(keyAlias), name)) + if err != nil { + return nil, "", err + } + + // See if the key is encrypted. If its encrypted we'll fail to parse the private key + privKey, err := utils.ParsePEMPrivateKey(keyBytes, "") + if err != nil { + privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, name, string(keyAlias)) + if err != nil { + return nil, "", err + } + } + s.cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey} + return privKey, keyAlias, nil +} + +// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap +func (s *GenericKeyStore) ListKeys() map[string]KeyInfo { + return copyKeyInfoMap(s.keyInfoMap) +} + +// RemoveKey removes the key from the keyfilestore +func (s *GenericKeyStore) RemoveKey(keyID string) error { + s.Lock() + defer s.Unlock() + // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds + if keyInfo, ok := s.keyInfoMap[keyID]; ok { + keyID = filepath.Join(keyInfo.Gun, keyID) + } + + role, legacy, err := getKeyRole(s.store, keyID) + if err != nil { + return err + } + + delete(s.cachedKeys, keyID) + + name := keyID + if legacy { + name = keyID + "_" + role + } + + // being in a subdirectory is for backwards compatibliity + err = s.store.Remove(filepath.Join(getSubdir(role), name)) + if err != nil { + return err + } + + // Remove this key from our keyInfo map if we removed from our filesystem + delete(s.keyInfoMap, filepath.Base(keyID)) + return nil +} + +// Name returns a user friendly name for the location this store +// keeps its data +func (s *GenericKeyStore) Name() string { + return s.store.Location() +} + +// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap +func 
copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo { + copyMap := make(map[string]KeyInfo) + for keyID, keyInfo := range keyInfoMap { + copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun} + } + return copyMap +} + +// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key +func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) { + keyID, role, gun := inferKeyInfoFromKeyPath(filename) + if role == "" { + block, _ := pem.Decode(pemBytes) + if block == nil { + return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename) + } + if keyRole, ok := block.Headers["role"]; ok { + role = keyRole + } + } + return keyID, KeyInfo{Gun: gun, Role: role}, nil +} + +// getKeyRole finds the role for the given keyID. It attempts to look +// both in the newer format PEM headers, and also in the legacy filename +// format. It returns: the role, whether it was found in the legacy format +// (true == legacy), and an error +func getKeyRole(s Storage, keyID string) (string, bool, error) { + name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) + + for _, file := range s.ListFiles() { + filename := filepath.Base(file) + + if strings.HasPrefix(filename, name) { + d, err := s.Get(file) + if err != nil { + return "", false, err + } + block, _ := pem.Decode(d) + if block != nil { + if role, ok := block.Headers["role"]; ok { + return role, false, nil + } + } + + role := strings.TrimPrefix(filename, name+"_") + return role, true, nil + } + } + + return "", false, ErrKeyNotFound{KeyID: keyID} +} + +// Assumes 2 subdirectories, 1 containing root keys and 1 containing TUF keys +func getSubdir(alias string) string { + if alias == data.CanonicalRootRole { + return notary.RootKeysSubdir + } + return notary.NonRootKeysSubdir +} + +// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes. 
+// Returns the password and private key +func GetPasswdDecryptBytes(passphraseRetriever notary.PassRetriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) { + var ( + passwd string + retErr error + privKey data.PrivateKey + ) + for attempts := 0; ; attempts++ { + var ( + giveup bool + err error + ) + passwd, giveup, err = passphraseRetriever(name, alias, false, attempts) + // Check if the passphrase retriever got an error or if it is telling us to give up + if giveup || err != nil { + return nil, "", ErrPasswordInvalid{} + } + if attempts > 10 { + return nil, "", ErrAttemptsExceeded{} + } -// KeyStore is a generic interface for private key storage -type KeyStore interface { - // AddKey adds a key to the KeyStore, and if the key already exists, - // succeeds. Otherwise, returns an error if it cannot add. - AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error - // Should fail with ErrKeyNotFound if the keystore is operating normally - // and knows that it does not store the requested key. - GetKey(keyID string) (data.PrivateKey, string, error) - GetKeyInfo(keyID string) (KeyInfo, error) - ListKeys() map[string]KeyInfo - RemoveKey(keyID string) error - Name() string -} - -type cachedKey struct { - alias string - key data.PrivateKey + // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase + privKey, err = utils.ParsePEMPrivateKey(pemBytes, passwd) + if err != nil { + retErr = ErrPasswordInvalid{} + } else { + // We managed to parse the PrivateKey. We've succeeded! 
+ retErr = nil + break + } + } + if retErr != nil { + return nil, "", retErr + } + return privKey, passwd, nil } diff --git a/trustmanager/keyfilestore_test.go b/trustmanager/keystore_test.go similarity index 97% rename from trustmanager/keyfilestore_test.go rename to trustmanager/keystore_test.go index 82e4f6412b..12ecb2297b 100644 --- a/trustmanager/keyfilestore_test.go +++ b/trustmanager/keystore_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/notary" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" ) @@ -45,7 +46,7 @@ func testAddKeyWithRole(t *testing.T, role, expectedSubdir string) { store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) require.NoError(t, err, "failed to create new key filestore") - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Since we're generating this manually we need to add the extension '.' 
@@ -85,11 +86,11 @@ func TestKeyStoreInternalState(t *testing.T) { roleToID := make(map[string]string) for _, role := range roles { // generate a key for the role - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // generate the correct PEM role header - privKeyPEM, err := KeyToPEM(privKey, role) + privKeyPEM, err := utils.KeyToPEM(privKey, role) require.NoError(t, err, "could not generate PEM") // write the key file to the correct location @@ -149,7 +150,7 @@ func TestKeyStoreInternalState(t *testing.T) { require.False(t, ok) // Generate a new targets key and add it with its gun, check that the map gets updated back - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") require.NoError(t, store.AddKey(KeyInfo{Role: data.CanonicalTargetsRole, Gun: gun}, privKey)) require.Equal(t, gun, store.keyInfoMap[privKey.ID()].Gun) @@ -245,7 +246,7 @@ EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= if success { require.NoError(t, err, "failed to get %s key from store (it's in %s)", role, expectedSubdir) - pemPrivKey, err := KeyToPEM(privKey, role) + pemPrivKey, err := utils.KeyToPEM(privKey, role) require.NoError(t, err, "failed to convert key to PEM") require.Equal(t, testData, pemPrivKey) @@ -336,7 +337,7 @@ func TestListKeys(t *testing.T) { for i, role := range roles { // Make a new key for each role - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Call the AddKey function @@ -379,7 +380,7 @@ func TestAddGetKeyMemStore(t *testing.T) { // Create our store store := NewKeyMemoryStore(passphraseRetriever) - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate 
private key") // Call the AddKey function @@ -401,7 +402,7 @@ func TestAddGetKeyInfoMemStore(t *testing.T) { // Create our store store := NewKeyMemoryStore(passphraseRetriever) - rootKey, err := GenerateECDSAKey(rand.Reader) + rootKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Call the AddKey function @@ -414,7 +415,7 @@ func TestAddGetKeyInfoMemStore(t *testing.T) { require.Equal(t, data.CanonicalRootRole, rootInfo.Role) require.Equal(t, "", rootInfo.Gun) - targetsKey, err := GenerateECDSAKey(rand.Reader) + targetsKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Call the AddKey function @@ -427,7 +428,7 @@ func TestAddGetKeyInfoMemStore(t *testing.T) { require.Equal(t, data.CanonicalTargetsRole, targetsInfo.Role) require.Equal(t, gun, targetsInfo.Gun) - delgKey, err := GenerateECDSAKey(rand.Reader) + delgKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Call the AddKey function @@ -455,7 +456,7 @@ func TestGetDecryptedWithTamperedCipherText(t *testing.T) { require.NoError(t, err, "failed to create new key filestore") // Generate a new Private Key - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Call the AddEncryptedKey function @@ -546,7 +547,7 @@ func testGetDecryptedWithInvalidPassphrase(t *testing.T, store KeyStore, newStor testAlias := data.CanonicalRootRole // Generate a new random RSA Key - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Call the AddKey function @@ -581,7 +582,7 @@ func testRemoveKeyWithRole(t *testing.T, role, expectedSubdir string) { store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) require.NoError(t, err, "failed to create 
new key filestore") - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Since we're generating this manually we need to add the extension '.' @@ -624,7 +625,7 @@ func TestKeysAreCached(t *testing.T) { store, err := NewKeyFileStore(tempBaseDir, countingPassphraseRetriever) require.NoError(t, err, "failed to create new key filestore") - privKey, err := GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err, "could not generate private key") // Call the AddKey function diff --git a/trustmanager/memorystore.go b/trustmanager/memorystore.go deleted file mode 100644 index d93e6c72c8..0000000000 --- a/trustmanager/memorystore.go +++ /dev/null @@ -1,54 +0,0 @@ -package trustmanager - -import "os" - -// MemoryFileStore is an implementation of Storage that keeps the contents in -// memory. It is not thread-safe and should be used by a higher-level interface -// that provides locking. 
-type MemoryFileStore struct { - files map[string][]byte -} - -// NewMemoryFileStore creates a MemoryFileStore -func NewMemoryFileStore() *MemoryFileStore { - return &MemoryFileStore{ - files: make(map[string][]byte), - } -} - -// Add writes data to a file with a given name -func (f *MemoryFileStore) Add(name string, data []byte) error { - f.files[name] = data - return nil -} - -// Remove removes a file identified by name -func (f *MemoryFileStore) Remove(name string) error { - if _, present := f.files[name]; !present { - return os.ErrNotExist - } - delete(f.files, name) - - return nil -} - -// Get returns the data given a file name -func (f *MemoryFileStore) Get(name string) ([]byte, error) { - fileData, present := f.files[name] - if !present { - return nil, os.ErrNotExist - } - - return fileData, nil -} - -// ListFiles lists all the files inside of a store -func (f *MemoryFileStore) ListFiles() []string { - var list []string - - for name := range f.files { - list = append(list, name) - } - - return list -} diff --git a/trustmanager/store.go b/trustmanager/store.go deleted file mode 100644 index c3b23469ba..0000000000 --- a/trustmanager/store.go +++ /dev/null @@ -1,42 +0,0 @@ -package trustmanager - -import ( - "errors" - - "github.com/docker/notary" -) - -const ( - visible = notary.PubCertPerms - private = notary.PrivKeyPerms -) - -var ( - // ErrPathOutsideStore indicates that the returned path would be - // outside the store - ErrPathOutsideStore = errors.New("path outside file store") -) - -// Storage implements the bare bones primitives (no hierarchy) -type Storage interface { - // Add writes a file to the specified location, returning an error if this - // is not possible (reasons may include permissions errors). The path is cleaned - // before being made absolute against the store's base dir. - Add(fileName string, data []byte) error - - // Remove deletes a file from the store relative to the store's base directory. 
- // The path is cleaned before being made absolute to ensure no path traversal - // outside the base directory is possible. - Remove(fileName string) error - - // Get returns the file content found at fileName relative to the base directory - // of the file store. The path is cleaned before being made absolute to ensure - // path traversal outside the store is not possible. If the file is not found - // an error to that effect is returned. - Get(fileName string) ([]byte, error) - - // ListFiles returns a list of paths relative to the base directory of the - // filestore. Any of these paths must be retrievable via the - // Storage.Get method. - ListFiles() []string -} diff --git a/trustmanager/yubikey/yubikeystore.go b/trustmanager/yubikey/yubikeystore.go index 86b4e718a2..ed13f04110 100644 --- a/trustmanager/yubikey/yubikeystore.go +++ b/trustmanager/yubikey/yubikeystore.go @@ -21,6 +21,7 @@ import ( "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" + "github.com/docker/notary/tuf/utils" "github.com/miekg/pkcs11" ) @@ -249,7 +250,7 @@ func addECDSAKey( // Hard-coded policy: the generated certificate expires in 10 years. 
startTime := time.Now() - template, err := trustmanager.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0)) + template, err := utils.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0)) if err != nil { return fmt.Errorf("failed to create the certificate template: %v", err) } diff --git a/trustmanager/yubikey/yubikeystore_test.go b/trustmanager/yubikey/yubikeystore_test.go index e9b120a0aa..c3d1a691c6 100644 --- a/trustmanager/yubikey/yubikeystore_test.go +++ b/trustmanager/yubikey/yubikeystore_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/miekg/pkcs11" "github.com/stretchr/testify/require" ) @@ -57,7 +58,7 @@ func TestEnsurePrivateKeySizePadsLessThanRequiredSizeArrays(t *testing.T) { } func testAddKey(t *testing.T, store trustmanager.KeyStore) (data.PrivateKey, error) { - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) err = store.AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole, Gun: ""}, privKey) @@ -250,7 +251,7 @@ func TestYubiAddKeyCanAddToMiddleSlot(t *testing.T) { } type nonworkingBackup struct { - trustmanager.KeyMemoryStore + trustmanager.GenericKeyStore } // AddKey stores the contents of a PEM-encoded private key as a PEM block @@ -273,7 +274,7 @@ func TestYubiAddKeyRollsBackIfCannotBackup(t *testing.T) { }() backup := &nonworkingBackup{ - KeyMemoryStore: *trustmanager.NewKeyMemoryStore(ret), + GenericKeyStore: *trustmanager.NewKeyMemoryStore(ret), } store, err := NewYubiStore(backup, ret) require.NoError(t, err) diff --git a/trustpinning/certs.go b/trustpinning/certs.go index 0e584b743e..05487d5751 100644 --- a/trustpinning/certs.go +++ b/trustpinning/certs.go @@ -8,9 +8,9 @@ import ( "time" "github.com/Sirupsen/logrus" - "github.com/docker/notary/trustmanager" 
"github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" + "github.com/docker/notary/tuf/utils" ) // ErrValidationFail is returned when there is no valid trusted certificates @@ -123,7 +123,7 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus } err = signed.VerifySignatures( - root, data.BaseRole{Keys: trustmanager.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold}) + root, data.BaseRole{Keys: utils.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold}) if err != nil { logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err) return nil, &ErrRootRotationFail{Reason: "failed to validate data with current trusted certificates"} @@ -152,7 +152,7 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus // Note that certsFromRoot is guaranteed to be unchanged only if we had prior cert data for this GUN or enabled TOFUS // If we attempted to pin a certain certificate or CA, certsFromRoot could have been pruned accordingly err = signed.VerifySignatures(root, data.BaseRole{ - Keys: trustmanager.CertsToKeys(certsFromRoot, allIntCerts), Threshold: rootRole.Threshold}) + Keys: utils.CertsToKeys(certsFromRoot, allIntCerts), Threshold: rootRole.Threshold}) if err != nil { logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err) return nil, &ErrValidationFail{Reason: "failed to validate integrity of roots"} @@ -233,14 +233,14 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m // Decode all the x509 certificates that were bundled with this // Specific root key - decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public()) + decodedCerts, err := utils.LoadCertBundleFromPEM(key.Public()) if err != nil { logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err) continue } // Get all non-CA certificates in the decoded certificates - leafCertList := 
trustmanager.GetLeafCerts(decodedCerts) + leafCertList := utils.GetLeafCerts(decodedCerts) // If we got no leaf certificates or we got more than one, fail if len(leafCertList) != 1 { @@ -260,7 +260,7 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m leafCerts[key.ID()] = leafCert // Get all the remainder certificates marked as a CA to be used as intermediates - intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts) + intermediateCerts := utils.GetIntermediateCerts(decodedCerts) intCerts[key.ID()] = intermediateCerts } diff --git a/trustpinning/certs_test.go b/trustpinning/certs_test.go index 4f26936d10..a037723c4e 100644 --- a/trustpinning/certs_test.go +++ b/trustpinning/certs_test.go @@ -24,6 +24,7 @@ import ( "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/testutils" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" ) @@ -444,13 +445,13 @@ func TestValidateRootWithPinnedCA(t *testing.T) { require.Error(t, err) // Now construct a new root with a valid cert chain, such that signatures are correct over the 'notary-signer' GUN. 
Pin the root-ca and validate - leafCert, err := trustmanager.LoadCertFromFile("../fixtures/notary-signer.crt") + leafCert, err := utils.LoadCertFromFile("../fixtures/notary-signer.crt") require.NoError(t, err) - intermediateCert, err := trustmanager.LoadCertFromFile("../fixtures/intermediate-ca.crt") + intermediateCert, err := utils.LoadCertFromFile("../fixtures/intermediate-ca.crt") require.NoError(t, err) - pemChainBytes, err := trustmanager.CertChainToPEM([]*x509.Certificate{leafCert, intermediateCert}) + pemChainBytes, err := utils.CertChainToPEM([]*x509.Certificate{leafCert, intermediateCert}) require.NoError(t, err) newRootKey := data.NewPublicKey(data.RSAx509Key, pemChainBytes) @@ -474,7 +475,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) { require.NoError(t, err, "could not open key file") pemBytes, err := ioutil.ReadAll(keyReader) require.NoError(t, err, "could not read key file") - privKey, err := trustmanager.ParsePEMPrivateKey(pemBytes, "") + privKey, err := utils.ParsePEMPrivateKey(pemBytes, "") require.NoError(t, err) store, err := trustmanager.NewKeyFileStore(tempBaseDir, passphraseRetriever) @@ -500,7 +501,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) { require.Equal(t, newTypedSignedRoot, validatedRoot) // Add an expired CA for the same gun to our previous pinned bundle, ensure that we still validate correctly - goodRootCABundle, err := trustmanager.LoadCertBundleFromFile(validCAFilepath) + goodRootCABundle, err := utils.LoadCertBundleFromFile(validCAFilepath) require.NoError(t, err) memKeyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService(memKeyStore) @@ -510,7 +511,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) { require.NoError(t, err) expiredCert, err := generateExpiredTestingCertificate(testPrivKey, "notary-signer") require.NoError(t, err) - bundleWithExpiredCert, err := trustmanager.CertChainToPEM(append(goodRootCABundle, expiredCert)) + bundleWithExpiredCert, err := 
utils.CertChainToPEM(append(goodRootCABundle, expiredCert)) require.NoError(t, err) bundleWithExpiredCertPath := filepath.Join(tempBaseDir, "bundle_with_expired_cert.pem") require.NoError(t, ioutil.WriteFile(bundleWithExpiredCertPath, bundleWithExpiredCert, 0644)) @@ -526,7 +527,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) { require.NoError(t, err) expiredCert2, err := generateExpiredTestingCertificate(testPrivKey2, "notary-signer") require.NoError(t, err) - allExpiredCertBundle, err := trustmanager.CertChainToPEM([]*x509.Certificate{expiredCert, expiredCert2}) + allExpiredCertBundle, err := utils.CertChainToPEM([]*x509.Certificate{expiredCert, expiredCert2}) require.NoError(t, err) allExpiredCertPath := filepath.Join(tempBaseDir, "all_expired_cert.pem") require.NoError(t, ioutil.WriteFile(allExpiredCertPath, allExpiredCertBundle, 0644)) @@ -541,7 +542,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) { require.NoError(t, err) validCert, err := cryptoservice.GenerateCertificate(testPrivKey3, "notary-signer", time.Now(), time.Now().AddDate(1, 0, 0)) require.NoError(t, err) - bundleWithWrongCert, err := trustmanager.CertChainToPEM([]*x509.Certificate{validCert}) + bundleWithWrongCert, err := utils.CertChainToPEM([]*x509.Certificate{validCert}) require.NoError(t, err) bundleWithWrongCertPath := filepath.Join(tempBaseDir, "bundle_with_expired_cert.pem") require.NoError(t, ioutil.WriteFile(bundleWithWrongCertPath, bundleWithWrongCert, 0644)) diff --git a/trustpinning/trustpin.go b/trustpinning/trustpin.go index 351aed266a..5ddfe14e5b 100644 --- a/trustpinning/trustpin.go +++ b/trustpinning/trustpin.go @@ -4,7 +4,6 @@ import ( "crypto/x509" "fmt" "github.com/Sirupsen/logrus" - "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/utils" "strings" ) @@ -39,14 +38,14 @@ func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker, if caFilepath, err := getPinnedCAFilepathByPrefix(gun, trustPinConfig); err == nil { // Try to add 
the CA certs from its bundle file to our certificate store, // and use it to validate certs in the root.json later - caCerts, err := trustmanager.LoadCertBundleFromFile(caFilepath) + caCerts, err := utils.LoadCertBundleFromFile(caFilepath) if err != nil { return nil, fmt.Errorf("could not load root cert from CA path") } // Now only consider certificates that are direct children from this CA cert chain caRootPool := x509.NewCertPool() for _, caCert := range caCerts { - if err = trustmanager.ValidateCertificate(caCert); err != nil { + if err = utils.ValidateCertificate(caCert); err != nil { continue } caRootPool.AddCert(caCert) @@ -68,7 +67,7 @@ func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker, func (t trustPinChecker) certsCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool { // reconstruct the leaf + intermediate cert chain, which is bundled as {leaf, intermediates...}, // in order to get the matching id in the root file - key, err := trustmanager.CertBundleToKey(leafCert, intCerts) + key, err := utils.CertBundleToKey(leafCert, intCerts) if err != nil { logrus.Debug("error creating cert bundle: ", err.Error()) return false diff --git a/tuf/client/client.go b/tuf/client/client.go index 90ab698f43..4b7b4bc1aa 100644 --- a/tuf/client/client.go +++ b/tuf/client/client.go @@ -5,9 +5,9 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/notary" - tuf "github.com/docker/notary/tuf" + store "github.com/docker/notary/storage" + "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/store" ) // Client is a usability wrapper around a raw TUF repo @@ -88,7 +88,7 @@ func (c *Client) downloadRoot() error { logrus.Debugf("Loading root with no expected checksum") // get the cached root, if it exists, just for version checking - cachedRoot, _ := c.cache.GetMeta(role, -1) + cachedRoot, _ := c.cache.GetSized(role, -1) // prefer to download a new root _, remoteErr := 
c.tryLoadRemote(consistentInfo, cachedRoot) return remoteErr @@ -107,7 +107,7 @@ func (c *Client) downloadTimestamp() error { consistentInfo := c.newBuilder.GetConsistentInfo(role) // get the cached timestamp, if it exists - cachedTS, cachedErr := c.cache.GetMeta(role, notary.MaxTimestampSize) + cachedTS, cachedErr := c.cache.GetSized(role, notary.MaxTimestampSize) // always get the remote timestamp, since it supercedes the local one _, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS) @@ -188,7 +188,7 @@ func (c Client) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) } func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) { - cachedTS, err := c.cache.GetMeta(consistentInfo.RoleName, consistentInfo.Length()) + cachedTS, err := c.cache.GetSized(consistentInfo.RoleName, consistentInfo.Length()) if err != nil { logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName) return c.tryLoadRemote(consistentInfo, nil) @@ -205,7 +205,7 @@ func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]by func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) { consistentName := consistentInfo.ConsistentName() - raw, err := c.remote.GetMeta(consistentName, consistentInfo.Length()) + raw, err := c.remote.GetSized(consistentName, consistentInfo.Length()) if err != nil { logrus.Debugf("error downloading %s: %s", consistentName, err) return old, err @@ -222,7 +222,7 @@ func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([ return raw, err } logrus.Debugf("successfully verified downloaded %s", consistentName) - if err := c.cache.SetMeta(consistentInfo.RoleName, raw); err != nil { + if err := c.cache.Set(consistentInfo.RoleName, raw); err != nil { logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err) } return raw, nil diff --git a/tuf/signed/ed25519.go b/tuf/signed/ed25519.go index bc884bdbe6..eef673b9da 
100644 --- a/tuf/signed/ed25519.go +++ b/tuf/signed/ed25519.go @@ -6,6 +6,7 @@ import ( "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" ) type edCryptoKey struct { @@ -72,7 +73,7 @@ func (e *Ed25519) Create(role, gun, algorithm string) (data.PublicKey, error) { return nil, errors.New("only ED25519 supported by this cryptoservice") } - private, err := trustmanager.GenerateED25519Key(rand.Reader) + private, err := utils.GenerateED25519Key(rand.Reader) if err != nil { return nil, err } diff --git a/tuf/signed/sign_test.go b/tuf/signed/sign_test.go index 022c5c40c3..482092ad35 100644 --- a/tuf/signed/sign_test.go +++ b/tuf/signed/sign_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/notary/cryptoservice" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" ) @@ -208,14 +209,14 @@ func TestSignReturnsNoSigs(t *testing.T) { func TestSignWithX509(t *testing.T) { // generate a key becase we need a cert - privKey, err := trustmanager.GenerateRSAKey(rand.Reader, 1024) + privKey, err := utils.GenerateRSAKey(rand.Reader, 1024) require.NoError(t, err) // make a RSA x509 key cert, err := cryptoservice.GenerateCertificate(privKey, "test", time.Now(), time.Now().AddDate(10, 0, 0)) require.NoError(t, err) - tufRSAx509Key := trustmanager.CertToKey(cert) + tufRSAx509Key := utils.CertToKey(cert) require.NoError(t, err) // test signing against a service that only recognizes a RSAKey (not @@ -335,7 +336,7 @@ func TestSignMinSignatures(t *testing.T) { } func TestSignFailingKeys(t *testing.T) { - privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) + privKey, err := utils.GenerateECDSAKey(rand.Reader) require.NoError(t, err) cs := &MockCryptoService{FailingPrivateKey{privKey}} diff --git a/tuf/signed/verifiers_test.go b/tuf/signed/verifiers_test.go index a9209cff53..0f761d1dd1 100644 --- 
a/tuf/signed/verifiers_test.go +++ b/tuf/signed/verifiers_test.go @@ -433,6 +433,41 @@ func TestRSAPyCryptoVerifierInvalidKeyType(t *testing.T) { require.IsType(t, ErrInvalidKeyType{}, err) } +func TestPyCryptoRSAPSSCompat(t *testing.T) { + pubPem := "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAnKuXZeefa2LmgxaL5NsM\nzKOHNe+x/nL6ik+lDBCTV6OdcwAhHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5\nVSCuRJ53UronENl6lsa5mFKP8StYLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDf\nBEPIRp28ev/NViwGOEkBu2UAbwCIdnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK\n6pdzJXlhr9yap3UpgQ/iO9JtoEYB2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq\n3xmN4p+R4VGzfdQN+8Kl/IPjqWB535twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrM\nBI8ztvPiogz+MvXb8WvarZ6TMTh8ifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X\n7sXoaqszEtXdq5ef5zKVxkiyIQZcbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj\n1ANMFPxDQpHudCLxwCzjCb+sVa20HBRPTnzo8LSZkI6jAgMBAAE=\n-----END PUBLIC KEY-----" + //privPem := "-----BEGIN RSA PRIVATE KEY-----\nMIIG4wIBAAKCAYEAnKuXZeefa2LmgxaL5NsMzKOHNe+x/nL6ik+lDBCTV6OdcwAh\nHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5VSCuRJ53UronENl6lsa5mFKP8StY\nLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDfBEPIRp28ev/NViwGOEkBu2UAbwCI\ndnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK6pdzJXlhr9yap3UpgQ/iO9JtoEYB\n2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq3xmN4p+R4VGzfdQN+8Kl/IPjqWB5\n35twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrMBI8ztvPiogz+MvXb8WvarZ6TMTh8\nifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X7sXoaqszEtXdq5ef5zKVxkiyIQZc\nbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj1ANMFPxDQpHudCLxwCzjCb+sVa20\nHBRPTnzo8LSZkI6jAgMBAAECggGAdzyI7z/HLt2IfoAsXDLynNRgVYZluzgawiU3\ngeUjnnGhpSKWERXJC2IWDPBk0YOGgcnQxErNTdfXiFZ/xfRlSgqjVwob2lRe4w4B\npLr+CZXcgznv1VrPUvdolOSp3R2Mahfn7u0qVDUQ/g8jWVI6KW7FACmQhzQkPM8o\ntLGrpcmK+PA465uaHKtYccEB02ILqrK8v++tknv7eIZczrsSKlS1h/HHjSaidYxP\n2DAUiF7wnChrwwQEvuEUHhwVgQcoDMBoow0zwHdbFiFO2ZT54H2oiJWLhpR/x6RK\ngM1seqoPH2sYErPJACMcYsMtF4Tx7b5c4WSj3vDCGb+jeqnNS6nFC3aMnv75mUS2\nYDPU1heJFd8pNHVf0RDejLZZUiJSnXf3vpOxt9Xv2+4He0jeMfLV7zX0mO2Ni3MJ\nx6PiVy4xerHImOuuHzSla5crOq2ECiAxd1wEOFDRD2LRHzfhpk1ghiA5xA1qwc7Z\neRnkVfo
y6PPZ4lZakZTm0p8YCQURAoHBAMUIC/7vnayLae7POmgy+np/ty7iMfyd\nV1eO6LTO21KAaGGlhaY26WD/5LcG2FUgc5jKKahprGrmiNLzLUeQPckJmuijSEVM\nl/4DlRvCo867l7fLaVqYzsQBBdeGIFNiT+FBOd8atff87ZBEfH/rXbDi7METD/VR\n4TdblnCsKYAXEJUdkw3IK7SUGERiQZIwKXrH/Map4ibDrljJ71iCgEureU0DBwcg\nwLftmjGMISoLscdRxeubX5uf/yxtHBJeRwKBwQDLjzHhb4gNGdBHUl4hZPAGCq1V\nLX/GpfoOVObW64Lud+tI6N9GNua5/vWduL7MWWOzDTMZysganhKwsJCY5SqAA9p0\nb6ohusf9i1nUnOa2F2j+weuYPXrTYm+ZrESBBdaEJPuj3R5YHVujrBA9Xe0kVOe3\nne151A+0xJOI3tX9CttIaQAsXR7cMDinkDITw6i7X4olRMPCSixHLW97cDsVDRGt\necO1d4dP3OGscN+vKCoL6tDKDotzWHYPwjH47sUCgcEAoVI8WCiipbKkMnaTsNsE\ngKXvO0DSgq3k5HjLCbdQldUzIbgfnH7bSKNcBYtiNxjR7OihgRW8qO5GWsnmafCs\n1dy6a/2835id3cnbHRaZflvUFhVDFn2E1bCsstFLyFn3Y0w/cO9yzC/X5sZcVXRF\nit3R0Selakv3JZckru4XMJwx5JWJYMBjIIAc+miknWg3niL+UT6pPun65xG3mXWI\nS+yC7c4rw+dKQ44UMLs2MDHRBoxqi8T0W/x9NkfDszpjAoHAclH7S4ZdvC3RIR0L\nLGoJuvroGbwx1JiGdOINuooNwGuswge2zTIsJi0gN/H3hcB2E6rIFiYid4BrMrwW\nmSeq1LZVS6siu0qw4p4OVy+/CmjfWKQD8j4k6u6PipiK6IMk1JYIlSCr2AS04JjT\njgNgGVVtxVt2cUM9huIXkXjEaRZdzK7boA60NCkIyGJdHWh3LLQdW4zg/A64C0lj\nIMoJBGuQkAKgfRuh7KI6Q6Qom7BM3OCFXdUJUEBQHc2MTyeZAoHAJdBQGBn1RFZ+\nn75AnbTMZJ6Twp2fVjzWUz/+rnXFlo87ynA18MR2BzaDST4Bvda29UBFGb32Mux9\nOHukqLgIE5jDuqWjy4B5eCoxZf/OvwlgXkX9+gprGR3axn/PZBFPbFB4ZmjbWLzn\nbocn7FJCXf+Cm0cMmv1jIIxej19MUU/duq9iq4RkHY2LG+KrSEQIUVmImCftXdN3\n/qNP5JetY0eH6C+KRc8JqDB0nvbqZNOgYXOfYXo/5Gk8XIHTFihm\n-----END RSA PRIVATE KEY-----" + testStr := "The quick brown fox jumps over the lazy dog." 
+ sigHex := "4e05ee9e435653549ac4eddbc43e1a6868636e8ea6dbec2564435afcb0de47e0824cddbd88776ddb20728c53ecc90b5d543d5c37575fda8bd0317025fc07de62ee8084b1a75203b1a23d1ef4ac285da3d1fc63317d5b2cf1aafa3e522acedd366ccd5fe4a7f02a42922237426ca3dc154c57408638b9bfaf0d0213855d4e9ee621db204151bcb13d4dbb18f930ec601469c992c84b14e9e0b6f91ac9517bb3b749dd117e1cbac2e4acb0e549f44558a2005898a226d5b6c8b9291d7abae0d9e0a16858b89662a085f74a202deb867acab792bdbd2c36731217caea8b17bd210c29b890472f11e5afdd1dd7b69004db070e04201778f2c49f5758643881403d45a58d08f51b5c63910c6185892f0b590f191d760b669eff2464456f130239bba94acf54a0cb98f6939ff84ae26a37f9b890be259d9b5d636f6eb367b53e895227d7d79a3a88afd6d28c198ee80f6527437c5fbf63accb81709925c4e03d1c9eaee86f58e4bd1c669d6af042dbd412de0d13b98b1111e2fadbe34b45de52125e9a" + k := data.NewPublicKey(data.RSAKey, []byte(pubPem)) + + sigBytes, err := hex.DecodeString(sigHex) + if err != nil { + t.Fatal(err) + } + v := RSAPyCryptoVerifier{} + err = v.Verify(k, sigBytes, []byte(testStr)) + if err != nil { + t.Fatal(err) + } +} + +func TestPyNaCled25519Compat(t *testing.T) { + pubHex := "846612b43cef909a0e4ea9c818379bca4723a2020619f95e7a0ccc6f0850b7dc" + //privHex := "bf3cdb9b2a664b0460e6755cb689ffca15b6e294f79f9f1fcf90b52e5b063a76" + testStr := "The quick brown fox jumps over the lazy dog." 
+ sigHex := "166e7013e48f26dccb4e68fe4cf558d1cd3af902f8395534336a7f8b4c56588694aa3ac671767246298a59d5ef4224f02c854f41bfcfe70241db4be1546d6a00" + + pub, _ := hex.DecodeString(pubHex) + k := data.NewPublicKey(data.ED25519Key, pub) + + sigBytes, _ := hex.DecodeString(sigHex) + + err := Verifiers[data.EDDSASignature].Verify(k, sigBytes, []byte(testStr)) + if err != nil { + t.Fatal(err) + } +} + func rsaPSSSign(privKey data.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) { if privKey, ok := privKey.(*data.RSAPrivateKey); !ok { return nil, fmt.Errorf("private key type not supported: %s", privKey.Algorithm()) diff --git a/tuf/store/filestore.go b/tuf/store/filestore.go deleted file mode 100644 index 401e7ee421..0000000000 --- a/tuf/store/filestore.go +++ /dev/null @@ -1,102 +0,0 @@ -package store - -import ( - "fmt" - "github.com/docker/notary" - "io/ioutil" - "os" - "path" - "path/filepath" -) - -// NewFilesystemStore creates a new store in a directory tree -func NewFilesystemStore(baseDir, metaSubDir, metaExtension string) (*FilesystemStore, error) { - metaDir := path.Join(baseDir, metaSubDir) - - // Make sure we can create the necessary dirs and they are writable - err := os.MkdirAll(metaDir, 0700) - if err != nil { - return nil, err - } - - return &FilesystemStore{ - baseDir: baseDir, - metaDir: metaDir, - metaExtension: metaExtension, - }, nil -} - -// FilesystemStore is a store in a locally accessible directory -type FilesystemStore struct { - baseDir string - metaDir string - metaExtension string -} - -func (f *FilesystemStore) getPath(name string) string { - fileName := fmt.Sprintf("%s.%s", name, f.metaExtension) - return filepath.Join(f.metaDir, fileName) -} - -// GetMeta returns the meta for the given name (a role) up to size bytes -// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a -// predefined threshold "notary.MaxDownloadSize". 
-func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) { - meta, err := ioutil.ReadFile(f.getPath(name)) - if err != nil { - if os.IsNotExist(err) { - err = ErrMetaNotFound{Resource: name} - } - return nil, err - } - if size == NoSizeLimit { - size = notary.MaxDownloadSize - } - // Only return up to size bytes - if int64(len(meta)) < size { - return meta, nil - } - return meta[:size], nil -} - -// SetMultiMeta sets the metadata for multiple roles in one operation -func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error { - for role, blob := range metas { - err := f.SetMeta(role, blob) - if err != nil { - return err - } - } - return nil -} - -// SetMeta sets the meta for a single role -func (f *FilesystemStore) SetMeta(name string, meta []byte) error { - fp := f.getPath(name) - - // Ensures the parent directories of the file we are about to write exist - err := os.MkdirAll(filepath.Dir(fp), 0700) - if err != nil { - return err - } - - // if something already exists, just delete it and re-write it - os.RemoveAll(fp) - - // Write the file to disk - if err = ioutil.WriteFile(fp, meta, 0600); err != nil { - return err - } - return nil -} - -// RemoveAll clears the existing filestore by removing its base directory -func (f *FilesystemStore) RemoveAll() error { - return os.RemoveAll(f.baseDir) -} - -// RemoveMeta removes the metadata for a single role - if the metadata doesn't -// exist, no error is returned -func (f *FilesystemStore) RemoveMeta(name string) error { - return os.RemoveAll(f.getPath(name)) // RemoveAll succeeds if path doesn't exist -} diff --git a/tuf/store/filestore_test.go b/tuf/store/filestore_test.go deleted file mode 100644 index ebdd1944cb..0000000000 --- a/tuf/store/filestore_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package store - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" -) - -const testDir = "/tmp/testFilesystemStore/" - -func 
TestNewFilesystemStore(t *testing.T) { - _, err := NewFilesystemStore(testDir, "metadata", "json") - require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) - defer os.RemoveAll(testDir) - - info, err := os.Stat(path.Join(testDir, "metadata")) - require.Nil(t, err, "Error attempting to stat metadata dir: %v", err) - require.NotNil(t, info, "Nil FileInfo from stat on metadata dir") - require.True(t, 0700&info.Mode() != 0, "Metadata directory is not writable") -} - -func TestSetMeta(t *testing.T) { - s, err := NewFilesystemStore(testDir, "metadata", "json") - require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) - defer os.RemoveAll(testDir) - - testContent := []byte("test data") - - err = s.SetMeta("testMeta", testContent) - require.Nil(t, err, "SetMeta returned unexpected error: %v", err) - - content, err := ioutil.ReadFile(path.Join(testDir, "metadata", "testMeta.json")) - require.Nil(t, err, "Error reading file: %v", err) - require.Equal(t, testContent, content, "Content written to file was corrupted.") -} - -func TestSetMetaWithNoParentDirectory(t *testing.T) { - s, err := NewFilesystemStore(testDir, "metadata", "json") - require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) - defer os.RemoveAll(testDir) - - testContent := []byte("test data") - - err = s.SetMeta("noexist/"+"testMeta", testContent) - require.Nil(t, err, "SetMeta returned unexpected error: %v", err) - - content, err := ioutil.ReadFile(path.Join(testDir, "metadata", "noexist/testMeta.json")) - require.Nil(t, err, "Error reading file: %v", err) - require.Equal(t, testContent, content, "Content written to file was corrupted.") -} - -// if something already existed there, remove it first and write a new file -func TestSetMetaRemovesExistingFileBeforeWriting(t *testing.T) { - s, err := NewFilesystemStore(testDir, "metadata", "json") - require.Nil(t, err, "Initializing FilesystemStore returned unexpected 
error: %v", err) - defer os.RemoveAll(testDir) - - // make a directory where we want metadata to go - os.Mkdir(filepath.Join(testDir, "metadata", "root.json"), 0700) - - testContent := []byte("test data") - err = s.SetMeta("root", testContent) - require.NoError(t, err, "SetMeta returned unexpected error: %v", err) - - content, err := ioutil.ReadFile(path.Join(testDir, "metadata", "root.json")) - require.NoError(t, err, "Error reading file: %v", err) - require.Equal(t, testContent, content, "Content written to file was corrupted.") -} - -func TestGetMeta(t *testing.T) { - s, err := NewFilesystemStore(testDir, "metadata", "json") - require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) - defer os.RemoveAll(testDir) - - testContent := []byte("test data") - - ioutil.WriteFile(path.Join(testDir, "metadata", "testMeta.json"), testContent, 0600) - - content, err := s.GetMeta("testMeta", int64(len(testContent))) - require.Nil(t, err, "GetMeta returned unexpected error: %v", err) - - require.Equal(t, testContent, content, "Content read from file was corrupted.") - - // Check that NoSizeLimit size reads everything - content, err = s.GetMeta("testMeta", NoSizeLimit) - require.Nil(t, err, "GetMeta returned unexpected error: %v", err) - - require.Equal(t, testContent, content, "Content read from file was corrupted.") - - // Check that we return only up to size bytes - content, err = s.GetMeta("testMeta", 4) - require.Nil(t, err, "GetMeta returned unexpected error: %v", err) - - require.Equal(t, []byte("test"), content, "Content read from file was corrupted.") -} - -func TestGetSetMetadata(t *testing.T) { - s, err := NewFilesystemStore(testDir, "metadata", "json") - require.NoError(t, err, "Initializing FilesystemStore returned unexpected error", err) - defer os.RemoveAll(testDir) - - testGetSetMeta(t, func() MetadataStore { return s }) -} - -func TestRemoveMetadata(t *testing.T) { - s, err := NewFilesystemStore(testDir, "metadata", "json") - 
require.NoError(t, err, "Initializing FilesystemStore returned unexpected error", err) - defer os.RemoveAll(testDir) - - testRemoveMeta(t, func() MetadataStore { return s }) -} - -func TestRemoveAll(t *testing.T) { - s, err := NewFilesystemStore(testDir, "metadata", "json") - require.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) - defer os.RemoveAll(testDir) - - testContent := []byte("test data") - - // Write some files in metadata and targets dirs - metaPath := path.Join(testDir, "metadata", "testMeta.json") - ioutil.WriteFile(metaPath, testContent, 0600) - - // Remove all - err = s.RemoveAll() - require.Nil(t, err, "Removing all from FilesystemStore returned unexpected error: %v", err) - - // Test that files no longer exist - _, err = ioutil.ReadFile(metaPath) - require.True(t, os.IsNotExist(err)) - - // Removing the empty filestore returns nil - require.Nil(t, s.RemoveAll()) -} diff --git a/tuf/testutils/corrupt_memorystore.go b/tuf/testutils/corrupt_memorystore.go index ee5afa39d9..857d5d7e65 100644 --- a/tuf/testutils/corrupt_memorystore.go +++ b/tuf/testutils/corrupt_memorystore.go @@ -1,7 +1,7 @@ package testutils import ( - "github.com/docker/notary/tuf/store" + store "github.com/docker/notary/storage" ) // CorruptingMemoryStore corrupts all data returned by GetMeta @@ -16,10 +16,10 @@ func NewCorruptingMemoryStore(meta map[string][]byte) *CorruptingMemoryStore { return &CorruptingMemoryStore{MemoryStore: *s} } -// GetMeta returns up to size bytes of meta identified by string. It will +// GetSized returns up to size bytes of meta identified by string. 
It will // always be corrupted by setting the first character to } -func (cm CorruptingMemoryStore) GetMeta(name string, size int64) ([]byte, error) { - d, err := cm.MemoryStore.GetMeta(name, size) +func (cm CorruptingMemoryStore) GetSized(name string, size int64) ([]byte, error) { + d, err := cm.MemoryStore.GetSized(name, size) if err != nil { return nil, err } @@ -39,9 +39,9 @@ func NewLongMemoryStore(meta map[string][]byte) *LongMemoryStore { return &LongMemoryStore{MemoryStore: *s} } -// GetMeta returns one byte too much -func (lm LongMemoryStore) GetMeta(name string, size int64) ([]byte, error) { - d, err := lm.MemoryStore.GetMeta(name, size) +// GetSized returns one byte too much +func (lm LongMemoryStore) GetSized(name string, size int64) ([]byte, error) { + d, err := lm.MemoryStore.GetSized(name, size) if err != nil { return nil, err } @@ -61,9 +61,9 @@ func NewShortMemoryStore(meta map[string][]byte) *ShortMemoryStore { return &ShortMemoryStore{MemoryStore: *s} } -// GetMeta returns one byte too few -func (sm ShortMemoryStore) GetMeta(name string, size int64) ([]byte, error) { - d, err := sm.MemoryStore.GetMeta(name, size) +// GetSized returns one byte too few +func (sm ShortMemoryStore) GetSized(name string, size int64) ([]byte, error) { + d, err := sm.MemoryStore.GetSized(name, size) if err != nil { return nil, err } diff --git a/tuf/testutils/interfaces/cryptoservice.go b/tuf/testutils/interfaces/cryptoservice.go index 3c26228b53..d1c31dda12 100644 --- a/tuf/testutils/interfaces/cryptoservice.go +++ b/tuf/testutils/interfaces/cryptoservice.go @@ -4,9 +4,9 @@ import ( "crypto/rand" "testing" - "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" ) @@ -91,11 +91,11 @@ func AddGetKeyCryptoServiceInterfaceBehaviorTests(t *testing.T, cs signed.Crypto role := data.BaseRoles[i+1] switch algo { case data.RSAKey: - 
addedPrivKey, err = trustmanager.GenerateRSAKey(rand.Reader, 2048) + addedPrivKey, err = utils.GenerateRSAKey(rand.Reader, 2048) case data.ECDSAKey: - addedPrivKey, err = trustmanager.GenerateECDSAKey(rand.Reader) + addedPrivKey, err = utils.GenerateECDSAKey(rand.Reader) case data.ED25519Key: - addedPrivKey, err = trustmanager.GenerateED25519Key(rand.Reader) + addedPrivKey, err = utils.GenerateED25519Key(rand.Reader) default: require.FailNow(t, "invalid algorithm %s", algo) } @@ -123,11 +123,11 @@ func AddListKeyCryptoServiceInterfaceBehaviorTests(t *testing.T, cs signed.Crypt role := data.BaseRoles[i+1] switch algo { case data.RSAKey: - addedPrivKey, err = trustmanager.GenerateRSAKey(rand.Reader, 2048) + addedPrivKey, err = utils.GenerateRSAKey(rand.Reader, 2048) case data.ECDSAKey: - addedPrivKey, err = trustmanager.GenerateECDSAKey(rand.Reader) + addedPrivKey, err = utils.GenerateECDSAKey(rand.Reader) case data.ED25519Key: - addedPrivKey, err = trustmanager.GenerateED25519Key(rand.Reader) + addedPrivKey, err = utils.GenerateED25519Key(rand.Reader) default: require.FailNow(t, "invalid algorithm %s", algo) } diff --git a/tuf/testutils/repo.go b/tuf/testutils/repo.go index 96c8de8cc9..17caa755c0 100644 --- a/tuf/testutils/repo.go +++ b/tuf/testutils/repo.go @@ -12,6 +12,7 @@ import ( "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" tuf "github.com/docker/notary/tuf" @@ -40,9 +41,9 @@ func CreateKey(cs signed.CryptoService, gun, role, keyAlgorithm string) (data.Pu // Keep the x509 key type consistent with the key's algorithm switch keyAlgorithm { case data.RSAKey: - key = data.NewRSAx509PublicKey(trustmanager.CertToPEM(cert)) + key = data.NewRSAx509PublicKey(utils.CertToPEM(cert)) case data.ECDSAKey: - key = data.NewECDSAx509PublicKey(trustmanager.CertToPEM(cert)) + key = data.NewECDSAx509PublicKey(utils.CertToPEM(cert)) 
default: // This should be impossible because of the Create() call above, but just in case return nil, fmt.Errorf("invalid key algorithm type") diff --git a/tuf/testutils/swizzler.go b/tuf/testutils/swizzler.go index b66a793f76..39fd210a7a 100644 --- a/tuf/testutils/swizzler.go +++ b/tuf/testutils/swizzler.go @@ -8,11 +8,11 @@ import ( "github.com/docker/go/canonical/json" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/passphrase" + store "github.com/docker/notary/storage" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" ) // ErrNoKeyForRole returns an error when the cryptoservice provided to @@ -89,7 +89,7 @@ func serializeMetadata(cs signed.CryptoService, s *data.Signed, role string, // gets a Signed from the metadata store func signedFromStore(cache store.MetadataStore, role string) (*data.Signed, error) { - b, err := cache.GetMeta(role, store.NoSizeLimit) + b, err := cache.GetSized(role, store.NoSizeLimit) if err != nil { return nil, err } @@ -122,23 +122,23 @@ func NewMetadataSwizzler(gun string, initialMetadata map[string][]byte, // SetInvalidJSON corrupts metadata into something that is no longer valid JSON func (m *MetadataSwizzler) SetInvalidJSON(role string) error { - metaBytes, err := m.MetadataCache.GetMeta(role, store.NoSizeLimit) + metaBytes, err := m.MetadataCache.GetSized(role, store.NoSizeLimit) if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes[5:]) + return m.MetadataCache.Set(role, metaBytes[5:]) } // AddExtraSpace adds an extra space to the beginning and end of the serialized // JSON bytes, which should not affect serialization, but will change the checksum // of the file. 
func (m *MetadataSwizzler) AddExtraSpace(role string) error { - metaBytes, err := m.MetadataCache.GetMeta(role, store.NoSizeLimit) + metaBytes, err := m.MetadataCache.GetSized(role, store.NoSizeLimit) if err != nil { return err } newBytes := append(append([]byte{' '}, metaBytes...), ' ') - return m.MetadataCache.SetMeta(role, newBytes) + return m.MetadataCache.Set(role, newBytes) } // SetInvalidSigned corrupts the metadata into something that is valid JSON, @@ -155,7 +155,7 @@ func (m *MetadataSwizzler) SetInvalidSigned(role string) error { if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes) + return m.MetadataCache.Set(role, metaBytes) } // SetInvalidSignedMeta corrupts the metadata into something that is unmarshallable @@ -190,7 +190,7 @@ func (m *MetadataSwizzler) SetInvalidSignedMeta(role string) error { if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes) + return m.MetadataCache.Set(role, metaBytes) } // TODO: corrupt metadata in such a way that it can be unmarshalled as a @@ -225,7 +225,7 @@ func (m *MetadataSwizzler) SetInvalidMetadataType(role string) error { if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes) + return m.MetadataCache.Set(role, metaBytes) } // InvalidateMetadataSignatures signs with the right key(s) but wrong hash @@ -248,7 +248,7 @@ func (m *MetadataSwizzler) InvalidateMetadataSignatures(role string) error { if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes) + return m.MetadataCache.Set(role, metaBytes) } // TODO: AddExtraSignedInfo - add an extra field to Signed that doesn't get @@ -257,7 +257,7 @@ func (m *MetadataSwizzler) InvalidateMetadataSignatures(role string) error { // RemoveMetadata deletes the metadata entirely func (m *MetadataSwizzler) RemoveMetadata(role string) error { - return m.MetadataCache.RemoveMeta(role) + return m.MetadataCache.Remove(role) } // SignMetadataWithInvalidKey signs the metadata with the wrong key 
@@ -278,7 +278,7 @@ func (m *MetadataSwizzler) SignMetadataWithInvalidKey(role string) error { if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes) + return m.MetadataCache.Set(role, metaBytes) } // OffsetMetadataVersion updates the metadata version @@ -313,7 +313,7 @@ func (m *MetadataSwizzler) OffsetMetadataVersion(role string, offset int) error if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes) + return m.MetadataCache.Set(role, metaBytes) } // ExpireMetadata expires the metadata, which would make it invalid - don't do anything if @@ -345,7 +345,7 @@ func (m *MetadataSwizzler) ExpireMetadata(role string) error { if err != nil { return err } - return m.MetadataCache.SetMeta(role, metaBytes) + return m.MetadataCache.Set(role, metaBytes) } // SetThreshold sets a threshold for a metadata role - can invalidate metadata for which @@ -357,7 +357,7 @@ func (m *MetadataSwizzler) SetThreshold(role string, newThreshold int) error { roleSpecifier = path.Dir(role) } - b, err := m.MetadataCache.GetMeta(roleSpecifier, store.NoSizeLimit) + b, err := m.MetadataCache.GetSized(roleSpecifier, store.NoSizeLimit) if err != nil { return err } @@ -401,7 +401,7 @@ func (m *MetadataSwizzler) SetThreshold(role string, newThreshold int) error { if err != nil { return err } - return m.MetadataCache.SetMeta(roleSpecifier, metaBytes) + return m.MetadataCache.Set(roleSpecifier, metaBytes) } // RotateKey rotates the key for a role - this can invalidate that role's metadata @@ -413,7 +413,7 @@ func (m *MetadataSwizzler) RotateKey(role string, key data.PublicKey) error { roleSpecifier = path.Dir(role) } - b, err := m.MetadataCache.GetMeta(roleSpecifier, store.NoSizeLimit) + b, err := m.MetadataCache.GetSized(roleSpecifier, store.NoSizeLimit) if err != nil { return err } @@ -460,7 +460,7 @@ func (m *MetadataSwizzler) RotateKey(role string, key data.PublicKey) error { if err != nil { return err } - return m.MetadataCache.SetMeta(roleSpecifier, 
metaBytes) + return m.MetadataCache.Set(roleSpecifier, metaBytes) } // ChangeRootKey swaps out the root key with a new key, and re-signs the metadata @@ -471,7 +471,7 @@ func (m *MetadataSwizzler) ChangeRootKey() error { return err } - b, err := m.MetadataCache.GetMeta(data.CanonicalRootRole, store.NoSizeLimit) + b, err := m.MetadataCache.GetSized(data.CanonicalRootRole, store.NoSizeLimit) if err != nil { return err } @@ -498,7 +498,7 @@ func (m *MetadataSwizzler) ChangeRootKey() error { if err != nil { return err } - return m.MetadataCache.SetMeta(data.CanonicalRootRole, metaBytes) + return m.MetadataCache.Set(data.CanonicalRootRole, metaBytes) } // UpdateSnapshotHashes updates the snapshot to reflect the latest hash changes, to @@ -509,7 +509,7 @@ func (m *MetadataSwizzler) UpdateSnapshotHashes(roles ...string) error { snapshotSigned *data.Signed err error ) - if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalSnapshotRole, store.NoSizeLimit); err != nil { + if metaBytes, err = m.MetadataCache.GetSized(data.CanonicalSnapshotRole, store.NoSizeLimit); err != nil { return err } @@ -525,7 +525,7 @@ func (m *MetadataSwizzler) UpdateSnapshotHashes(roles ...string) error { for _, role := range roles { if role != data.CanonicalSnapshotRole && role != data.CanonicalTimestampRole { - if metaBytes, err = m.MetadataCache.GetMeta(role, store.NoSizeLimit); err != nil { + if metaBytes, err = m.MetadataCache.GetSized(role, store.NoSizeLimit); err != nil { return err } @@ -549,7 +549,7 @@ func (m *MetadataSwizzler) UpdateSnapshotHashes(roles ...string) error { if err != nil { return err } - return m.MetadataCache.SetMeta(data.CanonicalSnapshotRole, metaBytes) + return m.MetadataCache.Set(data.CanonicalSnapshotRole, metaBytes) } // UpdateTimestampHash updates the timestamp to reflect the latest snapshot changes, to @@ -561,7 +561,7 @@ func (m *MetadataSwizzler) UpdateTimestampHash() error { timestampSigned *data.Signed err error ) - if metaBytes, err = 
m.MetadataCache.GetMeta(data.CanonicalTimestampRole, store.NoSizeLimit); err != nil { + if metaBytes, err = m.MetadataCache.GetSized(data.CanonicalTimestampRole, store.NoSizeLimit); err != nil { return err } // we can't just create a new timestamp, because then the expiry would be @@ -570,7 +570,7 @@ func (m *MetadataSwizzler) UpdateTimestampHash() error { return err } - if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalSnapshotRole, store.NoSizeLimit); err != nil { + if metaBytes, err = m.MetadataCache.GetSized(data.CanonicalSnapshotRole, store.NoSizeLimit); err != nil { return err } @@ -593,7 +593,7 @@ func (m *MetadataSwizzler) UpdateTimestampHash() error { if err != nil { return err } - return m.MetadataCache.SetMeta(data.CanonicalTimestampRole, metaBytes) + return m.MetadataCache.Set(data.CanonicalTimestampRole, metaBytes) } // MutateRoot takes a function that mutates the root metadata - once done, it @@ -632,7 +632,7 @@ func (m *MetadataSwizzler) MutateRoot(mutate func(*data.Root)) error { if err != nil { return err } - return m.MetadataCache.SetMeta(data.CanonicalRootRole, metaBytes) + return m.MetadataCache.Set(data.CanonicalRootRole, metaBytes) } // MutateTimestamp takes a function that mutates the timestamp metadata - once done, it @@ -665,7 +665,7 @@ func (m *MetadataSwizzler) MutateTimestamp(mutate func(*data.Timestamp)) error { if err != nil { return err } - return m.MetadataCache.SetMeta(data.CanonicalTimestampRole, metaBytes) + return m.MetadataCache.Set(data.CanonicalTimestampRole, metaBytes) } // MutateSnapshot takes a function that mutates the snapshot metadata - once done, it @@ -698,7 +698,7 @@ func (m *MetadataSwizzler) MutateSnapshot(mutate func(*data.Snapshot)) error { if err != nil { return err } - return m.MetadataCache.SetMeta(data.CanonicalSnapshotRole, metaBytes) + return m.MetadataCache.Set(data.CanonicalSnapshotRole, metaBytes) } // MutateTargets takes a function that mutates the targets metadata - once done, it @@ -731,5 
+731,5 @@ func (m *MetadataSwizzler) MutateTargets(mutate func(*data.Targets)) error { if err != nil { return err } - return m.MetadataCache.SetMeta(data.CanonicalTargetsRole, metaBytes) + return m.MetadataCache.Set(data.CanonicalTargetsRole, metaBytes) } diff --git a/tuf/testutils/swizzler_test.go b/tuf/testutils/swizzler_test.go index 87f12b9619..61f7b5c960 100644 --- a/tuf/testutils/swizzler_test.go +++ b/tuf/testutils/swizzler_test.go @@ -11,9 +11,9 @@ import ( "testing" "time" + store "github.com/docker/notary/storage" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" "github.com/stretchr/testify/require" ) @@ -80,7 +80,7 @@ func TestSwizzlerSetInvalidJSON(t *testing.T) { f.SetInvalidJSON(data.CanonicalSnapshotRole) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalSnapshotRole { @@ -105,7 +105,7 @@ func TestSwizzlerAddExtraSpace(t *testing.T) { require.NoError(t, json.Unmarshal(origMeta[data.CanonicalSnapshotRole], snapshot)) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalTargetsRole { @@ -136,7 +136,7 @@ func TestSwizzlerSetInvalidSigned(t *testing.T) { f.SetInvalidSigned(data.CanonicalTargetsRole) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalTargetsRole { @@ -161,7 +161,7 @@ func TestSwizzlerSetInvalidSignedMeta(t *testing.T) { require.NoError(t, err) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := 
f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalRootRole { @@ -185,7 +185,7 @@ func TestSwizzlerSetInvalidMetadataType(t *testing.T) { f.SetInvalidMetadataType(data.CanonicalTargetsRole) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalTargetsRole { @@ -208,7 +208,7 @@ func TestSwizzlerInvalidateMetadataSignatures(t *testing.T) { f.InvalidateMetadataSignatures(data.CanonicalRootRole) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalRootRole { @@ -240,7 +240,7 @@ func TestSwizzlerRemoveMetadata(t *testing.T) { f.RemoveMetadata("targets/a") for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) if role != "targets/a" { require.NoError(t, err) require.True(t, bytes.Equal(metaBytes, newMeta), "bytes have changed for role %s", role) @@ -258,7 +258,7 @@ func TestSwizzlerSignMetadataWithInvalidKey(t *testing.T) { f.SignMetadataWithInvalidKey(data.CanonicalTimestampRole) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalTimestampRole { @@ -285,7 +285,7 @@ func TestSwizzlerOffsetMetadataVersion(t *testing.T) { f.OffsetMetadataVersion("targets/a", -2) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != "targets/a" { @@ -309,7 +309,7 @@ 
func TestSwizzlerExpireMetadata(t *testing.T) { require.NoError(t, err) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalRootRole { @@ -334,7 +334,7 @@ func TestSwizzlerSetThresholdBaseRole(t *testing.T) { require.NoError(t, err) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) // the threshold for base roles is set in root @@ -362,7 +362,7 @@ func TestSwizzlerSetThresholdDelegatedRole(t *testing.T) { f.SetThreshold("targets/a/b", 3) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) // the threshold for "targets/a/b" is in "targets/a" @@ -392,7 +392,7 @@ func TestSwizzlerChangeRootKey(t *testing.T) { for _, role := range roles { origMeta := origMeta[role] - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) // the threshold for base roles is set in root @@ -437,7 +437,7 @@ func TestSwizzlerUpdateSnapshotHashesSpecifiedRoles(t *testing.T) { // nothing has changed, signed data should be the same (signatures might // change because signatures may have random elements f.UpdateSnapshotHashes(data.CanonicalTargetsRole) - newMeta, err := f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(data.CanonicalSnapshotRole, store.NoSizeLimit) origSigned, newSigned := &data.Signed{}, &data.Signed{} require.NoError(t, json.Unmarshal(origMeta[data.CanonicalSnapshotRole], origSigned)) @@ -451,7 +451,7 @@ func 
TestSwizzlerUpdateSnapshotHashesSpecifiedRoles(t *testing.T) { // update the snapshot with just 1 role f.UpdateSnapshotHashes(data.CanonicalTargetsRole) - newMeta, err = f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, store.NoSizeLimit) + newMeta, err = f.MetadataCache.GetSized(data.CanonicalSnapshotRole, store.NoSizeLimit) require.NoError(t, err) require.False(t, bytes.Equal(origMeta[data.CanonicalSnapshotRole], newMeta)) @@ -481,7 +481,7 @@ func TestSwizzlerUpdateSnapshotHashesNoSpecifiedRoles(t *testing.T) { // nothing has changed, signed data should be the same (signatures might // change because signatures may have random elements f.UpdateSnapshotHashes() - newMeta, err := f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(data.CanonicalSnapshotRole, store.NoSizeLimit) require.NoError(t, err) origSigned, newSigned := &data.Signed{}, &data.Signed{} @@ -496,7 +496,7 @@ func TestSwizzlerUpdateSnapshotHashesNoSpecifiedRoles(t *testing.T) { // update the snapshot with just no specified roles f.UpdateSnapshotHashes() - newMeta, err = f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, store.NoSizeLimit) + newMeta, err = f.MetadataCache.GetSized(data.CanonicalSnapshotRole, store.NoSizeLimit) require.NoError(t, err) require.False(t, bytes.Equal(origMeta[data.CanonicalSnapshotRole], newMeta)) @@ -527,7 +527,7 @@ func TestSwizzlerUpdateTimestamp(t *testing.T) { // nothing has changed, signed data should be the same (signatures might // change because signatures may have random elements f.UpdateTimestampHash() - newMeta, err := f.MetadataCache.GetMeta(data.CanonicalTimestampRole, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(data.CanonicalTimestampRole, store.NoSizeLimit) require.NoError(t, err) origSigned, newSigned := &data.Signed{}, &data.Signed{} @@ -540,7 +540,7 @@ func TestSwizzlerUpdateTimestamp(t *testing.T) { // update the timestamp f.UpdateTimestampHash() - newMeta, err = 
f.MetadataCache.GetMeta(data.CanonicalTimestampRole, store.NoSizeLimit) + newMeta, err = f.MetadataCache.GetSized(data.CanonicalTimestampRole, store.NoSizeLimit) require.NoError(t, err) require.False(t, bytes.Equal(origMeta[data.CanonicalTimestampRole], newMeta)) @@ -584,7 +584,7 @@ func TestSwizzlerMutateRoot(t *testing.T) { require.NoError(t, f.MutateRoot(func(r *data.Root) { r.Roles["hello"] = nil })) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalRootRole { @@ -610,7 +610,7 @@ func TestSwizzlerMutateTimestamp(t *testing.T) { require.NoError(t, f.MutateTimestamp(func(t *data.Timestamp) { t.Meta["hello"] = data.FileMeta{} })) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalTimestampRole { @@ -633,7 +633,7 @@ func TestSwizzlerMutateSnapshot(t *testing.T) { require.NoError(t, f.MutateSnapshot(func(s *data.Snapshot) { s.Meta["hello"] = data.FileMeta{} })) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalSnapshotRole { @@ -656,7 +656,7 @@ func TestSwizzlerMutateTargets(t *testing.T) { require.NoError(t, f.MutateTargets(func(t *data.Targets) { t.Targets["hello"] = data.FileMeta{} })) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalTargetsRole { @@ -684,7 +684,7 @@ func TestSwizzlerRotateKeyBaseRole(t *testing.T) { require.NoError(t, f.RotateKey(theRole, pubKey)) for role, 
metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != data.CanonicalRootRole { @@ -716,7 +716,7 @@ func TestSwizzlerRotateKeyDelegationRole(t *testing.T) { require.NoError(t, f.RotateKey(theRole, pubKey)) for role, metaBytes := range origMeta { - newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit) + newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit) require.NoError(t, err) if role != "targets/a" { diff --git a/tuf/tuf_test.go b/tuf/tuf_test.go index 6753895d44..69a3363be3 100644 --- a/tuf/tuf_test.go +++ b/tuf/tuf_test.go @@ -15,6 +15,7 @@ import ( "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" + "github.com/docker/notary/tuf/utils" "github.com/stretchr/testify/require" ) @@ -1175,7 +1176,7 @@ func TestSignRootOldKeyCertExists(t *testing.T) { oldRootCert, err := cryptoservice.GenerateCertificate(rootPrivateKey, gun, referenceTime.AddDate(-9, 0, 0), referenceTime.AddDate(1, 0, 0)) require.NoError(t, err) - oldRootCertKey := trustmanager.CertToKey(oldRootCert) + oldRootCertKey := utils.CertToKey(oldRootCert) repo := initRepoWithRoot(t, cs, oldRootCertKey) @@ -1189,7 +1190,7 @@ func TestSignRootOldKeyCertExists(t *testing.T) { // Create a new certificate newRootCert, err := cryptoservice.GenerateCertificate(rootPrivateKey, gun, referenceTime, referenceTime.AddDate(10, 0, 0)) require.NoError(t, err) - newRootCertKey := trustmanager.CertToKey(newRootCert) + newRootCertKey := utils.CertToKey(newRootCert) require.NotEqual(t, oldRootCertKey.ID(), newRootCertKey.ID()) // Only trust the new certificate @@ -1228,7 +1229,7 @@ func TestSignRootOldKeyCertMissing(t *testing.T) { oldRootCert, err := cryptoservice.GenerateCertificate(rootPrivateKey, gun, referenceTime.AddDate(-9, 0, 0), referenceTime.AddDate(1, 0, 0)) require.NoError(t, err) - 
oldRootCertKey := trustmanager.CertToKey(oldRootCert) + oldRootCertKey := utils.CertToKey(oldRootCert) repo := initRepoWithRoot(t, cs, oldRootCertKey) @@ -1242,7 +1243,7 @@ func TestSignRootOldKeyCertMissing(t *testing.T) { // Create a new certificate newRootCert, err := cryptoservice.GenerateCertificate(rootPrivateKey, gun, referenceTime, referenceTime.AddDate(10, 0, 0)) require.NoError(t, err) - newRootCertKey := trustmanager.CertToKey(newRootCert) + newRootCertKey := utils.CertToKey(newRootCert) require.NotEqual(t, oldRootCertKey.ID(), newRootCertKey.ID()) // Only trust the new certificate @@ -1293,7 +1294,7 @@ func TestSignRootOldRootRolesAndOldSigs(t *testing.T) { rootCert, err := cryptoservice.GenerateCertificate(rootPrivateKey, gun, referenceTime.AddDate(-9, 0, 0), referenceTime.AddDate(1, 0, 0)) require.NoError(t, err) - rootCertKeys[i] = trustmanager.CertToKey(rootCert) + rootCertKeys[i] = utils.CertToKey(rootCert) rootPrivKeys[i] = rootPrivateKey } diff --git a/tuf/utils/util.go b/tuf/utils/util.go deleted file mode 100644 index a3836f6802..0000000000 --- a/tuf/utils/util.go +++ /dev/null @@ -1,109 +0,0 @@ -package utils - -import ( - "crypto/hmac" - "encoding/hex" - "errors" - "fmt" - gopath "path" - "path/filepath" - - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/tuf/data" -) - -// ErrWrongLength indicates the length was different to that expected -var ErrWrongLength = errors.New("wrong length") - -// ErrWrongHash indicates the hash was different to that expected -type ErrWrongHash struct { - Type string - Expected []byte - Actual []byte -} - -// Error implements error interface -func (e ErrWrongHash) Error() string { - return fmt.Sprintf("wrong %s hash, expected %#x got %#x", e.Type, e.Expected, e.Actual) -} - -// ErrNoCommonHash indicates the metadata did not provide any hashes this -// client recognizes -type ErrNoCommonHash struct { - Expected data.Hashes - Actual data.Hashes -} - -// Error implements error interface -func (e 
ErrNoCommonHash) Error() string { - types := func(a data.Hashes) []string { - t := make([]string, 0, len(a)) - for typ := range a { - t = append(t, typ) - } - return t - } - return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual)) -} - -// ErrUnknownHashAlgorithm - client was ashed to use a hash algorithm -// it is not familiar with -type ErrUnknownHashAlgorithm struct { - Name string -} - -// Error implements error interface -func (e ErrUnknownHashAlgorithm) Error() string { - return fmt.Sprintf("unknown hash algorithm: %s", e.Name) -} - -// PassphraseFunc type for func that request a passphrase -type PassphraseFunc func(role string, confirm bool) ([]byte, error) - -// FileMetaEqual checks whether 2 FileMeta objects are consistent with eachother -func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error { - if actual.Length != expected.Length { - return ErrWrongLength - } - hashChecked := false - for typ, hash := range expected.Hashes { - if h, ok := actual.Hashes[typ]; ok { - hashChecked = true - if !hmac.Equal(h, hash) { - return ErrWrongHash{typ, hash, h} - } - } - } - if !hashChecked { - return ErrNoCommonHash{expected.Hashes, actual.Hashes} - } - return nil -} - -// NormalizeTarget adds a slash, if required, to the front of a target path -func NormalizeTarget(path string) string { - return gopath.Join("/", path) -} - -// HashedPaths prefixes the filename with the known hashes for the file, -// returning a list of possible consistent paths. -func HashedPaths(path string, hashes data.Hashes) []string { - paths := make([]string, 0, len(hashes)) - for _, hash := range hashes { - hashedPath := filepath.Join(filepath.Dir(path), hex.EncodeToString(hash)+"."+filepath.Base(path)) - paths = append(paths, hashedPath) - } - return paths -} - -// CanonicalKeyID returns the ID of the public bytes version of a TUF key. -// On regular RSA/ECDSA TUF keys, this is just the key ID. 
On X509 RSA/ECDSA -// TUF keys, this is the key ID of the public key part of the key in the leaf cert -func CanonicalKeyID(k data.PublicKey) (string, error) { - switch k.Algorithm() { - case data.ECDSAx509Key, data.RSAx509Key: - return trustmanager.X509PublicKeyID(k) - default: - return k.ID(), nil - } -} diff --git a/tuf/utils/util_test.go b/tuf/utils/util_test.go deleted file mode 100644 index 4c043a135f..0000000000 --- a/tuf/utils/util_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package utils - -import ( - "encoding/hex" - "testing" - - "github.com/docker/notary/tuf/data" - "github.com/stretchr/testify/require" -) - -func TestFileMetaEqual(t *testing.T) { - type test struct { - name string - b data.FileMeta - a data.FileMeta - err func(test) error - } - fileMeta := func(length int64, hashes map[string]string) data.FileMeta { - m := data.FileMeta{Length: length, Hashes: make(map[string][]byte, len(hashes))} - for typ, hash := range hashes { - v, err := hex.DecodeString(hash) - require.NoError(t, err, "hash not in hex") - m.Hashes[typ] = v - } - return m - } - tests := []test{ - { - name: "wrong length", - a: data.FileMeta{Length: 1}, - b: data.FileMeta{Length: 2}, - err: func(test) error { return ErrWrongLength }, - }, - { - name: "wrong sha512 hash", - a: fileMeta(10, map[string]string{"sha512": "111111"}), - b: fileMeta(10, map[string]string{"sha512": "222222"}), - err: func(t test) error { return ErrWrongHash{"sha512", t.b.Hashes["sha512"], t.a.Hashes["sha512"]} }, - }, - { - name: "intersecting hashes", - a: fileMeta(10, map[string]string{"sha512": "111111", "md5": "222222"}), - b: fileMeta(10, map[string]string{"sha512": "111111", "sha256": "333333"}), - err: func(test) error { return nil }, - }, - { - name: "no common hashes", - a: fileMeta(10, map[string]string{"sha512": "111111"}), - b: fileMeta(10, map[string]string{"sha256": "222222", "md5": "333333"}), - err: func(t test) error { return ErrNoCommonHash{t.b.Hashes, t.a.Hashes} }, - }, - } - for _, run := 
range tests { - require.Equal(t, FileMetaEqual(run.a, run.b), run.err(run), "Files not equivalent") - } -} - -func TestNormalizeTarget(t *testing.T) { - for before, after := range map[string]string{ - "": "/", - "foo.txt": "/foo.txt", - "/bar.txt": "/bar.txt", - "foo//bar.txt": "/foo/bar.txt", - "/with/./a/dot": "/with/a/dot", - "/with/double/../dot": "/with/dot", - } { - require.Equal(t, NormalizeTarget(before), after, "Path normalization did not output expected.") - } -} - -func TestHashedPaths(t *testing.T) { - hexBytes := func(s string) []byte { - v, err := hex.DecodeString(s) - require.NoError(t, err, "String was not hex") - return v - } - hashes := data.Hashes{ - "sha512": hexBytes("abc123"), - "sha256": hexBytes("def456"), - } - paths := HashedPaths("foo/bar.txt", hashes) - // cannot use DeepEquals as the returned order is non-deterministic - require.Len(t, paths, 2, "Expected 2 paths") - expected := map[string]struct{}{"foo/abc123.bar.txt": {}, "foo/def456.bar.txt": {}} - for _, path := range paths { - if _, ok := expected[path]; !ok { - t.Fatalf("unexpected path: %s", path) - } - delete(expected, path) - } -} diff --git a/tuf/utils/utils.go b/tuf/utils/utils.go index 8de72b6797..407b8ff87c 100644 --- a/tuf/utils/utils.go +++ b/tuf/utils/utils.go @@ -1,15 +1,19 @@ package utils import ( + "crypto/hmac" "crypto/sha256" "crypto/sha512" "crypto/tls" "encoding/hex" + "errors" "fmt" "io" "net/http" "net/url" "os" + gopath "path" + "path/filepath" "strings" "github.com/docker/notary/tuf/data" @@ -150,3 +154,87 @@ func ConsistentName(role string, hashSha256 []byte) string { } return role } + +// ErrWrongLength indicates the length was different to that expected +var ErrWrongLength = errors.New("wrong length") + +// ErrWrongHash indicates the hash was different to that expected +type ErrWrongHash struct { + Type string + Expected []byte + Actual []byte +} + +// Error implements error interface +func (e ErrWrongHash) Error() string { + return fmt.Sprintf("wrong %s 
hash, expected %#x got %#x", e.Type, e.Expected, e.Actual) +} + +// ErrNoCommonHash indicates the metadata did not provide any hashes this +// client recognizes +type ErrNoCommonHash struct { + Expected data.Hashes + Actual data.Hashes +} + +// Error implements error interface +func (e ErrNoCommonHash) Error() string { + types := func(a data.Hashes) []string { + t := make([]string, 0, len(a)) + for typ := range a { + t = append(t, typ) + } + return t + } + return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual)) +} + +// ErrUnknownHashAlgorithm - client was ashed to use a hash algorithm +// it is not familiar with +type ErrUnknownHashAlgorithm struct { + Name string +} + +// Error implements error interface +func (e ErrUnknownHashAlgorithm) Error() string { + return fmt.Sprintf("unknown hash algorithm: %s", e.Name) +} + +// PassphraseFunc type for func that request a passphrase +type PassphraseFunc func(role string, confirm bool) ([]byte, error) + +// FileMetaEqual checks whether 2 FileMeta objects are consistent with eachother +func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error { + if actual.Length != expected.Length { + return ErrWrongLength + } + hashChecked := false + for typ, hash := range expected.Hashes { + if h, ok := actual.Hashes[typ]; ok { + hashChecked = true + if !hmac.Equal(h, hash) { + return ErrWrongHash{typ, hash, h} + } + } + } + if !hashChecked { + return ErrNoCommonHash{expected.Hashes, actual.Hashes} + } + return nil +} + +// NormalizeTarget adds a slash, if required, to the front of a target path +func NormalizeTarget(path string) string { + return gopath.Join("/", path) +} + +// HashedPaths prefixes the filename with the known hashes for the file, +// returning a list of possible consistent paths. 
+func HashedPaths(path string, hashes data.Hashes) []string { + paths := make([]string, 0, len(hashes)) + for _, hash := range hashes { + hashedPath := filepath.Join(filepath.Dir(path), hex.EncodeToString(hash)+"."+filepath.Base(path)) + paths = append(paths, hashedPath) + } + return paths +} diff --git a/tuf/utils/utils_test.go b/tuf/utils/utils_test.go index 0e8ffe0cdc..cb3fd09a57 100644 --- a/tuf/utils/utils_test.go +++ b/tuf/utils/utils_test.go @@ -1,6 +1,7 @@ package utils import ( + "encoding/hex" "testing" "github.com/docker/notary/tuf/data" @@ -68,3 +69,84 @@ func TestFindRoleIndexNotFound(t *testing.T) { FindRoleIndex(nil, role.Name), ) } +func TestFileMetaEqual(t *testing.T) { + type test struct { + name string + b data.FileMeta + a data.FileMeta + err func(test) error + } + fileMeta := func(length int64, hashes map[string]string) data.FileMeta { + m := data.FileMeta{Length: length, Hashes: make(map[string][]byte, len(hashes))} + for typ, hash := range hashes { + v, err := hex.DecodeString(hash) + require.NoError(t, err, "hash not in hex") + m.Hashes[typ] = v + } + return m + } + tests := []test{ + { + name: "wrong length", + a: data.FileMeta{Length: 1}, + b: data.FileMeta{Length: 2}, + err: func(test) error { return ErrWrongLength }, + }, + { + name: "wrong sha512 hash", + a: fileMeta(10, map[string]string{"sha512": "111111"}), + b: fileMeta(10, map[string]string{"sha512": "222222"}), + err: func(t test) error { return ErrWrongHash{"sha512", t.b.Hashes["sha512"], t.a.Hashes["sha512"]} }, + }, + { + name: "intersecting hashes", + a: fileMeta(10, map[string]string{"sha512": "111111", "md5": "222222"}), + b: fileMeta(10, map[string]string{"sha512": "111111", "sha256": "333333"}), + err: func(test) error { return nil }, + }, + { + name: "no common hashes", + a: fileMeta(10, map[string]string{"sha512": "111111"}), + b: fileMeta(10, map[string]string{"sha256": "222222", "md5": "333333"}), + err: func(t test) error { return ErrNoCommonHash{t.b.Hashes, 
t.a.Hashes} }, + }, + } + for _, run := range tests { + require.Equal(t, FileMetaEqual(run.a, run.b), run.err(run), "Files not equivalent") + } +} + +func TestNormalizeTarget(t *testing.T) { + for before, after := range map[string]string{ + "": "/", + "foo.txt": "/foo.txt", + "/bar.txt": "/bar.txt", + "foo//bar.txt": "/foo/bar.txt", + "/with/./a/dot": "/with/a/dot", + "/with/double/../dot": "/with/dot", + } { + require.Equal(t, NormalizeTarget(before), after, "Path normalization did not output expected.") + } +} + +func TestHashedPaths(t *testing.T) { + hexBytes := func(s string) []byte { + v, err := hex.DecodeString(s) + require.NoError(t, err, "String was not hex") + return v + } + hashes := data.Hashes{ + "sha512": hexBytes("abc123"), + "sha256": hexBytes("def456"), + } + paths := HashedPaths("foo/bar.txt", hashes) + // cannot use DeepEquals as the returned order is non-deterministic + require.Len(t, paths, 2, "Expected 2 paths") + expected := map[string]struct{}{"foo/abc123.bar.txt": {}, "foo/def456.bar.txt": {}} + for _, path := range paths { + if _, ok := expected[path]; !ok { + t.Fatalf("unexpected path: %s", path) + } + delete(expected, path) + } +} diff --git a/trustmanager/x509utils.go b/tuf/utils/x509.go similarity index 97% rename from trustmanager/x509utils.go rename to tuf/utils/x509.go index 4289a2890f..b5faeb15af 100644 --- a/trustmanager/x509utils.go +++ b/tuf/utils/x509.go @@ -1,4 +1,4 @@ -package trustmanager +package utils import ( "bytes" @@ -22,22 +22,16 @@ import ( "github.com/docker/notary/tuf/data" ) -// CertToPEM is a utility function returns a PEM encoded x509 Certificate -func CertToPEM(cert *x509.Certificate) []byte { - pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) - - return pemCert -} - -// CertChainToPEM is a utility function returns a PEM encoded chain of x509 Certificates, in the order they are passed -func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) { - var pemBytes bytes.Buffer 
- for _, cert := range certChain { - if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { - return nil, err - } +// CanonicalKeyID returns the ID of the public bytes version of a TUF key. +// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA +// TUF keys, this is the key ID of the public key part of the key in the leaf cert +func CanonicalKeyID(k data.PublicKey) (string, error) { + switch k.Algorithm() { + case data.ECDSAx509Key, data.RSAx509Key: + return X509PublicKeyID(k) + default: + return k.ID(), nil } - return pemBytes.Bytes(), nil } // LoadCertFromPEM returns the first certificate found in a bunch of bytes or error @@ -64,6 +58,108 @@ func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) { return nil, errors.New("no certificates found in PEM data") } +// X509PublicKeyID returns a public key ID as a string, given a +// data.PublicKey that contains an X509 Certificate +func X509PublicKeyID(certPubKey data.PublicKey) (string, error) { + // Note that this only loads the first certificate from the public key + cert, err := LoadCertFromPEM(certPubKey.Public()) + if err != nil { + return "", err + } + pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey) + if err != nil { + return "", err + } + + var key data.PublicKey + switch certPubKey.Algorithm() { + case data.ECDSAx509Key: + key = data.NewECDSAPublicKey(pubKeyBytes) + case data.RSAx509Key: + key = data.NewRSAPublicKey(pubKeyBytes) + } + + return key.ID(), nil +} + +// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It +// supports RSA (PKCS#1), EC, and ED25519 keys and attempts to decrypt using the passphrase, if encrypted.
+func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("no valid private key found") + } + + var privKeyBytes []byte + var err error + if x509.IsEncryptedPEMBlock(block) { + privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) + if err != nil { + return nil, errors.New("could not decrypt private key") + } + } else { + privKeyBytes = block.Bytes + } + + switch block.Type { + case "RSA PRIVATE KEY": + rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes) + if err != nil { + return nil, fmt.Errorf("could not parse DER encoded key: %v", err) + } + + tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey) + if err != nil { + return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err) + } + + return tufRSAPrivateKey, nil + case "EC PRIVATE KEY": + ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes) + if err != nil { + return nil, fmt.Errorf("could not parse DER encoded private key: %v", err) + } + + tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey) + if err != nil { + return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err) + } + + return tufECDSAPrivateKey, nil + case "ED25519 PRIVATE KEY": + // We serialize ED25519 keys by concatenating the private key + // to the public key and encoding with PEM. See the + // ED25519ToPrivateKey function. 
+ tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes) + if err != nil { + return nil, fmt.Errorf("could not convert ed25519 private key to data.PrivateKey: %v", err) + } + + return tufECDSAPrivateKey, nil + + default: + return nil, fmt.Errorf("unsupported key type %q", block.Type) + } +} + +// CertToPEM is a utility function that returns a PEM encoded x509 Certificate +func CertToPEM(cert *x509.Certificate) []byte { + pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + + return pemCert +} + +// CertChainToPEM is a utility function that returns a PEM encoded chain of x509 Certificates, in the order they are passed +func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) { + var pemBytes bytes.Buffer + for _, cert := range certChain { + if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { + return nil, err + } + } + return pemBytes.Bytes(), nil +} + // LoadCertFromFile loads the first certificate from the file provided. The // data is expected to be PEM Encoded and contain one of more certificates // with PEM type "CERTIFICATE" @@ -138,66 +234,6 @@ func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate { return intCerts } -// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It -// only supports RSA (PKCS#1) and attempts to decrypt using the passphrase, if encrypted.
-func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("no valid private key found") - } - - var privKeyBytes []byte - var err error - if x509.IsEncryptedPEMBlock(block) { - privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) - if err != nil { - return nil, errors.New("could not decrypt private key") - } - } else { - privKeyBytes = block.Bytes - } - - switch block.Type { - case "RSA PRIVATE KEY": - rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes) - if err != nil { - return nil, fmt.Errorf("could not parse DER encoded key: %v", err) - } - - tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey) - if err != nil { - return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err) - } - - return tufRSAPrivateKey, nil - case "EC PRIVATE KEY": - ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes) - if err != nil { - return nil, fmt.Errorf("could not parse DER encoded private key: %v", err) - } - - tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey) - if err != nil { - return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err) - } - - return tufECDSAPrivateKey, nil - case "ED25519 PRIVATE KEY": - // We serialize ED25519 keys by concatenating the private key - // to the public key and encoding with PEM. See the - // ED25519ToPrivateKey function. - tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes) - if err != nil { - return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err) - } - - return tufECDSAPrivateKey, nil - - default: - return nil, fmt.Errorf("unsupported key type %q", block.Type) - } -} - // ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate. 
func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) { pemBlock, _ := pem.Decode(pubKeyBytes) @@ -498,27 +534,3 @@ func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate BasicConstraintsValid: true, }, nil } - -// X509PublicKeyID returns a public key ID as a string, given a -// data.PublicKey that contains an X509 Certificate -func X509PublicKeyID(certPubKey data.PublicKey) (string, error) { - // Note that this only loads the first certificate from the public key - cert, err := LoadCertFromPEM(certPubKey.Public()) - if err != nil { - return "", err - } - pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey) - if err != nil { - return "", err - } - - var key data.PublicKey - switch certPubKey.Algorithm() { - case data.ECDSAx509Key: - key = data.NewECDSAPublicKey(pubKeyBytes) - case data.RSAx509Key: - key = data.NewRSAPublicKey(pubKeyBytes) - } - - return key.ID(), nil -} diff --git a/trustmanager/x509utils_test.go b/tuf/utils/x509_test.go similarity index 94% rename from trustmanager/x509utils_test.go rename to tuf/utils/x509_test.go index 11710cb382..4b98f05c3e 100644 --- a/trustmanager/x509utils_test.go +++ b/tuf/utils/x509_test.go @@ -1,4 +1,4 @@ -package trustmanager +package utils import ( "crypto/ecdsa" @@ -16,15 +16,15 @@ import ( func TestCertsToKeys(t *testing.T) { // Get root certificate - rootCA, err := LoadCertFromFile("../fixtures/root-ca.crt") + rootCA, err := LoadCertFromFile("../../fixtures/root-ca.crt") require.NoError(t, err) // Get intermediate certificate - intermediateCA, err := LoadCertFromFile("../fixtures/intermediate-ca.crt") + intermediateCA, err := LoadCertFromFile("../../fixtures/intermediate-ca.crt") require.NoError(t, err) // Get leaf certificate - leafCert, err := LoadCertFromFile("../fixtures/secure.example.com.crt") + leafCert, err := LoadCertFromFile("../../fixtures/secure.example.com.crt") require.NoError(t, err) // Get our certList with Leaf Cert and Intermediate @@ -170,14 +170,14 
@@ func TestKeyOperations(t *testing.T) { // X509PublickeyID returns the public key ID of a RSA X509 key rather than the // cert ID func TestRSAX509PublickeyID(t *testing.T) { - fileBytes, err := ioutil.ReadFile("../fixtures/notary-server.key") + fileBytes, err := ioutil.ReadFile("../../fixtures/notary-server.key") require.NoError(t, err) privKey, err := ParsePEMPrivateKey(fileBytes, "") require.NoError(t, err) expectedTUFID := privKey.ID() - cert, err := LoadCertFromFile("../fixtures/notary-server.crt") + cert, err := LoadCertFromFile("../../fixtures/notary-server.crt") require.NoError(t, err) rsaKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey) diff --git a/utils/configuration_test.go b/utils/configuration_test.go index 5077bc0a50..743031b45d 100644 --- a/utils/configuration_test.go +++ b/utils/configuration_test.go @@ -12,7 +12,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/bugsnag/bugsnag-go" "github.com/docker/notary" - "github.com/docker/notary/trustmanager" + "github.com/docker/notary/tuf/utils" "github.com/spf13/viper" "github.com/stretchr/testify/require" ) @@ -390,7 +390,7 @@ func TestParseTLSWithTLS(t *testing.T) { expectedCert, err := tls.LoadX509KeyPair(Cert, Key) require.NoError(t, err) - expectedRoot, err := trustmanager.LoadCertFromFile(Root) + expectedRoot, err := utils.LoadCertFromFile(Root) require.NoError(t, err) require.Len(t, tlsConfig.Certificates, 1) @@ -449,7 +449,7 @@ func TestParseTLSWithEnvironmentVariables(t *testing.T) { expectedCert, err := tls.LoadX509KeyPair(Cert, Key) require.NoError(t, err) - expectedRoot, err := trustmanager.LoadCertFromFile(Root) + expectedRoot, err := utils.LoadCertFromFile(Root) require.NoError(t, err) require.Len(t, tlsConfig.Certificates, 1)