diff --git a/README.md b/README.md index c5da104..41d8187 100644 --- a/README.md +++ b/README.md @@ -1,71 +1,135 @@ ## Secret Sync -Enables secret synchronization between two secret store services (e.g. between Vault and AWS) in a configurable manner. +Enables secret synchronization between two secret store services (e.g. between Hashicorp Vault and AWS) in a configurable and explicit manner. > [!WARNING] > This is an early alpha version and there will be changes made to the API. You can support us with your feedback. ### Supported secret stores -- Vault -- FileDir (regular system directory) +- Hashicorp Vault +- FileDir (store is a folder, secrets are plain unencrypted files) -### Quick usage -Synchronize secrets every hour from Vault-A to Vault-B instance. +### Examples -#### Define stores and sync job strategy +
+Synchronize specific secrets every hour between two Hashicorp Vault instance + +#### Define stores ```yaml ### Vault-A - Source ### SecretStore: path/to/vault-source.yaml -permissions: Read -provider: - vault: +vault: address: "http://0.0.0.0:8200" - unseal-keys-path: "secret" + storePath: "secret" role: "" - auth-path: "userpass" - token-path: "" + authPath: "userpass" + tokenPath: "" token: "root" ``` ```yaml -### Vault-B - Dest -### SecretStore: path/to/vault-dest.yaml -permissions: Write -provider: - vault: +### Vault-B - Target +### SecretStore: path/to/vault-target.yaml +vault: address: "http://0.0.0.0:8201" - unseal-keys-path: "secret" + storePath: "secret" role: "" - auth-path: "userpass" - token-path: "" + authPath: "userpass" + tokenPath: "" token: "root" ``` + +#### Define sync strategy ```yaml ### SyncJob: path/to/sync-job.yaml schedule: "@every 1h" -plan: - - secret: - key: "a" - - secret: - key: "b/b" - - secret: - key: "c/c/c" - - query: - path: "d/d/d" +## Defines how the secrets will be synced +sync: + ## 1. Usage: Sync key from ref + - secretRef: + key: /source/credentials/username + target: # If not specified, will be synced under the same key + key: /target/example-1 + + ## 2. Usage: Sync all keys from query + - secretQuery: + path: /source/credentials + key: + regexp: .* + target: # If not specified, all keys will be synced under the same path + keyPrefix: /target/example-2/ + + ## 3. Usage: Sync key from ref with templating + - secretRef: + key: /source/credentials/password + target: + key: /target/example-3 + + # Template defines how the secret will be synced to target store. + # Either "rawData" or "data" should be specified, not both. + template: + rawData: '{{ .Data }}' # Save as raw (accepts multiline string) + data: # Save as map (accepts nested values) + example: '{{ .Data }}' + + ## 4. Usage: Sync all keys from query with templating + - secretQuery: + path: /source/credentials key: - regexp: ".*" - key-transform: - - regexp: - source: "d/d/d/(.*)" - target: "d/d/d/$1-final" + regexp: .* + target: + keyPrefix: /target/example-4/ + template: + rawData: 'SECRET-PREFIX-{{ .Data }}' + + ## 5. Usage: Sync single key from query with templating + - secretQuery: + path: /source/credentials/query-data/ + key: + regexp: (username|password) + flatten: true + target: + key: /target/example-5 + + template: + data: + user: '{{ .Data.username }}' + pass: '{{ .Data.password }}' + + ## 6. Usage: Sync single key from multiple sources with templating + - secretSources: + - name: username # Username mapping, available as ".Data.username" + secretRef: + key: /source/credentials/username + + - name: password # Password mapping, available as ".Data.password" + secretRef: + key: /source/credentials/password + + - name: dynamic_query # Query mapping, available as "Data.dynamic_query." + secretQuery: + path: /source/credentials + key: + regexp: .* + + target: + key: /target/example-6 + + template: + data: + username: '{{ .Data.username }}' + password: '{{ .Data.password }}' + userpass: '{{ .Data.dynamic_query.username }}/{{ .Data.dynamic_query.password }}' ``` #### Perform sync ```bash secret-sync --source path/to/vault-source.yaml \ - --dest path/to/vault-dest.yaml \ + --target path/to/vault-target.yaml \ --sync path/to/sync-job.yaml # Use --schedule "@every 1m" to override sync job file config. ``` +
+ ### Docs -Check documentation and example usage at [PROPOSAL](docs/proposal.md). +Check documentation and example usage at [DOCS](docs/). diff --git a/cmd/sync.go b/cmd/sync.go index 853cd9f..4ea977e 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -17,7 +17,6 @@ package cmd import ( "context" "encoding/json" - "fmt" "os" "os/signal" @@ -36,7 +35,7 @@ func NewSyncCmd() *cobra.Command { cmd := &syncCmd{} cobraCmd := &cobra.Command{ Use: "sync", - Short: "Synchronizes a key-value destination store from source store", + Short: "Synchronizes secrets from a source to a target store based on sync strategy.", RunE: func(_ *cobra.Command, _ []string) error { if err := cmd.init(); err != nil { return err @@ -46,15 +45,20 @@ func NewSyncCmd() *cobra.Command { } // Register cmd flags - cobraCmd.Flags().StringVar(&cmd.flgSrcFile, "source", "", "Source store config file") + cobraCmd.Flags().StringVar(&cmd.flgSrcFile, "source", "", "Source store config file. "+ + "This is the store where the data will be fetched from.") _ = cobraCmd.MarkFlagRequired("source") - cobraCmd.Flags().StringVar(&cmd.flagDstFile, "dest", "", "Destination store config file") - _ = cobraCmd.MarkFlagRequired("dest") - cobraCmd.Flags().StringVar(&cmd.flagSyncFile, "sync", "", "Sync job config file") + cobraCmd.Flags().StringVar(&cmd.flagDstFile, "target", "", "Target store config file. "+ + "This is the store where the data will be synced to.") + _ = cobraCmd.MarkFlagRequired("target") + cobraCmd.Flags().StringVar(&cmd.flagSyncFile, "sync", "", "Sync job config file. "+ + "This is the strategy sync template.") _ = cobraCmd.MarkFlagRequired("sync") - cobraCmd.Flags().StringVar(&cmd.flagSchedule, "schedule", v1alpha1.DefaultSyncJobSchedule, "Synchronization CRON schedule. Overrides --sync params") - cobraCmd.Flags().BoolVar(&cmd.flagOnce, "once", false, "Synchronize once and exit. Overrides --sync params") + cobraCmd.Flags().StringVar(&cmd.flagSchedule, "schedule", v1alpha1.DefaultSyncJobSchedule, + "Sync on CRON schedule. Either --schedule or --once should be specified.") + cobraCmd.Flags().BoolVar(&cmd.flagOnce, "once", false, + "Synchronize once and exit. Either --schedule or --once should be specified.") return cobraCmd } @@ -67,8 +71,8 @@ type syncCmd struct { flagOnce bool source v1alpha1.StoreReader - dest v1alpha1.StoreWriter - sync *v1alpha1.SyncJobSpec + target v1alpha1.StoreWriter + sync *v1alpha1.SyncJob } func (cmd *syncCmd) init() error { @@ -79,29 +83,23 @@ func (cmd *syncCmd) init() error { if err != nil { return err } - if !srcStore.GetPermissions().CanPerform(v1alpha1.SecretStorePermissionsRead) { - return fmt.Errorf("source does not have Read permissions") - } - cmd.source, err = provider.NewClient(context.Background(), &srcStore.Provider) + cmd.source, err = provider.NewClient(context.Background(), srcStore) if err != nil { return err } - // Init dest - destStore, err := loadStore(cmd.flagDstFile) + // Init target + targetStore, err := loadStore(cmd.flagDstFile) if err != nil { return err } - if !destStore.GetPermissions().CanPerform(v1alpha1.SecretStorePermissionsWrite) { - return fmt.Errorf("dest does not have Write permissions") - } - cmd.dest, err = provider.NewClient(context.Background(), &destStore.Provider) + cmd.target, err = provider.NewClient(context.Background(), targetStore) if err != nil { return err } // Init sync request by loading from file and overriding from cli - cmd.sync, err = loadRequest(cmd.flagSyncFile) + cmd.sync, err = loadStrategy(cmd.flagSyncFile) if err != nil { return err } @@ -115,10 +113,10 @@ func (cmd *syncCmd) init() error { return nil } -func (cmd *syncCmd) run(syncReq *v1alpha1.SyncJobSpec) error { +func (cmd *syncCmd) run(syncReq *v1alpha1.SyncJob) error { // Run once if syncReq.RunOnce { - resp, err := storesync.Sync(context.Background(), cmd.source, cmd.dest, syncReq.Plan) + resp, err := storesync.Sync(context.Background(), cmd.source, cmd.target, syncReq.Sync) if err != nil { return err } @@ -138,7 +136,7 @@ func (cmd *syncCmd) run(syncReq *v1alpha1.SyncJobSpec) error { select { case <-cronTicker.C: logrus.Info("Handling a new sync request...") - resp, err := storesync.Sync(context.Background(), cmd.source, cmd.dest, syncReq.Plan) + resp, err := storesync.Sync(context.Background(), cmd.source, cmd.target, syncReq.Sync) if err != nil { return err } @@ -150,8 +148,7 @@ func (cmd *syncCmd) run(syncReq *v1alpha1.SyncJobSpec) error { } } -// loadRequest loads apis.SyncJobSpec data from a YAML file. -func loadRequest(path string) (*v1alpha1.SyncJobSpec, error) { +func loadStrategy(path string) (*v1alpha1.SyncJob, error) { // Load file yamlBytes, err := os.ReadFile(path) if err != nil { @@ -159,7 +156,7 @@ func loadRequest(path string) (*v1alpha1.SyncJobSpec, error) { } // Unmarshal (convert YAML to JSON) - var ruleCfg v1alpha1.SyncJobSpec + var ruleCfg v1alpha1.SyncJob jsonBytes, err := yaml.YAMLToJSON(yamlBytes) if err != nil { return nil, err @@ -170,8 +167,7 @@ func loadRequest(path string) (*v1alpha1.SyncJobSpec, error) { return &ruleCfg, nil } -// loadStore loads apis.SecretStoreSpec from a YAML file. -func loadStore(path string) (*v1alpha1.SecretStoreSpec, error) { +func loadStore(path string) (*v1alpha1.ProviderBackend, error) { // Load file yamlBytes, err := os.ReadFile(path) if err != nil { @@ -179,7 +175,7 @@ func loadStore(path string) (*v1alpha1.SecretStoreSpec, error) { } // Unmarshal (convert YAML to JSON) - var spec v1alpha1.SecretStoreSpec + var spec v1alpha1.ProviderBackend jsonBytes, err := yaml.YAMLToJSON(yamlBytes) if err != nil { return nil, err diff --git a/cmd/sync_test.go b/cmd/sync_test.go index ab85cd0..d791d2e 100644 --- a/cmd/sync_test.go +++ b/cmd/sync_test.go @@ -23,11 +23,13 @@ import ( "github.com/stretchr/testify/assert" ) +// TODO: Expand tests + func TestSync(t *testing.T) { syncCmd := NewSyncCmd() syncCmd.SetArgs([]string{ - "--source", storeFile(t, "testdata/source"), - "--dest", storeFile(t, filepath.Join(os.TempDir(), "dest")), + "--source", storeFile(t, "testdata"), + "--target", storeFile(t, filepath.Join(os.TempDir(), "target")), "--sync", "testdata/syncjob.yaml", "--once", }) @@ -49,10 +51,8 @@ func storeFile(t *testing.T, dirPath string) string { // Write _, err = tmpFile.Write([]byte(fmt.Sprintf(` -permissions: ReadWrite -provider: - file: - dir-path: %q +file: + dirPath: %q `, path))) assert.Nil(t, err) diff --git a/cmd/testdata/source/a b/cmd/testdata/source/a deleted file mode 100644 index 2e65efe..0000000 --- a/cmd/testdata/source/a +++ /dev/null @@ -1 +0,0 @@ -a \ No newline at end of file diff --git a/cmd/testdata/source/b/b b/cmd/testdata/source/b/b deleted file mode 100644 index 63d8dbd..0000000 --- a/cmd/testdata/source/b/b +++ /dev/null @@ -1 +0,0 @@ -b \ No newline at end of file diff --git a/cmd/testdata/source/c/c/c b/cmd/testdata/source/c/c/c deleted file mode 100644 index 3410062..0000000 --- a/cmd/testdata/source/c/c/c +++ /dev/null @@ -1 +0,0 @@ -c \ No newline at end of file diff --git a/cmd/testdata/source/credentials/password b/cmd/testdata/source/credentials/password new file mode 100644 index 0000000..18f1697 --- /dev/null +++ b/cmd/testdata/source/credentials/password @@ -0,0 +1 @@ +this-is-password \ No newline at end of file diff --git a/cmd/testdata/source/credentials/username b/cmd/testdata/source/credentials/username new file mode 100644 index 0000000..b4defdd --- /dev/null +++ b/cmd/testdata/source/credentials/username @@ -0,0 +1 @@ +this-is-username \ No newline at end of file diff --git a/cmd/testdata/source/d/d/d/1 b/cmd/testdata/source/d/d/d/1 deleted file mode 100644 index 56a6051..0000000 --- a/cmd/testdata/source/d/d/d/1 +++ /dev/null @@ -1 +0,0 @@ -1 \ No newline at end of file diff --git a/cmd/testdata/source/d/d/d/2 b/cmd/testdata/source/d/d/d/2 deleted file mode 100644 index d8263ee..0000000 --- a/cmd/testdata/source/d/d/d/2 +++ /dev/null @@ -1 +0,0 @@ -2 \ No newline at end of file diff --git a/cmd/testdata/store-file-dest.yaml b/cmd/testdata/store-file-dest.yaml index fbd3e9e..78978bc 100644 --- a/cmd/testdata/store-file-dest.yaml +++ b/cmd/testdata/store-file-dest.yaml @@ -1,4 +1,2 @@ -permissions: ReadWrite -provider: - file: - dir-path: "/tmp/dest" +file: + dirPath: "/tmp/target" diff --git a/cmd/testdata/store-file-source.yaml b/cmd/testdata/store-file-source.yaml index 852b4b2..9e8387b 100644 --- a/cmd/testdata/store-file-source.yaml +++ b/cmd/testdata/store-file-source.yaml @@ -1,4 +1,2 @@ -permissions: ReadWrite -provider: - file: - dir-path: "/tmp/source" +file: + dirPath: "/tmp/source" diff --git a/cmd/testdata/store-vault.yaml b/cmd/testdata/store-vault.yaml index 968dd6b..1ceb310 100644 --- a/cmd/testdata/store-vault.yaml +++ b/cmd/testdata/store-vault.yaml @@ -1,9 +1,7 @@ -permissions: ReadWrite -provider: - vault: - address: http://0.0.0.0:8200 - unseal-keys-path: secret - role: '' - auth-path: userpass - token-path: '' - token: root +vault: + address: http://0.0.0.0:8200 + storePath: secret + role: '' + authPath: userpass + tokenPath: '' + token: root diff --git a/cmd/testdata/syncjob.yaml b/cmd/testdata/syncjob.yaml index 9a7db4a..db7aaf6 100644 --- a/cmd/testdata/syncjob.yaml +++ b/cmd/testdata/syncjob.yaml @@ -4,24 +4,201 @@ schedule: "@daily" ## Used to only perform sync once. ## If specified, Schedule will be ignored. -run-once: false - -## Used to specify overall sync plan between source and dest -plan: - - secret: - key: a - - secret: - key: b/b - - secret: - key: c/c/c - - query: - path: "d/d/d" +runOnce: false + +## Points to a file where all sync logs should be saved to. +auditLogPath: "path/to/file" + +## Defines how the secrets will be synced +sync: + ## 1. Usage: Sync key from ref + - secretRef: + key: /source/credentials/username + target: # If not specified, will be synced under the same key + key: /target/example-1 + + ## 2. Usage: Sync all keys from query + - secretQuery: + path: /source/credentials key: regexp: .* - key-transform: - - regexp: - source: "d/d/d/(.*)" - target: "d/d/d/$1-final" + target: # If not specified, all keys will be synced under the same path + keyPrefix: /target/example-2/ -## Points to a file where all sync logs should be saved to. -audit-log-path: "path/to/file" + ## 3. Usage: Sync key from ref with templating + - secretRef: + key: /source/credentials/password + target: + key: /target/example-3 + + # Template defines how the secret will be synced to target store. + # Either "rawData" or "data" should be specified, not both. + template: + rawData: '{{ .Data }}' # Save as raw (accepts multiline string) + data: # Save as map (accepts nested values) + example: '{{ .Data }}' + + ## 4. Usage: Sync all keys from query with templating + - secretQuery: + path: /source/credentials + key: + regexp: .* + target: + keyPrefix: /target/example-4/ + template: + rawData: 'SECRET-PREFIX-{{ .Data }}' + + ## 5. Usage: Sync single key from query with templating + - secretQuery: + path: /source/credentials/query-data/ + key: + regexp: (username|password) + flatten: true + target: + key: /target/example-5 + template: + data: + user: '{{ .Data.username }}' + pass: '{{ .Data.password }}' + + ## 6. Usage: Sync single key from multiple sources with templating + - secretSources: + - name: username # Username mapping, available as ".Data.username" + secretRef: + key: /source/credentials/username + + - name: password # Password mapping, available as ".Data.password" + secretRef: + key: /source/credentials/password + + - name: dynamic_query # Query mapping, available as "Data.dynamic_query." + secretQuery: + path: /source/credentials + key: + regexp: .* + + target: + key: /target/example-6 + + template: + data: + username: '{{ .Data.username }}' + password: '{{ .Data.password }}' + userpass: '{{ .Data.dynamic_query.username }}/{{ .Data.dynamic_query.password }}' + + +## TODO: Move these items to tests +## Defines how the secrets will be synced +# sync: +# ## 1. Usage: Sync key from ref (all possibilities) +# - secretRef: +# key: /source/credentials/example +# +# - secretRef: +# key: /source/credentials/example +# template: +# rawData: "pre-{{ .Data }}-post" +# +# - secretRef: +# key: /source/credentials/example +# template: +# data: +# auth: "example/pre-{{ .Data }}-post" +# +# - secretRef: +# key: /source/credentials/example +# target: +# key: /target/credentials/example +# +# - secretRef: +# key: /source/credentials/example +# target: +# key: /target/credentials/example +# template: +# rawData: "pre-{{ .Data }}-post" +# +# - secretRef: +# key: /source/credentials/example +# target: +# key: /target/credentials/example +# template: +# data: +# auth: "example/pre-{{ .Data }}-post" +# +# ## 2. Usage: Sync all keys from query individually (all possibilities) +# - secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# +# - secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# template: +# auth: "example/pre-{{ .Data }}-post" +# +# - secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# template: +# data: +# newKey: "pre-{{ .Data }}-post" +# +# - secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# target: +# keyPrefix: /target/credentials/new/ +# +# - secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# target: +# keyPrefix: /target/credentials/new/ +# template: +# auth: "example/pre-{{ .Data }}-post" +# +# - secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# target: +# keyPrefix: /target/credentials/new/ +# template: +# data: +# newKey: "pre-{{ .Data }}-post" +# +# ## 3. Usage: Sync all keys from query into one key (all possibilities) +# - secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# target: +# key: /target/credentials/key-from-query +# template: +# data: +# username: '{{ .Data.username }}' +# password: '{{ .Data.password }}' +# +# ## 4. Usage: Sync all keys from different sources into one key (all possibilities) +# - secretSources: +# - name: example +# secretRef: +# key: /source/credentials/example +# +# - name: credentials +# secretQuery: +# path: /source/credentials +# key: +# regexp: (username|password) +# target: +# key: /target/credentials/key-from-sources +# template: +# data: +# example: '{{ .Data.example }}' +# username: '{{ .Data.credentials.username }}' +# password: '{{ .Data.credentials.password }}' diff --git a/docs/00-sync-proposal.md b/docs/00-sync-proposal.md deleted file mode 100644 index 0ab0bb0..0000000 --- a/docs/00-sync-proposal.md +++ /dev/null @@ -1,41 +0,0 @@ -Secret Sync -=================== - -This document describes the operational and support aspect of secret synchronization. - - -* [Secret Sync](#secret-sync) - * [Goals](#goals) - * [High-level overview](#high-level-overview) - * [Proposal](#proposal) - * [SecretStore](#secretstore) - * [SyncJob](#syncjob) - * [Potential issues](#potential-issues) - * [Example usages](#example-usages) - * [Synchronize secrets from Vault-Source to Vault-Dest instance every hour](#synchronize-secrets-from-vault-source-to-vault-dest-instance-every-hour) - - -## Goals -* Enable secret synchronization between two secret store services (e.g. between Vault and AWS). -* Provide ways to select which keys need to be synced from source store using either static values, dynamic query, or both. -* Provide a way to transform each key before being sent to destination store. -* Allow concurrent synchronization. -* Support simple sync auditing for transparency. -* Expose functionalities as a standalone CLI -* Provide ways to run inside Kubernetes - -## High-level overview -The API is composed of two schemas: - 1. `SecretStore` schema provides access to various secret stores. - It is composed of `Provider` which specifies the secret store backend (e.g. Vault, AWS), - and `Permissions` to ensure operational scope (e.g. Read, Write, ReadWrite) on the store itself. - - 2. `SyncJob` exposes options for periodic (CRON scheduled) secret synchronization between `Source` and `Dest` store. - The selection and transformations of secrets to sync can be done via `Plan` list, using: - * `Secret` - to specify a static secret key - * `Query` - to specify a dynamic query used to list secret keys to sync from Source - * `KeyTransform` - to specify ways to transform referenced key (either from secret or query) - * `Source` - to override default source (future implementation for many-to-1, currently we only focus on 1-to-1 store syncs) - -## Example usages -* Synchronize secrets from main Vault instance to local k8s Vault instance every hour diff --git a/go.mod b/go.mod index 5bb30cf..b437a26 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/spf13/cast v1.5.0 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.2 + golang.org/x/sync v0.1.0 ) require ( diff --git a/go.sum b/go.sum index be63833..7c0ec42 100644 --- a/go.sum +++ b/go.sum @@ -207,6 +207,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/pkg/apis/v1alpha1/provider.go b/pkg/apis/v1alpha1/provider.go index 57c223a..532757e 100644 --- a/pkg/apis/v1alpha1/provider.go +++ b/pkg/apis/v1alpha1/provider.go @@ -24,25 +24,25 @@ var ErrKeyNotFound = errors.New("secret key not found") // Provider defines methods to manage store clients. type Provider interface { // NewClient creates a new secret StoreClient for provided backend. - NewClient(ctx context.Context, backend SecretStoreProvider) (StoreClient, error) + NewClient(ctx context.Context, backend ProviderBackend) (StoreClient, error) // Validate checks if the provided backend is valid. - Validate(backend SecretStoreProvider) error + Validate(backend ProviderBackend) error } // StoreReader implements read ops for a secret backend. Must support concurrent calls. type StoreReader interface { // GetSecret returns a single secret fetched from secret store. - GetSecret(ctx context.Context, key SecretKey) ([]byte, error) + GetSecret(ctx context.Context, key SecretRef) ([]byte, error) // ListSecretKeys lists all keys matching the query from secret store. - ListSecretKeys(ctx context.Context, query SecretKeyQuery) ([]SecretKey, error) + ListSecretKeys(ctx context.Context, query SecretQuery) ([]SecretRef, error) } // StoreWriter implements write ops for a secret backend. Must support concurrent calls. type StoreWriter interface { // SetSecret writes data to a key in a secret store. - SetSecret(ctx context.Context, key SecretKey, value []byte) error + SetSecret(ctx context.Context, key SecretRef, value []byte) error } // StoreClient unifies read and write ops for a specific secret backend. @@ -50,3 +50,11 @@ type StoreClient interface { StoreReader StoreWriter } + +// ProviderBackend defines the which backend should be used for Provider. +// Only one can be specified. +type ProviderBackend struct { + Vault *VaultProvider `json:"vault,omitempty"` + + File *FileProvider `json:"file,omitempty"` +} diff --git a/pkg/apis/v1alpha1/secretstore_file_types.go b/pkg/apis/v1alpha1/provider_file_types.go similarity index 82% rename from pkg/apis/v1alpha1/secretstore_file_types.go rename to pkg/apis/v1alpha1/provider_file_types.go index 404ab26..4f353fb 100644 --- a/pkg/apis/v1alpha1/secretstore_file_types.go +++ b/pkg/apis/v1alpha1/provider_file_types.go @@ -14,7 +14,7 @@ package v1alpha1 -// SecretStoreProviderFile defines provider for a file. -type SecretStoreProviderFile struct { - DirPath string `json:"dir-path"` +// FileProvider uses OS dir and files as a backend. +type FileProvider struct { + DirPath string `json:"dirPath"` } diff --git a/pkg/apis/v1alpha1/provider_schema.go b/pkg/apis/v1alpha1/provider_schema.go index a0697b1..569a18d 100644 --- a/pkg/apis/v1alpha1/provider_schema.go +++ b/pkg/apis/v1alpha1/provider_schema.go @@ -23,9 +23,8 @@ import ( var providers = map[string]Provider{} var providerMu = sync.RWMutex{} -// Register a secret store backend type. Panics if a backend with for the same -// store is already registered. -func Register(provider Provider, backend *SecretStoreProvider) { +// Register a Provider for a given backend. Panics if a given backend is already registered. +func Register(provider Provider, backend *ProviderBackend) { providerName, err := getProviderName(backend) if err != nil { panic(fmt.Errorf("error registering secret backend: %w", err)) @@ -40,8 +39,8 @@ func Register(provider Provider, backend *SecretStoreProvider) { providers[providerName] = provider } -// GetProvider returns the provider for SecretStoreSpec. -func GetProvider(backend *SecretStoreProvider) (Provider, error) { +// GetProvider returns the Provider for given ProviderBackend. +func GetProvider(backend *ProviderBackend) (Provider, error) { providerName, err := getProviderName(backend) if err != nil { return nil, fmt.Errorf("failed to find store backend: %w", err) @@ -58,9 +57,9 @@ func GetProvider(backend *SecretStoreProvider) (Provider, error) { return provider, nil } -// getProviderName returns the name of the configured provider or an error if the -// provider is invalid/not configured. -func getProviderName(backend *SecretStoreProvider) (string, error) { +// getProviderName returns the name of the configured ProviderBackend or an error if the +// Provider is invalid/not configured. +func getProviderName(backend *ProviderBackend) (string, error) { if backend == nil { return "", fmt.Errorf("no StoreConfig provided") } diff --git a/pkg/apis/v1alpha1/secretstore_vault_types.go b/pkg/apis/v1alpha1/provider_vault_types.go similarity index 60% rename from pkg/apis/v1alpha1/secretstore_vault_types.go rename to pkg/apis/v1alpha1/provider_vault_types.go index 07b4c27..0b383ee 100644 --- a/pkg/apis/v1alpha1/secretstore_vault_types.go +++ b/pkg/apis/v1alpha1/provider_vault_types.go @@ -14,12 +14,12 @@ package v1alpha1 -// SecretStoreProviderVault defines provider for a Vault. -type SecretStoreProviderVault struct { - Address string `json:"address"` - UnsealKeysPath string `json:"unseal-keys-path"` - Role string `json:"role"` - AuthPath string `json:"auth-path"` - TokenPath string `json:"token-path"` - Token string `json:"token"` // TODO: Add support for reading this from a k8s secret +// VaultProvider uses Hashicorp Vault as a backend. +type VaultProvider struct { + Address string `json:"address"` + StorePath string `json:"storePath"` + Role string `json:"role"` + AuthPath string `json:"authPath"` + TokenPath string `json:"tokenPath"` + Token string `json:"token"` } diff --git a/pkg/apis/v1alpha1/secretkey_types.go b/pkg/apis/v1alpha1/secretkey_types.go index 125603b..56825bd 100644 --- a/pkg/apis/v1alpha1/secretkey_types.go +++ b/pkg/apis/v1alpha1/secretkey_types.go @@ -16,80 +16,73 @@ package v1alpha1 import "strings" -// SecretKey defines Provider key params. -// TODO: Add support for different encodings (to decode when fetching). -type SecretKey struct { +// SecretRef defines Provider reference key. +// TODO: Add support for version +// TODO: Add support for map field selector +// TODO: Add support for encoding +type SecretRef struct { // Key points to a specific key in store. // Format "path/to/key" // Required - Key string `json:"key"` + Key string `json:"key,omitempty"` // Version points to specific key version. - // TODO: Add support on providers // Optional - Version string `json:"version"` + Version *string `json:"version,omitempty"` } -// GetPath returns path pointed by Key, e.g. GetPath("path/to/key") returns ["path", "to"] -func (key *SecretKey) GetPath() []string { - parts := strings.Split(key.Key, "/") +// GetPath returns path pointed by Key, e.g. GetPath("/path/to/key") returns ["path", "to"] +func (key *SecretRef) GetPath() []string { + parts := strings.Split(strings.TrimPrefix(key.Key, "/"), "/") if len(parts) == 0 { return nil } return parts[:len(parts)-1] } -// GetProperty returns property (domain) pointed by Key, e.g. GetProperty("path/to/key") returns "key" -func (key *SecretKey) GetProperty() string { - parts := strings.Split(key.Key, "/") +// GetName returns (domain) name pointed by Key, e.g. GetName("/path/to/key") returns "key" +func (key *SecretRef) GetName() string { + parts := strings.Split(strings.TrimPrefix(key.Key, "/"), "/") if len(parts) == 0 { return key.Key } return parts[len(parts)-1] } -// SecretKeyFromRef defines SecretKey data to fetch and transform from referenced store. -// TODO: Add support for overriding default SyncJob source. -type SecretKeyFromRef struct { - // Used to reference a static secret key. +// SecretQuery defines how to query Provider to obtain SecretRef(s). +// TODO: Add support for version +// TODO: Add support for map field selector +// TODO: Add support for encoding +type SecretQuery struct { + // A root path to start the query operations. // Optional - SecretKey *SecretKey `json:"secret,omitempty"` - - // Used to find secret key based on query. - // Ignored if SecretKey is specified. - // Optional - Query *SecretKeyQuery `json:"query,omitempty"` + Path *string `json:"path,omitempty"` - // Used to transform secret keys after getting them from the Provider. - // Multiple KeyTransform operations will be applied in FIFO order. - // Optional - KeyTransform []SecretKeyTransform `json:"key-transform,omitempty"` + // Finds SecretRef based on key query. + // Required + Key Query `json:"key,omitempty"` } -type SecretKeyQuery struct { - // A root path to start the find operations. - // Optional - Path *string `json:"path,omitempty"` +// SecretSource defines named secret source. +// This enables named usage in SyncTemplate given as: +// a) when using FromRef, enables {{ .Data.ref_name }} +// b) when using FromQuery, enables {{ .Data.query_name. }} +type SecretSource struct { + // Used to define unique name for templating. + // Required + Name string `json:"name,omitempty"` - // Finds secret based on the regex key. - // Optional - Key *RegexpQuery `json:"key,omitempty"` -} + // FromRef selects a secret from a reference. + // Optional, but SecretQuery must be provided + FromRef *SecretRef `json:"secretRef,omitempty"` -type SecretKeyTransform struct { - // Used to transform SecretKey with regular expressions. - // The resulting SecretKey will be the output of a regexp.ReplaceAll operation. - Regexp *RegexpTransform `json:"regexp,omitempty"` + // FromQuery selects secret(s) from a query. + // Optional, but SecretRef must be provided + FromQuery *SecretQuery `json:"secretQuery,omitempty"` } -type RegexpQuery struct { +// Query defines how to match string-value data. +type Query struct { + // Uses regexp matching Regexp string `json:"regexp,omitempty"` } - -type RegexpTransform struct { - // Used to define the regular expression of a re.Compiler. - Source string `json:"source"` - - // Used to define the target pattern of a ReplaceAll operation. - Target string `json:"target"` -} diff --git a/pkg/apis/v1alpha1/secretstore_types.go b/pkg/apis/v1alpha1/secretstore_types.go deleted file mode 100644 index 61844ae..0000000 --- a/pkg/apis/v1alpha1/secretstore_types.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright © 2023 Cisco -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1alpha1 - -import "strings" - -var DefaultSecretStorePermissions = SecretStorePermissionsReadWrite - -// SecretStoreSpec defines an arbitrary SecretStore spec. -type SecretStoreSpec struct { - // Used to configure store mode. Defaults to ReadWrite. - // Optional - Permissions SecretStorePermissions `json:"permissions,omitempty"` - - // Used to configure secrets provider. - // Required - Provider SecretStoreProvider `json:"provider"` -} - -func (spec *SecretStoreSpec) GetPermissions() SecretStorePermissions { - if spec.Permissions == "" { - return DefaultSecretStorePermissions - } - return spec.Permissions -} - -type SecretStorePermissions string - -const ( - SecretStorePermissionsRead SecretStorePermissions = "Read" - SecretStorePermissionsWrite SecretStorePermissions = "Write" - SecretStorePermissionsReadWrite SecretStorePermissions = "ReadWrite" -) - -func (p SecretStorePermissions) CanPerform(perm SecretStorePermissions) bool { - return strings.Contains(string(p), string(perm)) -} - -// SecretStoreProvider defines secret backend for Provider. -// Only one can be specified. -type SecretStoreProvider struct { - // Used for Vault provider. - Vault *SecretStoreProviderVault `json:"vault,omitempty"` - - // Used for non-encrypted File provider. - File *SecretStoreProviderFile `json:"file,omitempty"` -} diff --git a/pkg/apis/v1alpha1/syncjob_types.go b/pkg/apis/v1alpha1/syncjob_types.go index e339a79..9609fe7 100644 --- a/pkg/apis/v1alpha1/syncjob_types.go +++ b/pkg/apis/v1alpha1/syncjob_types.go @@ -27,8 +27,14 @@ var ( DefaultSyncJobAuditLogPath = filepath.Join(os.TempDir(), "sync-audit.log") ) -// SyncJobSpec defines a source-to-dest sync request CR. -type SyncJobSpec struct { +// SyncJob defines overall source-to-target sync strategy. +// TODO: Add support for auditing. +type SyncJob struct { + // Points to a file where all sync logs should be saved to. + // Defaults to DefaultSyncJobAuditLogPath + // Optional + AuditLogPath string `json:"auditLogPath,omitempty"` + // Used to configure schedule for synchronization. // The schedule is in Cron format, see https://en.wikipedia.org/wiki/Cron // Defaults to @hourly @@ -38,22 +44,14 @@ type SyncJobSpec struct { // Used to only perform sync once. // If specified, Schedule will be ignored. // Optional - RunOnce bool `json:"run-once,omitempty"` + RunOnce bool `json:"runOnce,omitempty"` - // Used to specify sync plan. + // Used to specify the strategy for secrets sync. // Required - Plan []SecretKeyFromRef `json:"plan,omitempty"` - - // Points to a file where all sync logs should be saved to. - // Defaults to DefaultSyncJobAuditLogPath - // Optional - // TODO: Implement support for audit log file. - // Only write successful key syncs to this file. - // Consider exposing String() to get basic API details on v1alpha1.StoreClient. - AuditLogPath string `json:"audit-log-path,omitempty"` + Sync []SyncRequest `json:"sync,omitempty"` } -func (spec *SyncJobSpec) GetSchedule() string { +func (spec *SyncJob) GetSchedule() string { if spec.Schedule == "" { return DefaultSyncJobSchedule } @@ -65,9 +63,63 @@ func (spec *SyncJobSpec) GetSchedule() string { return spec.Schedule } -func (spec *SyncJobSpec) GetAuditLogPath() string { +func (spec *SyncJob) GetAuditLogPath() string { if spec.AuditLogPath == "" { return DefaultSyncJobAuditLogPath } return spec.AuditLogPath } + +// SyncRequest defines how to fetch, transform, and sync SecretRef(s) from source to target. +// Only one of FromRef, FromQuery, FromSources can be specified. +type SyncRequest struct { + // FromRef selects a secret from a reference. + // If SyncTarget.Key is nil, it will sync under referenced key. + // If SyncTarget.Key is not-nil, it will sync under targeted key. + FromRef *SecretRef `json:"secretRef,omitempty"` + + // FromQuery selects secret(s) from a query. + // To sync one secret, SyncTarget.Key and Template must be specified. + // To sync all secrets, SyncTarget.KeyPrefix must be specified. + FromQuery *SecretQuery `json:"secretQuery,omitempty"` + + // FromSources select secret(s) from a multiple sources. + // SyncTarget.Key and Template must be specified. + FromSources []SecretSource `json:"secretSources,omitempty"` + + // Target defines where the key(s) from sources will be synced on target. + // SyncTarget.Key means that only one secret will be synced. + // SyncTarget.KeyPrefix means that multiple secrets will be synced. + Target SyncTarget `json:"target,omitempty"` + + // Flatten indicates secrets FromQuery will be synced to a single SyncTarget.Key. + Flatten *bool `json:"flatten,omitempty"` + + // Template defines how the fetched key(s) will be transformed to create a new + // SecretRef that will be synced to target. + // When using FromRef, {{ .Data }} defines given secrets raw value. + // When using FromQuery and SyncTarget.Key, specific raw values can be accessed via {{ .Data. }}. + // When using FromQuery and SyncTarget.KeyPrefix, {{ .Data }} defines raw values of query iterator. + // When using FromSources, specific secret data can be accessed via {{ .Data. }}. + Template *SyncTemplate `json:"template,omitempty"` +} + +// SyncTarget defines where the secret(s) will be synced to. +type SyncTarget struct { + // Key indicates that a single SecretRef will be synced to target. + Key *string `json:"key,omitempty"` + + // KeyPrefix indicates that multiple SecretRef will be synced to target. + KeyPrefix *string `json:"keyPrefix,omitempty"` +} + +// SyncTemplate defines how to obtain SecretRef using template. +type SyncTemplate struct { + // Used to define the resulting secret (raw) value. Supports templating. + // Optional, but Data must be provided + RawData *string `json:"rawData,omitempty"` + + // Used to define the resulting secret (map) value. Supports templating. + // Optional, but RawData must be provided + Data map[string]string `json:"data,omitempty"` +} diff --git a/pkg/provider/file/client.go b/pkg/provider/file/client.go index 1220b0f..ceb6e09 100644 --- a/pkg/provider/file/client.go +++ b/pkg/provider/file/client.go @@ -19,6 +19,7 @@ import ( "fmt" "os" "path/filepath" + "regexp" "strings" "github.com/bank-vaults/secret-sync/pkg/apis/v1alpha1" @@ -28,7 +29,7 @@ type client struct { dir string } -func (c *client) GetSecret(_ context.Context, key v1alpha1.SecretKey) ([]byte, error) { +func (c *client) GetSecret(_ context.Context, key v1alpha1.SecretRef) ([]byte, error) { // Read file fpath := filepath.Join(c.dir, pathForKey(key)) data, err := os.ReadFile(fpath) @@ -38,7 +39,7 @@ func (c *client) GetSecret(_ context.Context, key v1alpha1.SecretKey) ([]byte, e return data, nil } -func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretKeyQuery) ([]v1alpha1.SecretKey, error) { +func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretQuery) ([]v1alpha1.SecretRef, error) { // Get query dir (if empty, use root) queryDir := c.dir if query.Path != nil { @@ -46,14 +47,20 @@ func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretKeyQuery } // Add all files that match filter from queried dir - var result []v1alpha1.SecretKey + var result []v1alpha1.SecretRef err := filepath.WalkDir(queryDir, func(path string, entry os.DirEntry, err error) error { // Only add files if entry != nil && entry.Type().IsRegular() { + // Extract secret key from the relative OS system path relativePath := strings.ReplaceAll(path, c.dir+string(os.PathSeparator), "") - result = append(result, v1alpha1.SecretKey{ - Key: strings.ReplaceAll(relativePath, string(os.PathSeparator), "/"), - }) + key := strings.ReplaceAll(relativePath, string(os.PathSeparator), "/") + + // Add key if it matches regexp query + if matches, _ := regexp.MatchString(query.Key.Regexp, key); matches { + result = append(result, v1alpha1.SecretRef{ + Key: "/" + key, + }) + } } return nil }) @@ -63,7 +70,7 @@ func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretKeyQuery return result, nil } -func (c *client) SetSecret(_ context.Context, key v1alpha1.SecretKey, value []byte) error { +func (c *client) SetSecret(_ context.Context, key v1alpha1.SecretRef, value []byte) error { // Create parent dir for file fpath := filepath.Join(c.dir, pathForKey(key)) parentDir := filepath.Dir(fpath) @@ -79,6 +86,6 @@ func (c *client) SetSecret(_ context.Context, key v1alpha1.SecretKey, value []by return nil } -func pathForKey(key v1alpha1.SecretKey) string { - return filepath.Join(append(key.GetPath(), key.GetProperty())...) +func pathForKey(key v1alpha1.SecretRef) string { + return filepath.Join(append(key.GetPath(), key.GetName())...) } diff --git a/pkg/provider/file/provider.go b/pkg/provider/file/provider.go index bef7b14..0b6a4c3 100644 --- a/pkg/provider/file/provider.go +++ b/pkg/provider/file/provider.go @@ -23,13 +23,13 @@ import ( type Provider struct{} -func (p *Provider) NewClient(_ context.Context, backend v1alpha1.SecretStoreProvider) (v1alpha1.StoreClient, error) { +func (p *Provider) NewClient(_ context.Context, backend v1alpha1.ProviderBackend) (v1alpha1.StoreClient, error) { return &client{ dir: backend.File.DirPath, }, nil } -func (p *Provider) Validate(backend v1alpha1.SecretStoreProvider) error { +func (p *Provider) Validate(backend v1alpha1.ProviderBackend) error { if backend.File == nil { return fmt.Errorf("empty .File") } @@ -40,7 +40,7 @@ func (p *Provider) Validate(backend v1alpha1.SecretStoreProvider) error { } func init() { - v1alpha1.Register(&Provider{}, &v1alpha1.SecretStoreProvider{ - File: &v1alpha1.SecretStoreProviderFile{}, + v1alpha1.Register(&Provider{}, &v1alpha1.ProviderBackend{ + File: &v1alpha1.FileProvider{}, }) } diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index f403a38..804219e 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -25,7 +25,7 @@ import ( ) // NewClient creates a store client for provided store backend config. -func NewClient(ctx context.Context, backend *v1alpha1.SecretStoreProvider) (v1alpha1.StoreClient, error) { +func NewClient(ctx context.Context, backend *v1alpha1.ProviderBackend) (v1alpha1.StoreClient, error) { // Get provider provider, err := v1alpha1.GetProvider(backend) if err != nil { diff --git a/pkg/provider/vault/client.go b/pkg/provider/vault/client.go index b72e7c2..d22848c 100644 --- a/pkg/provider/vault/client.go +++ b/pkg/provider/vault/client.go @@ -17,6 +17,7 @@ package vault import ( "context" "fmt" + "regexp" "strings" "github.com/bank-vaults/vault-sdk/vault" @@ -30,7 +31,7 @@ type client struct { apiKeyPath string } -func (c *client) GetSecret(_ context.Context, key v1alpha1.SecretKey) ([]byte, error) { +func (c *client) GetSecret(_ context.Context, key v1alpha1.SecretRef) ([]byte, error) { // Get secret from API keyPath := pathForKey(key) response, err := c.apiClient.RawClient().Logical().Read(fmt.Sprintf("%s/data/%s", c.apiKeyPath, keyPath)) @@ -52,16 +53,16 @@ func (c *client) GetSecret(_ context.Context, key v1alpha1.SecretKey) ([]byte, e return nil, fmt.Errorf("api get request findind data: %w", err) } - // Get property - property := key.GetProperty() - propertyData, ok := data[property] + // Get name + keyName := key.GetName() + keyData, ok := data[keyName] if !ok { - return nil, fmt.Errorf("could not find property %s for in get response", property) + return nil, fmt.Errorf("could not find %s for in get response", keyName) } - return []byte(propertyData.(string)), nil + return []byte(keyData.(string)), nil } -func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretKeyQuery) ([]v1alpha1.SecretKey, error) { +func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretQuery) ([]v1alpha1.SecretRef, error) { // Get relative path to dir queryPath := "" if query.Path != nil { @@ -88,14 +89,21 @@ func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretKeyQuery return nil, fmt.Errorf("api list returned invalid data") } - // Extract keys from response. - // A key in a KV store can be either a secret or a dir (marked by a suffix '/'). - var result []v1alpha1.SecretKey + // Extract keys from response + var result []v1alpha1.SecretRef for _, listKey := range listSlice { - keyPath := fmt.Sprintf("%s%v", queryPath, listKey) - if !strings.HasSuffix(keyPath, "/") { // key - result = append(result, v1alpha1.SecretKey{ - Key: keyPath, + // Extract key from path + key := fmt.Sprintf("%s%v", queryPath, listKey) + + // Skip values in KV store that are not keys (marked by a suffix '/'). + if strings.HasSuffix(key, "/") { + continue + } + + // Add key if it matches regexp query + if matches, _ := regexp.MatchString(query.Key.Regexp, key); matches { + result = append(result, v1alpha1.SecretRef{ + Key: key, }) } } @@ -103,14 +111,14 @@ func (c *client) ListSecretKeys(_ context.Context, query v1alpha1.SecretKeyQuery return result, nil } -func (c *client) SetSecret(_ context.Context, key v1alpha1.SecretKey, value []byte) error { +func (c *client) SetSecret(_ context.Context, key v1alpha1.SecretRef, value []byte) error { // Write secret to API keyPath := pathForKey(key) _, err := c.apiClient.RawClient().Logical().Write( fmt.Sprintf("%s/data/%s", c.apiKeyPath, keyPath), map[string]interface{}{ "data": map[string]interface{}{ - key.GetProperty(): value, + key.GetName(): value, }, }, ) @@ -126,7 +134,7 @@ func (c *client) SetSecret(_ context.Context, key v1alpha1.SecretKey, value []by // It could (potentially) be useful. // DEPRECATED //nolint -func (c *client) recursiveList(ctx context.Context, path string) ([]v1alpha1.SecretKey, error) { +func (c *client) recursiveList(ctx context.Context, path string) ([]v1alpha1.SecretRef, error) { // List API request response, err := c.apiClient.RawClient().Logical().List(fmt.Sprintf("%s/metadata/%s", c.apiKeyPath, path)) if err != nil { @@ -150,11 +158,11 @@ func (c *client) recursiveList(ctx context.Context, path string) ([]v1alpha1.Sec // A key in a KV store can be either a secret or a dir (marked by a suffix '/'). // For dirs, keep recursively listing them and adding their result results. // TODO: Track changes to Vault API https://github.com/hashicorp/vault/issues/5275. - var result []v1alpha1.SecretKey + var result []v1alpha1.SecretRef for _, listKey := range listSlice { subKey := fmt.Sprintf("%s%v", path, listKey) if !strings.HasSuffix(subKey, "/") { // key - result = append(result, v1alpha1.SecretKey{ + result = append(result, v1alpha1.SecretRef{ Key: subKey, }) } else { // dir @@ -172,6 +180,6 @@ func (c *client) recursiveList(ctx context.Context, path string) ([]v1alpha1.Sec return result, nil } -func pathForKey(key v1alpha1.SecretKey) string { - return strings.Join(append(key.GetPath(), key.GetProperty()), "/") +func pathForKey(key v1alpha1.SecretRef) string { + return strings.Join(append(key.GetPath(), key.GetName()), "/") } diff --git a/pkg/provider/vault/provider.go b/pkg/provider/vault/provider.go index bd8c175..e57d514 100644 --- a/pkg/provider/vault/provider.go +++ b/pkg/provider/vault/provider.go @@ -25,7 +25,7 @@ import ( type Provider struct{} -func (p *Provider) NewClient(_ context.Context, backend v1alpha1.SecretStoreProvider) (v1alpha1.StoreClient, error) { +func (p *Provider) NewClient(_ context.Context, backend v1alpha1.ProviderBackend) (v1alpha1.StoreClient, error) { vaultCfg := backend.Vault apiClient, err := vault.NewClientWithOptions( vault.ClientURL(vaultCfg.Address), @@ -39,11 +39,11 @@ func (p *Provider) NewClient(_ context.Context, backend v1alpha1.SecretStoreProv return &client{ apiClient: apiClient, - apiKeyPath: vaultCfg.UnsealKeysPath, + apiKeyPath: vaultCfg.StorePath, }, nil } -func (p *Provider) Validate(backend v1alpha1.SecretStoreProvider) error { +func (p *Provider) Validate(backend v1alpha1.ProviderBackend) error { vaultCfg := backend.Vault if vaultCfg == nil { return fmt.Errorf("empty Vault config") @@ -51,8 +51,8 @@ func (p *Provider) Validate(backend v1alpha1.SecretStoreProvider) error { if vaultCfg.Address == "" { return fmt.Errorf("empty .Vault.Address") } - if vaultCfg.UnsealKeysPath == "" { - return fmt.Errorf("empty .Vault.UnsealKeysPath") + if vaultCfg.StorePath == "" { + return fmt.Errorf("empty .Vault.StorePath") } if vaultCfg.AuthPath == "" { return fmt.Errorf("empty .Vault.AuthPath") @@ -64,7 +64,7 @@ func (p *Provider) Validate(backend v1alpha1.SecretStoreProvider) error { } func init() { - v1alpha1.Register(&Provider{}, &v1alpha1.SecretStoreProvider{ - Vault: &v1alpha1.SecretStoreProviderVault{}, + v1alpha1.Register(&Provider{}, &v1alpha1.ProviderBackend{ + Vault: &v1alpha1.VaultProvider{}, }) } diff --git a/pkg/storesync/processor.go b/pkg/storesync/processor.go new file mode 100644 index 0000000..8932c16 --- /dev/null +++ b/pkg/storesync/processor.go @@ -0,0 +1,383 @@ +// Copyright © 2023 Cisco +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storesync + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "sync" + "text/template" + + "golang.org/x/sync/errgroup" + + "github.com/bank-vaults/secret-sync/pkg/apis/v1alpha1" +) + +// processor is used to optimally fetch secrets from a source or internal fetched map. +type processor struct { + mu sync.RWMutex + source v1alpha1.StoreReader + fetched map[v1alpha1.SecretRef][]byte +} + +func newProcessor(source v1alpha1.StoreReader) *processor { + return &processor{ + mu: sync.RWMutex{}, + source: source, + fetched: map[v1alpha1.SecretRef][]byte{}, + } +} + +type FetchResponse struct { + // Always set + Data []byte + + // Only 1 is non-nil + FromRef *v1alpha1.SecretRef + FromQuery *v1alpha1.SecretQuery + FromSource *v1alpha1.SecretSource +} + +type SyncPlan struct { + Data []byte + Request *v1alpha1.SyncRequest + RequestID int +} + +// GetSyncPlan fetches the data from source and applies templating based on the provided v1alpha1.SyncRequest. +// Returned map defines all secrets that need to be sent to the target store to complete the request. +func (p *processor) GetSyncPlan(ctx context.Context, reqID int, req v1alpha1.SyncRequest) (map[v1alpha1.SecretRef]SyncPlan, error) { + switch { + // FromRef can only sync a single secret + case req.FromRef != nil: + resp, err := p.FetchFromRef(ctx, *req.FromRef) + if err != nil { + return nil, err + } + + syncRef := *req.FromRef + if req.Target.Key != nil { + syncRef.Key = *req.Target.Key + } + + syncValue := resp.Data + if !isTemplateEmpty(req.Template) { + syncValue, err = getTemplatedValue(req.Template, string(resp.Data)) + if err != nil { + return nil, err + } + } + + return map[v1alpha1.SecretRef]SyncPlan{ + syncRef: { + Data: syncValue, + Request: &req, + RequestID: reqID, + }, + }, nil + + // FromQuery can sync both a single secret or multiple secrets + case req.FromQuery != nil: + fetchResps, err := p.FetchFromQuery(ctx, *req.FromQuery) + if err != nil { + return nil, err + } + + // Handle FromQuery => Key + if req.Target.Key != nil { + if req.Flatten == nil || !*req.Flatten { + return nil, fmt.Errorf("requires 'flatten' for 'fromQuery' and 'target.key'") + } + + syncRef := v1alpha1.SecretRef{ + Key: *req.Target.Key, + Version: nil, + } + + templateData := make(map[string]string) + for ref, resp := range fetchResps { + templateData[ref.GetName()] = string(resp.Data) + } + if isTemplateEmpty(req.Template) { + return nil, fmt.Errorf("requires 'template' for 'fromQuery' and 'target.key'") + } + syncValue, err := getTemplatedValue(req.Template, templateData) + if err != nil { + return nil, err + } + + return map[v1alpha1.SecretRef]SyncPlan{ + syncRef: { + Data: syncValue, + Request: &req, + RequestID: reqID, + }, + }, nil + } + + // Handle FromQuery => KeyPrefix or empty + if req.Flatten != nil && *req.Flatten { + return nil, fmt.Errorf("cannot use 'flatten' for 'fromQuery' and 'target.key'") + } + + syncMap := make(map[v1alpha1.SecretRef]SyncPlan) + for ref, resp := range fetchResps { + syncRef := ref + if req.Target.KeyPrefix != nil { + syncRef.Key = *req.Target.KeyPrefix + ref.GetName() + } + + syncValue := resp.Data + if !isTemplateEmpty(req.Template) { + syncValue, err = getTemplatedValue(req.Template, string(resp.Data)) + if err != nil { + return nil, err + } + } + + syncMap[syncRef] = SyncPlan{ + Data: syncValue, + Request: &req, + RequestID: reqID, + } + } + return syncMap, nil + + // FromSources can only sync a single secret + case len(req.FromSources) > 0: + fetchResps, err := p.FetchFromSources(ctx, req.FromSources) + if err != nil { + return nil, err + } + + if req.Target.Key == nil { + return nil, fmt.Errorf("requires 'target.key' for 'fromSources'") + } + syncRef := v1alpha1.SecretRef{ + Key: *req.Target.Key, + Version: nil, + } + + templateData := make(map[string]interface{}) + for ref, resp := range fetchResps { + // For responses originating fromRef + source := resp.FromSource + if source.FromRef != nil { + // Ensures that .Data. fromRef is the secret value + templateData[source.Name] = string(resp.Data) + } + if source.FromQuery != nil { + // ensures that .Data.. fromQuery is the secret value + if templateData[source.Name] == nil { + templateData[source.Name] = make(map[string]string) + } + templateData[source.Name].(map[string]string)[ref.GetName()] = string(resp.Data) + } + } + if isTemplateEmpty(req.Template) { + return nil, fmt.Errorf("requires 'template' for 'fromSources'") + } + syncValue, err := getTemplatedValue(req.Template, templateData) + if err != nil { + return nil, err + } + + return map[v1alpha1.SecretRef]SyncPlan{ + syncRef: { + Data: syncValue, + Request: &req, + RequestID: reqID, + }, + }, nil + } + + return nil, fmt.Errorf("no sources specified") +} + +// FetchFromRef fetches v1alpha1.SecretRef data from reference or from internal fetch store. +func (p *processor) FetchFromRef(ctx context.Context, fromRef v1alpha1.SecretRef) (*FetchResponse, error) { + // Get from fetch store + data, exists := p.getFetchedSecret(fromRef) + + // Fetch and save if not found + if !exists { + var err error + data, err = p.source.GetSecret(ctx, fromRef) + if err != nil { + return nil, err + } + p.addFetchedSecret(fromRef, data) + } + + // Return + return &FetchResponse{ + Data: data, + FromRef: &fromRef, + }, nil +} + +// FetchFromQuery fetches v1alpha1.SecretRef data from query or from internal fetch store. +func (p *processor) FetchFromQuery(ctx context.Context, fromQuery v1alpha1.SecretQuery) (map[v1alpha1.SecretRef]FetchResponse, error) { + // List secrets from source + keyRefs, err := p.source.ListSecretKeys(ctx, fromQuery) + if err != nil { + return nil, fmt.Errorf("failed while doing query %v: %w", fromQuery, err) + } + + // Fetch queried keys in parallel + fetchMu := sync.Mutex{} + fetched := make(map[v1alpha1.SecretRef]FetchResponse) + fetchGroup, fetchCtx := errgroup.WithContext(ctx) + for _, ref := range keyRefs { + func(ref v1alpha1.SecretRef) { + fetchGroup.Go(func() error { + // Fetch + resp, err := p.FetchFromRef(fetchCtx, ref) + if err != nil { + return err + } + + // Update + fetchMu.Lock() + fetched[ref] = FetchResponse{ + Data: resp.Data, + FromQuery: &fromQuery, + } + fetchMu.Unlock() + return nil + }) + }(ref) + } + + // Return + if err = fetchGroup.Wait(); err != nil { + return nil, err + } + return fetched, nil +} + +// FetchFromSources fetches v1alpha1.SecretRef data from selectors or from internal fetch store.. +func (p *processor) FetchFromSources(ctx context.Context, fromSources []v1alpha1.SecretSource) (map[v1alpha1.SecretRef]FetchResponse, error) { + // Fetch source keys from source or fetch store in parallel + fetchMu := sync.Mutex{} + fetched := make(map[v1alpha1.SecretRef]FetchResponse) + fetchGroup, fetchCtx := errgroup.WithContext(ctx) + for _, src := range fromSources { + func(src v1alpha1.SecretSource) { + fetchGroup.Go(func() error { + // Fetch + kvData := make(map[v1alpha1.SecretRef][]byte) + switch { + case src.FromRef != nil: + resp, err := p.FetchFromRef(fetchCtx, *src.FromRef) + if err != nil { + return err + } + kvData[*src.FromRef] = resp.Data + + case src.FromQuery != nil: + respMap, err := p.FetchFromQuery(fetchCtx, *src.FromQuery) + if err != nil { + return err + } + for ref, resp := range respMap { + kvData[ref] = resp.Data + } + + default: + return fmt.Errorf("both ref and query are empty") + } + + // Update + fetchMu.Lock() + for ref, value := range kvData { + fetched[ref] = FetchResponse{ + Data: value, + FromSource: &src, + } + } + fetchMu.Unlock() + return nil + }) + }(src) + } + + // Return + if err := fetchGroup.Wait(); err != nil { + return nil, err + } + return fetched, nil +} + +// getFetchedSecret returns a key value from local fetched source. +func (p *processor) getFetchedSecret(ref v1alpha1.SecretRef) ([]byte, bool) { + p.mu.RLock() + defer p.mu.RUnlock() + res, ok := p.fetched[ref] + return res, ok +} + +// addFetchedSecret adds a key value to local fetched store. +func (p *processor) addFetchedSecret(ref v1alpha1.SecretRef, value []byte) { + p.mu.Lock() + defer p.mu.Unlock() + p.fetched[ref] = value +} + +func getTemplatedValue(syncTemplate *v1alpha1.SyncTemplate, templateData interface{}) ([]byte, error) { + // Handle Template.RawData + if syncTemplate.RawData != nil { + tpl, err := template.New("template").Parse(*syncTemplate.RawData) + if err != nil { + return nil, err + } + output := new(bytes.Buffer) + if err = tpl.Execute(output, struct{ Data interface{} }{Data: templateData}); err != nil { + return nil, err + } + return output.Bytes(), nil + } + + // Handle Template.Data + if len(syncTemplate.Data) > 0 { + outputMap := make(map[string]string) + for key, keyTpl := range syncTemplate.Data { + tpl, err := template.New("template").Parse(keyTpl) + if err != nil { + return nil, err + } + output := new(bytes.Buffer) + if err = tpl.Execute(output, struct{ Data interface{} }{Data: templateData}); err != nil { + return nil, err + } + outputMap[key] = output.String() + } + + return json.Marshal(outputMap) + } + + return nil, fmt.Errorf("cannot apply empty template") +} + +// isTemplateEmpty checks if template is defined. +// TODO: debug why syncTemplate is sometimes not nil when not specified +func isTemplateEmpty(syncTemplate *v1alpha1.SyncTemplate) bool { + if syncTemplate == nil { + return true + } + return syncTemplate.RawData == nil && len(syncTemplate.Data) == 0 +} diff --git a/pkg/storesync/storesync.go b/pkg/storesync/storesync.go index a2c92b5..4d11462 100644 --- a/pkg/storesync/storesync.go +++ b/pkg/storesync/storesync.go @@ -12,28 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. +// TODO: Expose a way to handle key collisions (for both fetch and sync) + package storesync import ( "context" "fmt" - "regexp" "sync" "sync/atomic" "time" "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" "github.com/bank-vaults/secret-sync/pkg/apis/v1alpha1" ) -// syncRequest defines data required to perform a key sync. -// This is the minimal unit of work for full v1alpha1.SecretKey sync. -type syncRequest struct { - SecretKey v1alpha1.SecretKey - KeyTransform []v1alpha1.SecretKeyTransform -} - // Status defines response data returned by Sync. type Status struct { Total uint32 // total number of keys marked for sync @@ -43,169 +38,105 @@ type Status struct { SyncedAt time.Time // completion timestamp } -// Sync will synchronize keys from source to dest based on provided specs. -func Sync(ctx context.Context, source v1alpha1.StoreReader, dest v1alpha1.StoreWriter, refs []v1alpha1.SecretKeyFromRef) (*Status, error) { +// Sync will synchronize keys from source to target based on provided specs. +func Sync(ctx context.Context, + source v1alpha1.StoreReader, + target v1alpha1.StoreWriter, + requests []v1alpha1.SyncRequest, +) (*Status, error) { // Validate if source == nil { return nil, fmt.Errorf("source is nil") } - if dest == nil { - return nil, fmt.Errorf("dest is nil") + if target == nil { + return nil, fmt.Errorf("target is nil") } - if len(refs) == 0 { - return nil, fmt.Errorf("no sync data") + if len(requests) == 0 { + return nil, fmt.Errorf("nothing to sync") } - // All sync requests will be concurrently sent to this channel - syncQueue := make(chan syncRequest, 1) - - // Fetch keys based on ref params and add them to sync queue. - // Do each fetch in a separate goroutine (there could be API requests). - { - extractWg := sync.WaitGroup{} - for i := range refs { - extractWg.Add(1) - go func(ref v1alpha1.SecretKeyFromRef) { - defer extractWg.Done() - - // Fetch keys - secretKeys, err := getKeys(ctx, source, ref) + // Define data stores + syncMu := sync.Mutex{} + syncPlan := make(map[v1alpha1.SecretRef]SyncPlan) + processor := newProcessor(source) + + // Get sync plan for each request in a separate goroutine. + // If the same secret needs to be synced more than once, abort sync. + fetchGroup, fetchCtx := errgroup.WithContext(ctx) + for id, req := range requests { + func(id int, req v1alpha1.SyncRequest) { + fetchGroup.Go(func() error { + // Fetch keys to store + plans, err := processor.GetSyncPlan(fetchCtx, id, req) if err != nil { - logrus.Warnf("Failed to extract keys, reason: %v", err) + logrus.WithField("z-req", req).Warnf("Failed to fetch reqID = %d sync plan, reason: %v", id, err) + return nil } - // Submit keys for sync - for i := range secretKeys { - syncQueue <- syncRequest{ - SecretKey: secretKeys[i], // use fetched key - KeyTransform: ref.KeyTransform, // use same transform + // Add to sync data + syncMu.Lock() + defer syncMu.Unlock() + for ref, plan := range plans { + if _, exists := syncPlan[ref]; exists { + // This is a critical error; stop everything + return fmt.Errorf("key %v was schedule for sync more than once", ref) } + syncPlan[ref] = plan } - }(refs[i]) - } - - // Close sync request channel when everything has been extracted to stop the loop - go func() { - extractWg.Wait() - close(syncQueue) - }() + return nil + }) + }(id, req) } - // Sync keys between source and dest read from sync queue. - // Do sync for each key in a separate goroutine (there will be API requests). - var totalCount uint32 - var successCounter atomic.Uint32 - { - syncWg := sync.WaitGroup{} - syncKeyMap := make(map[string]bool) - for req := range syncQueue { - // Check if the key has already been synced - if _, exists := syncKeyMap[req.SecretKey.Key]; exists { - continue - } - syncKeyMap[req.SecretKey.Key] = true - totalCount++ + // Wait fetch + if err := fetchGroup.Wait(); err != nil { + return nil, fmt.Errorf("aborted syncing, reason: %w", err) + } - // Sync key in a separate goroutine - syncWg.Add(1) - go func(req syncRequest) { - defer syncWg.Done() + // Sync requests from source to target store. + // Do sync for each plan item in a separate goroutine. + var syncWg sync.WaitGroup + var syncCounter atomic.Uint32 + for ref, plan := range syncPlan { + syncWg.Add(1) + go func(ref v1alpha1.SecretRef, plan SyncPlan) { + defer syncWg.Done() + + // Sync + var err error + if len(plan.Data) == 0 { + err = fmt.Errorf("empty value") + } else { + err = target.SetSecret(ctx, ref, plan.Data) + } - key := req.SecretKey - destKey, err := doRequest(ctx, source, dest, req) - if err != nil { - if err == v1alpha1.ErrKeyNotFound { // not found, soft warn - logrus.Warnf("Skipped syncing key '%s', reason: %v", key.Key, err) - } else { // otherwise, log error - logrus.Errorf("Failed to sync key '%s', reason: %v", key.Key, err) - } - return + // Handle response + if err != nil { + if err == v1alpha1.ErrKeyNotFound { // not found, soft warn + logrus.WithField("z-req", plan.Request). + Warnf("Skipped syncing reqID = %d for key %s, reason: %v", plan.RequestID, ref.Key, err) + } else { // otherwise, log error + logrus.WithField("z-req", plan.Request). + Errorf("Failed to sync reqID = %d for key %s, reason: %v", plan.RequestID, ref.Key, err) } + return + } - logrus.Infof("Successfully synced key '%s' to '%s'", key.Key, destKey.Key) - successCounter.Add(1) - }(req) - } - syncWg.Wait() + logrus.WithField("z-req", plan.Request). + Infof("Successfully synced reqID = %d for key %s", plan.RequestID, ref.Key /* , string(plan.Data) */) + syncCounter.Add(1) + }(ref, plan) } + syncWg.Wait() // Return response - successCount := successCounter.Load() + syncCount := syncCounter.Load() + totalCount := uint32(len(syncPlan)) return &Status{ Total: totalCount, - Synced: successCount, - Success: totalCount == successCount, - Status: fmt.Sprintf("Synced %d out of total %d keys", successCount, totalCount), + Synced: syncCount, + Success: totalCount == syncCount, + Status: fmt.Sprintf("Synced %d out of total %d keys", syncCount, totalCount), SyncedAt: time.Now(), }, nil } - -// getKeys fetches (one or multiple) v1alpha1.SecretKey for a single v1alpha1.SecretKeyFromRef. -// Performs an API list request on source if ref Query is specified to get multiple v1alpha1.SecretKey. -func getKeys(ctx context.Context, source v1alpha1.StoreReader, ref v1alpha1.SecretKeyFromRef) ([]v1alpha1.SecretKey, error) { - // Validate - if ref.SecretKey == nil && ref.Query == nil { - return nil, fmt.Errorf("both SecretKey and Query are empty, at least one required") - } - - // Get keys - var keys []v1alpha1.SecretKey - if ref.SecretKey != nil { - // Add static key - keys = append(keys, *ref.SecretKey) - } - if ref.Query != nil { - // Get keys from API - listKeys, err := source.ListSecretKeys(ctx, *ref.Query) - if err != nil { - return nil, fmt.Errorf("failed while doing query %v: %w", *ref.Query, err) - } - keys = append(listKeys, keys...) - } - - return keys, nil -} - -// doRequest will sync a given syncRequest from source to dest. Returns key that was synced to dest or error. -func doRequest(ctx context.Context, source v1alpha1.StoreReader, dest v1alpha1.StoreWriter, req syncRequest) (v1alpha1.SecretKey, error) { - // Get from source - key := req.SecretKey - value, err := source.GetSecret(ctx, key) - if err != nil { - return key, err - } - - // TODO: Consider adding a check to see if the secret needs to be updated. - // TODO: This adds additional option to Sync CRD => skip API set if get didn't change since last time - - // Transform before writing to dest - updatedKey, err := applyTransform(key, req.KeyTransform) - if err != nil { - return key, err - } - - // Set to dest - err = dest.SetSecret(ctx, updatedKey, value) - if err != nil { - return updatedKey, err - } - - return updatedKey, nil -} - -// applyTransform applies transform to v1alpha1.SecretKey and returns updated key or error. -func applyTransform(secretKey v1alpha1.SecretKey, transforms []v1alpha1.SecretKeyTransform) (v1alpha1.SecretKey, error) { - for _, transform := range transforms { - // Update Regexp field - keyRegex := transform.Regexp - if keyRegex == nil { - continue - } - keyGroup, err := regexp.Compile(keyRegex.Source) - if err != nil { - return secretKey, fmt.Errorf("failed to compile regex %s: %w", keyRegex.Source, err) - } - secretKey.Key = keyGroup.ReplaceAllString(secretKey.Key, keyRegex.Target) - } - return secretKey, nil -} diff --git a/pkg/storesync/storesync_test.go b/pkg/storesync/storesync_test.go index 9dd0c8d..a0d3882 100644 --- a/pkg/storesync/storesync_test.go +++ b/pkg/storesync/storesync_test.go @@ -14,143 +14,117 @@ package storesync_test -import ( - "context" - "io" - "os" - "testing" - - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - - "github.com/bank-vaults/secret-sync/pkg/apis/v1alpha1" - "github.com/bank-vaults/secret-sync/pkg/provider" - "github.com/bank-vaults/secret-sync/pkg/storesync" -) - -func BenchmarkSync(b *testing.B) { - b.ReportAllocs() - - // Prepare - source := &fakeClient{} - dest := &fakeClient{} - requests := refKeys("a", "b/b", "c/c/c") - logrus.SetOutput(io.Discard) - - // Run - for i := 0; i < b.N; i++ { - _, _ = storesync.Sync(context.Background(), source, dest, requests) - } -} - -func TestSync(t *testing.T) { - testCtx := context.Background() - - // Define sync data - source := createFileStore(t, "from-dir") - dest := createFileStore(t, "to-dir") - _ = createVaultStore(t, "http://0.0.0.0:8200", "root") - _ = createVaultStore(t, "http://0.0.0.0:8201", "root") - - expected := fromKeys("a", "b/b", "c/c/c", "d/d/d/0", "d/d/d/1", "d/d/d/2", "d/d/d/d") - requests := append( - refKeys("a", "b/b", "c/c/c"), - refFilter("d/d/d", ".*"), - ) - - // Init source store - initStore(t, source, expected) - - // Sync - resp, err := storesync.Sync(testCtx, source, dest, requests) - assert.Nil(t, err) - - // Validate that dest is synced - assert.Equal(t, true, resp.Success) - assert.Equal(t, true, resp.Synced > 0) - for _, key := range expected { - gotVal, err := dest.GetSecret(testCtx, key) - assert.Nil(t, err, key) - assert.Equal(t, []byte(key.Key), gotVal, key) - } -} - -func initStore(t *testing.T, store v1alpha1.StoreClient, keys []v1alpha1.SecretKey) { - for _, key := range keys { - assert.Nil(t, store.SetSecret(context.Background(), key, []byte(key.Key))) - } -} - -func fromKeys(keys ...string) []v1alpha1.SecretKey { - result := make([]v1alpha1.SecretKey, 0) - for _, key := range keys { - result = append(result, v1alpha1.SecretKey{ - Key: key, - }) - } - return result -} - -func refKeys(keys ...string) []v1alpha1.SecretKeyFromRef { - result := make([]v1alpha1.SecretKeyFromRef, 0) - for _, key := range keys { - result = append(result, v1alpha1.SecretKeyFromRef{ - SecretKey: &v1alpha1.SecretKey{ - Key: key, - }, - }) - } - return result -} - -func refFilter(path string, filter string) v1alpha1.SecretKeyFromRef { - return v1alpha1.SecretKeyFromRef{ - Query: &v1alpha1.SecretKeyQuery{ - Path: &path, - Key: &v1alpha1.RegexpQuery{ - Regexp: filter, - }, - }, - } -} - -func createFileStore(t *testing.T, name string) v1alpha1.StoreClient { - path, err := os.MkdirTemp("", name) - assert.Nil(t, err) - t.Cleanup(func() { _ = os.RemoveAll(path) }) - - client, err := provider.NewClient(context.Background(), &v1alpha1.SecretStoreProvider{ - File: &v1alpha1.SecretStoreProviderFile{ - DirPath: path, - }, - }) - assert.Nil(t, err) - return client -} - -func createVaultStore(t *testing.T, addr, token string) v1alpha1.StoreClient { - client, err := provider.NewClient(context.Background(), &v1alpha1.SecretStoreProvider{ - Vault: &v1alpha1.SecretStoreProviderVault{ - Address: addr, - UnsealKeysPath: "secret", - AuthPath: "userpass", - Token: token, - }, - }) - assert.Nil(t, err) - return client -} - -type fakeClient struct{} - -func (c *fakeClient) GetSecret(_ context.Context, _ v1alpha1.SecretKey) ([]byte, error) { - return []byte(""), nil -} - -func (c *fakeClient) ListSecretKeys(_ context.Context, _ v1alpha1.SecretKeyQuery) ([]v1alpha1.SecretKey, error) { - return []v1alpha1.SecretKey{{}, {}}, nil -} - -func (c *fakeClient) SetSecret(_ context.Context, _ v1alpha1.SecretKey, _ []byte) error { - return nil -} +//func BenchmarkSync(b *testing.B) { +// b.ReportAllocs() +// +// // Prepare +// source := &fakeClient{} +// dest := &fakeClient{} +// requests := refKeys("a", "b/b", "c/c/c") +// logrus.SetOutput(io.Discard) +// +// // Run +// for i := 0; i < b.N; i++ { +// _, _ = storesync.Sync(context.Background(), source, dest, requests) +// } +//} +// +//func TestSync(t *testing.T) { +// testCtx := context.Background() +// +// // Define sync data +// source := createFileStore(t, "from-dir") +// dest := createFileStore(t, "to-dir") +// _ = createVaultStore(t, "http://0.0.0.0:8200", "root") +// _ = createVaultStore(t, "http://0.0.0.0:8201", "root") +// +// expected := fromKeys("a", "b/b", "c/c/c", "d/d/d/0", "d/d/d/1", "d/d/d/2", "d/d/d/d") +// requests := append( +// refKeys("a", "b/b", "c/c/c"), +// refFilter("d/d/d", ".*"), +// ) +// +// // Init source store +// initStore(t, source, expected) +// +// // Sync +// resp, err := storesync.Sync(testCtx, source, dest, requests) +// assert.Nil(t, err) +// +// // Validate that dest is synced +// assert.Equal(t, true, resp.Success) +// assert.Equal(t, true, resp.Synced > 0) +// for _, key := range expected { +// gotVal, err := dest.GetSecret(testCtx, key) +// assert.Nil(t, err, key) +// assert.Equal(t, []byte(key.Key), gotVal, key) +// } +//} +// +//func initStore(t *testing.T, store v1alpha1.StoreClient, keys []v1alpha1.SecretRef) { +// for _, key := range keys { +// assert.Nil(t, store.SetSecret(context.Background(), key, []byte(key.Key))) +// } +//} +// +//func fromKeys(keys ...string) []v1alpha1.SecretRef { +// result := make([]v1alpha1.SecretRef, 0) +// for _, key := range keys { +// result = append(result, v1alpha1.SecretRef{ +// Key: key, +// }) +// } +// return result +//} +// +//func refFrom(keys ...string) []v1alpha1.StrategyDataFrom { +// result := make([]v1alpha1.StrategyDataFrom, 0) +// for _, key := range keys { +// result = append(result, v1alpha1.StrategyDataFrom{ +// Name: "", +// SecretRef: nil, +// }) +// } +// return result +//} +// +//func createFileStore(t *testing.T, name string) v1alpha1.StoreClient { +// path, err := os.MkdirTemp("", name) +// assert.Nil(t, err) +// t.Cleanup(func() { _ = os.RemoveAll(path) }) +// +// client, err := provider.NewClient(context.Background(), &v1alpha1.ProviderBackend{ +// File: &v1alpha1.FileProvider{ +// DirPath: path, +// }, +// }) +// assert.Nil(t, err) +// return client +//} +// +//func createVaultStore(t *testing.T, addr, token string) v1alpha1.StoreClient { +// client, err := provider.NewClient(context.Background(), &v1alpha1.ProviderBackend{ +// Vault: &v1alpha1.VaultProvider{ +// Address: addr, +// storePath: "secret", +// AuthPath: "userpass", +// Token: token, +// }, +// }) +// assert.Nil(t, err) +// return client +//} +// +//type fakeClient struct{} +// +//func (c *fakeClient) GetSecret(_ context.Context, key v1alpha1.SecretRef) ([]byte, error) { +// return []byte(""), nil +//} +// +//func (c *fakeClient) ListSecretKeys(_ context.Context, _ v1alpha1.SecretKeyQuery) ([]v1alpha1.SecretRef, error) { +// return []v1alpha1.SecretRef{{}, {}}, nil +//} +// +//func (c *fakeClient) SetSecret(_ context.Context, key v1alpha1.SecretRef, value []byte) error { +// return nil +//}