diff --git a/examples/helm/kanister/kanister-postgresql/kanister/postgres-blueprint.yaml b/examples/helm/kanister/kanister-postgresql/kanister/postgres-blueprint.yaml index 64a6243245..d46cc64dd4 100644 --- a/examples/helm/kanister/kanister-postgresql/kanister/postgres-blueprint.yaml +++ b/examples/helm/kanister/kanister-postgresql/kanister/postgres-blueprint.yaml @@ -34,7 +34,7 @@ actions: then # Setup wal-e s3 connection parameters. timeline={{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02T15-04-05" }} - wale_s3_prefix="s3://{{ .Profile.Location.S3Compliant.Bucket }}/postgres-backups/{{ .Deployment.Name }}/${timeline}" + wale_s3_prefix="s3://{{ .Profile.Location.Bucket }}/postgres-backups/{{ .Deployment.Name }}/${timeline}" echo "${wale_s3_prefix}" > "${env_wal_prefix}" fi @@ -44,8 +44,8 @@ actions: env_wal_access_key_id="${env_dir}/AWS_ACCESS_KEY_ID" env_wal_secret_access_key="${env_dir}/AWS_SECRET_ACCESS_KEY" - {{- if .Profile.Location.S3Compliant.Endpoint }} - wale_s3_endpoint="{{ .Profile.Location.S3Compliant.Endpoint | quote }}" + {{- if .Profile.Location.Endpoint }} + wale_s3_endpoint="{{ .Profile.Location.Endpoint | quote }}" wale_s3_endpoint=${wale_s3_endpoint,,} {{- if .Profile.SkipSSLVerify }} # Since wal-e does not support skip-ssl-verify switch to http:// @@ -63,8 +63,8 @@ actions: {{- else }} # Region is required when no endpoint is used (AWS S3). 
wale_s3_region="us-east-1" - {{- if .Profile.Location.S3Compliant.Region }} - wale_s3_region="{{ .Profile.Location.S3Compliant.Region | quote}}" + {{- if .Profile.Location.Region }} + wale_s3_region="{{ .Profile.Location.Region | quote}}" {{- end }} echo "${wale_s3_region}" > "${env_wal_region}" {{- end }} @@ -83,13 +83,13 @@ actions: {{- if .Profile.SkipSSLVerify }} s3_cmd+=("--no-verify-ssl") {{- end }} - {{- if .Profile.Location.S3Compliant.Endpoint }} - s3_cmd+=(--endpoint "{{ .Profile.Location.S3Compliant.Endpoint }}") + {{- if .Profile.Location.Endpoint }} + s3_cmd+=(--endpoint "{{ .Profile.Location.Endpoint }}") {{- end }} - {{- if .Profile.Location.S3Compliant.Region }} - s3_cmd+=(--region "{{ .Profile.Location.S3Compliant.Region | quote}}") + {{- if .Profile.Location.Region }} + s3_cmd+=(--region "{{ .Profile.Location.Region | quote}}") {{- end }} - s3_path="s3://{{ .Profile.Location.S3Compliant.Bucket }}/postgres-backups/{{ .Deployment.Name }}/{{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02T15-04-05" }}/manifest.txt" + s3_path="s3://{{ .Profile.Location.Bucket }}/postgres-backups/{{ .Deployment.Name }}/{{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02T15-04-05" }}/manifest.txt" s3_cmd+=(s3 cp - "${s3_path}") set +o xtrace @@ -147,13 +147,13 @@ actions: {{- if .Profile.SkipSSLVerify }} s3_cmd+=(" --no-verify-ssl") {{- end }} - {{- if .Profile.Location.S3Compliant.Endpoint }} - s3_cmd+=(--endpoint "{{ .Profile.Location.S3Compliant.Endpoint }}") + {{- if .Profile.Location.Endpoint }} + s3_cmd+=(--endpoint "{{ .Profile.Location.Endpoint }}") {{- end }} - {{- if .Profile.Location.S3Compliant.Region }} - s3_cmd+=(--region "{{ .Profile.Location.S3Compliant.Region | quote}}") + {{- if .Profile.Location.Region }} + s3_cmd+=(--region "{{ .Profile.Location.Region | quote}}") {{- end }} - s3_path="s3://{{ .Profile.Location.S3Compliant.Bucket }}/{{ .ArtifactsIn.manifest.KeyValue.path }}" + s3_path="s3://{{ 
.Profile.Location.Bucket }}/{{ .ArtifactsIn.manifest.KeyValue.path }}" s3_cmd+=(s3 cp "${s3_path}" -) set +o xtrace @@ -166,8 +166,8 @@ actions: # Fetch base backup using the old WALE_S3_PREFIX. # First need to setup wal-e conf as env vars - {{- if .Profile.Location.S3Compliant.Endpoint }} - wale_s3_endpoint="{{ .Profile.Location.S3Compliant.Endpoint | quote}}" + {{- if .Profile.Location.Endpoint }} + wale_s3_endpoint="{{ .Profile.Location.Endpoint | quote}}" wale_s3_endpoint=${wale_s3_endpoint,,} {{- if .Profile.SkipSSLVerify }} # Since wal-e does not support skip-ssl-verify switch to http:// @@ -183,8 +183,8 @@ actions: # Region will be ignored for S3 compatible object store so skipping. {{- else }} # Region is required when no endpoint is used (AWS S3). - {{- if .Profile.Location.S3Compliant.Region }} - wale_s3_region="{{ .Profile.Location.S3Compliant.Region | quote}}" + {{- if .Profile.Location.Region }} + wale_s3_region="{{ .Profile.Location.Region | quote}}" {{- else }} wale_s3_region="us-east-1" {{- end }} @@ -226,7 +226,7 @@ actions: # Recovery is now complete and can switch to new WAL timeline env_wal_prefix="${pgdata}/env/WALE_S3_PREFIX" timeline={{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02T15-04-05" }} - wale_s3_prefix="s3://{{ .Profile.Location.S3Compliant.Bucket }}/{{ .ArtifactsIn.manifest.KeyValue.prefix }}/${timeline}" + wale_s3_prefix="s3://{{ .Profile.Location.Bucket }}/{{ .ArtifactsIn.manifest.KeyValue.prefix }}/${timeline}" echo "${wale_s3_prefix}" > "${env_wal_prefix}" - func: ScaleWorkload name: restartPod @@ -267,13 +267,13 @@ actions: {{- if .Profile.SkipSSLVerify }} aws_args+=(" --no-verify-ssl") {{- end }} - {{- if .Profile.Location.S3Compliant.Endpoint }} - aws_args+=(--endpoint "{{ .Profile.Location.S3Compliant.Endpoint }}") + {{- if .Profile.Location.Endpoint }} + aws_args+=(--endpoint "{{ .Profile.Location.Endpoint }}") {{- end }} - {{- if .Profile.Location.S3Compliant.Region }} - aws_args+=(--region "{{ 
.Profile.Location.S3Compliant.Region | quote}}") + {{- if .Profile.Location.Region }} + aws_args+=(--region "{{ .Profile.Location.Region | quote}}") {{- end }} - s3_path="s3://{{ .Profile.Location.S3Compliant.Bucket }}/{{ .ArtifactsIn.manifest.KeyValue.path }}" + s3_path="s3://{{ .Profile.Location.Bucket }}/{{ .ArtifactsIn.manifest.KeyValue.path }}" # Get and parse artifact manifest to discover the timeline and the base-backup name. @@ -286,8 +286,8 @@ actions: aws "${aws_args[@]}" s3 rm --recursive "${base_backup_path}" # Setup configuration for wal-e. - {{- if .Profile.Location.S3Compliant.Endpoint }} - wale_s3_endpoint="{{ .Profile.Location.S3Compliant.Endpoint | quote}}" + {{- if .Profile.Location.Endpoint }} + wale_s3_endpoint="{{ .Profile.Location.Endpoint | quote}}" wale_s3_endpoint=${wale_s3_endpoint,,} {{- if .Profile.SkipSSLVerify }} # Since wal-e does not support skip-ssl-verify switch to http:// @@ -303,8 +303,8 @@ actions: # Region will be ignored for S3 compatible object store so skipping. {{- else }} # Region is required when no endpoint is used (AWS S3). 
- {{- if .Profile.Location.S3Compliant.Region }} - wale_s3_region="{{ .Profile.Location.S3Compliant.Region | quote}}" + {{- if .Profile.Location.Region }} + wale_s3_region="{{ .Profile.Location.Region | quote}}" {{- else }} wale_s3_region="us-east-1" {{- end }} diff --git a/examples/time-log/blueprint.yaml b/examples/time-log/blueprint.yaml index 3ce740b0cf..ba85b34693 100644 --- a/examples/time-log/blueprint.yaml +++ b/examples/time-log/blueprint.yaml @@ -9,7 +9,7 @@ actions: outputArtifacts: timeLog: keyValue: - path: '{{ .Profile.Location.S3Compliant.Bucket }}/time-log' + path: '{{ .Profile.Location.Bucket }}/time-log' backupIdentifier: keyValue: id: '{{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02" }}' @@ -21,7 +21,7 @@ actions: pod: "{{ index .Deployment.Pods 0 }}" container: test-container includePath: /var/log - backupArtifactPrefix: "{{ .Profile.Location.S3Compliant.Bucket }}/time-log" + backupArtifactPrefix: "{{ .Profile.Location.Bucket }}/time-log" backupIdentifier: '{{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02" }}' restore: type: Deployment diff --git a/examples/time-log/s3-profile.yaml b/examples/time-log/s3-profile.yaml index 9ecfcb006f..5394b5f601 100644 --- a/examples/time-log/s3-profile.yaml +++ b/examples/time-log/s3-profile.yaml @@ -6,11 +6,10 @@ metadata: # Note: Add details of an existing S3 compliant bucket below location: type: s3Compliant - s3Compliant: - bucket: XXXX - endpoint: XXXX - prefix: XXXX - region: XXXX + bucket: XXXX + endpoint: XXXX + prefix: XXXX + region: XXXX credential: type: keyPair keyPair: diff --git a/helm/profile/templates/profile.yaml b/helm/profile/templates/profile.yaml index 153eabf8bf..2a5482781c 100644 --- a/helm/profile/templates/profile.yaml +++ b/helm/profile/templates/profile.yaml @@ -19,11 +19,10 @@ metadata: {{ include "profile.helmLabels" . 
| indent 4 }} location: type: s3Compliant - s3Compliant: - bucket: {{ required "S3 compatible bucket is required when configuring a profile." .Values.s3.bucket | quote }} - endpoint: {{ .Values.s3.endpoint | quote }} - prefix: {{ .Values.s3.prefix | quote }} - region: {{ .Values.s3.region | quote }} + bucket: {{ required "S3 compatible bucket is required when configuring a profile." .Values.s3.bucket | quote }} + endpoint: {{ .Values.s3.endpoint | quote }} + prefix: {{ .Values.s3.prefix | quote }} + region: {{ .Values.s3.region | quote }} credential: type: keyPair keyPair: diff --git a/pkg/apis/cr/v1alpha1/types.go b/pkg/apis/cr/v1alpha1/types.go index a0dc877f68..2df0d54548 100644 --- a/pkg/apis/cr/v1alpha1/types.go +++ b/pkg/apis/cr/v1alpha1/types.go @@ -228,21 +228,17 @@ type Profile struct { type LocationType string const ( + LocationTypeGCS LocationType = "gcs" LocationTypeS3Compliant LocationType = "s3Compliant" ) // Location type Location struct { - Type LocationType `json:"type"` - S3Compliant *S3CompliantLocation `json:"s3Compliant"` -} - -// S3Compliant -type S3CompliantLocation struct { - Bucket string `json:"bucket"` - Endpoint string `json:"endpoint"` - Prefix string `json:"prefix"` - Region string `json:"region"` + Type LocationType `json:"type"` + Bucket string `json:"bucket"` + Endpoint string `json:"endpoint"` + Prefix string `json:"prefix"` + Region string `json:"region"` } // CredentialType diff --git a/pkg/apis/cr/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/cr/v1alpha1/zz_generated.deepcopy.go index 074981dfe0..0da6974a07 100644 --- a/pkg/apis/cr/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/cr/v1alpha1/zz_generated.deepcopy.go @@ -421,11 +421,6 @@ func (in *KeyPair) DeepCopy() *KeyPair { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Location) DeepCopyInto(out *Location) { *out = *in - if in.S3Compliant != nil { - in, out := &in.S3Compliant, &out.S3Compliant - *out = new(S3CompliantLocation) - **out = **in - } return } @@ -470,7 +465,7 @@ func (in *Profile) DeepCopyInto(out *Profile) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Location.DeepCopyInto(&out.Location) + out.Location = in.Location in.Credential.DeepCopyInto(&out.Credential) return } @@ -529,19 +524,3 @@ func (in *ProfileList) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *S3CompliantLocation) DeepCopyInto(out *S3CompliantLocation) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3CompliantLocation. -func (in *S3CompliantLocation) DeepCopy() *S3CompliantLocation { - if in == nil { - return nil - } - out := new(S3CompliantLocation) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/function/backup_data_test.go b/pkg/function/backup_data_test.go index 9e6118af4e..dfe2e09f03 100644 --- a/pkg/function/backup_data_test.go +++ b/pkg/function/backup_data_test.go @@ -15,13 +15,11 @@ var _ = Suite(&BackupDataSuite{}) func newValidProfile() *param.Profile { return ¶m.Profile{ Location: crv1alpha1.Location{ - Type: crv1alpha1.LocationTypeS3Compliant, - S3Compliant: &crv1alpha1.S3CompliantLocation{ - Bucket: "test-bucket", - Endpoint: "", - Prefix: "", - Region: "us-west-1", - }, + Type: crv1alpha1.LocationTypeS3Compliant, + Bucket: "test-bucket", + Endpoint: "", + Prefix: "", + Region: "us-west-1", }, Credential: param.Credential{ Type: param.CredentialTypeKeyPair, @@ -37,13 +35,11 @@ func newValidProfile() *param.Profile { func newInvalidProfile() *param.Profile { return ¶m.Profile{ Location: crv1alpha1.Location{ - Type: "foo-type", - S3Compliant: &crv1alpha1.S3CompliantLocation{ - 
Bucket: "test-bucket", - Endpoint: "", - Prefix: "", - Region: "us-west-1", - }, + Type: "foo-type", + Bucket: "test-bucket", + Endpoint: "", + Prefix: "", + Region: "us-west-1", }, Credential: param.Credential{ Type: param.CredentialTypeKeyPair, diff --git a/pkg/function/create_volume_from_snapshot_test.go b/pkg/function/create_volume_from_snapshot_test.go index 6a3040f548..bed827faea 100644 --- a/pkg/function/create_volume_from_snapshot_test.go +++ b/pkg/function/create_volume_from_snapshot_test.go @@ -28,9 +28,8 @@ func (s *CreateVolumeFromSnapshotTestSuite) TestCreateVolumeFromSnapshot(c *C) { mockGetter := mockblockstorage.NewGetter() profile := ¶m.Profile{ Location: crv1alpha1.Location{ - Type: crv1alpha1.LocationTypeS3Compliant, - S3Compliant: &crv1alpha1.S3CompliantLocation{ - Region: "us-west-2"}, + Type: crv1alpha1.LocationTypeS3Compliant, + Region: "us-west-2", }, Credential: param.Credential{ Type: param.CredentialTypeKeyPair, diff --git a/pkg/function/create_volume_snapshot.go b/pkg/function/create_volume_snapshot.go index 428beddd09..1df90932b2 100644 --- a/pkg/function/create_volume_snapshot.go +++ b/pkg/function/create_volume_snapshot.go @@ -69,7 +69,7 @@ func ValidateProfile(profile *param.Profile) error { if profile.Location.Type != crv1alpha1.LocationTypeS3Compliant { return errors.New("Location type not supported") } - if len(profile.Location.S3Compliant.Region) == 0 { + if len(profile.Location.Region) == 0 { return errors.New("Region is not set") } if profile.Credential.Type != param.CredentialTypeKeyPair { diff --git a/pkg/function/create_volume_snapshot_test.go b/pkg/function/create_volume_snapshot_test.go index 16fd0b3ec5..50eecc31e4 100644 --- a/pkg/function/create_volume_snapshot_test.go +++ b/pkg/function/create_volume_snapshot_test.go @@ -27,9 +27,8 @@ func (s *CreateVolumeSnapshotTestSuite) TestGetPVCInfo(c *C) { tp := param.TemplateParams{ Profile: ¶m.Profile{ Location: crv1alpha1.Location{ - Type: crv1alpha1.LocationTypeS3Compliant, - 
S3Compliant: &crv1alpha1.S3CompliantLocation{ - Region: "us-west-2"}, + Type: crv1alpha1.LocationTypeS3Compliant, + Region: "us-west-2", }, Credential: param.Credential{ Type: param.CredentialTypeKeyPair, diff --git a/pkg/function/data_test.go b/pkg/function/data_test.go index 0bcc27efe4..c78f863d56 100644 --- a/pkg/function/data_test.go +++ b/pkg/function/data_test.go @@ -4,12 +4,6 @@ import ( "context" "fmt" - . "gopkg.in/check.v1" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - kanister "github.com/kanisterio/kanister/pkg" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" "github.com/kanisterio/kanister/pkg/client/clientset/versioned" @@ -17,6 +11,10 @@ import ( "github.com/kanisterio/kanister/pkg/param" "github.com/kanisterio/kanister/pkg/resource" "github.com/kanisterio/kanister/pkg/testutil" + . "gopkg.in/check.v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" ) type DataSuite struct { @@ -81,7 +79,7 @@ func newRestoreDataBlueprint(pvc string) *crv1alpha1.Blueprint { Args: map[string]interface{}{ RestoreDataNamespaceArg: "{{ .StatefulSet.Namespace }}", RestoreDataImageArg: "kanisterio/kanister-tools:0.15.0", - RestoreDataBackupArtifactPrefixArg: "{{ .Profile.Location.S3Compliant.Bucket }}/{{ .Profile.Location.S3Compliant.Prefix }}", + RestoreDataBackupArtifactPrefixArg: "{{ .Profile.Location.Bucket }}/{{ .Profile.Location.Prefix }}", RestoreDataRestorePathArg: "/mnt/data", RestoreDataBackupIdentifierArg: "{{ .Time }}", RestoreDataEncryptionKeyArg: "{{ .Secrets.backupKey.Data.password | toString }}", @@ -110,7 +108,7 @@ func newBackupDataBlueprint() *crv1alpha1.Blueprint { BackupDataPodArg: "{{ index .StatefulSet.Pods 0 }}", BackupDataContainerArg: "{{ index .StatefulSet.Containers 0 0 }}", BackupDataIncludePathArg: "/etc", - BackupDataBackupArtifactPrefixArg: "{{ .Profile.Location.S3Compliant.Bucket }}/{{ 
.Profile.Location.S3Compliant.Prefix }}", + BackupDataBackupArtifactPrefixArg: "{{ .Profile.Location.Bucket }}/{{ .Profile.Location.Prefix }}", BackupDataBackupIdentifierArg: "{{ .Time }}", BackupDataEncryptionKeyArg: "{{ .Secrets.backupKey.Data.password | toString }}", }, @@ -213,7 +211,7 @@ func newCopyDataTestBlueprint() crv1alpha1.Blueprint { Args: map[string]interface{}{ CopyVolumeDataNamespaceArg: "{{ .PVC.Namespace }}", CopyVolumeDataVolumeArg: "{{ .PVC.Name }}", - CopyVolumeDataArtifactPrefixArg: "{{ .Profile.Location.S3Compliant.Bucket }}/{{ .Profile.Location.S3Compliant.Prefix }}/{{ .PVC.Namespace }}/{{ .PVC.Name }}", + CopyVolumeDataArtifactPrefixArg: "{{ .Profile.Location.Bucket }}/{{ .Profile.Location.Prefix }}/{{ .PVC.Namespace }}/{{ .PVC.Name }}", }, }, }, diff --git a/pkg/function/delete_data.go b/pkg/function/delete_data.go index 1c7d722c3b..b8f0d7428b 100644 --- a/pkg/function/delete_data.go +++ b/pkg/function/delete_data.go @@ -36,8 +36,8 @@ func generateDeleteCommand(artifact string, profile *param.Profile) []string { cmd = append(cmd, "export", fmt.Sprintf("AWS_ACCESS_KEY_ID=%s\n", profile.Credential.KeyPair.ID)) // Command to delete from the object store cmd = append(cmd, "aws") - if profile.Location.S3Compliant.Endpoint != "" { - cmd = append(cmd, "--endpoint", profile.Location.S3Compliant.Endpoint) + if profile.Location.Endpoint != "" { + cmd = append(cmd, "--endpoint", profile.Location.Endpoint) } if profile.SkipSSLVerify { cmd = append(cmd, "--no-verify-ssl") diff --git a/pkg/function/delete_volume_snapshot_test.go b/pkg/function/delete_volume_snapshot_test.go index ab4e69fb8b..e7e700998b 100644 --- a/pkg/function/delete_volume_snapshot_test.go +++ b/pkg/function/delete_volume_snapshot_test.go @@ -23,9 +23,8 @@ func (s *DeleteVolumeSnapshotTestSuite) TestDeleteVolumeSnapshot(c *C) { mockGetter := mockblockstorage.NewGetter() profile := ¶m.Profile{ Location: crv1alpha1.Location{ - Type: crv1alpha1.LocationTypeS3Compliant, - S3Compliant: 
&crv1alpha1.S3CompliantLocation{ - Region: "us-west-2"}, + Type: crv1alpha1.LocationTypeS3Compliant, + Region: "us-west-2", }, Credential: param.Credential{ Type: param.CredentialTypeKeyPair, diff --git a/pkg/function/e2e_volume_snapshot_test.go b/pkg/function/e2e_volume_snapshot_test.go index 0f8097c59c..c52bc6a1ef 100644 --- a/pkg/function/e2e_volume_snapshot_test.go +++ b/pkg/function/e2e_volume_snapshot_test.go @@ -120,8 +120,7 @@ func NewTestProfile(namespace string, secretName string) *crv1alpha1.Profile { }, Location: crv1alpha1.Location{ Type: crv1alpha1.LocationTypeS3Compliant, - S3Compliant: &crv1alpha1.S3CompliantLocation{ - Region: os.Getenv(AWSRegion)}, + Region: os.Getenv(AWSRegion), }, Credential: crv1alpha1.Credential{ Type: crv1alpha1.CredentialTypeKeyPair, diff --git a/pkg/function/wait_for_snapshot_completion_test.go b/pkg/function/wait_for_snapshot_completion_test.go index 9a25de1cdd..2cec0b6b64 100644 --- a/pkg/function/wait_for_snapshot_completion_test.go +++ b/pkg/function/wait_for_snapshot_completion_test.go @@ -21,9 +21,8 @@ func (s *WaitForSnapshotCompletionTestSuite) TestWait(c *C) { mockGetter := mockblockstorage.NewGetter() profile := ¶m.Profile{ Location: crv1alpha1.Location{ - Type: crv1alpha1.LocationTypeS3Compliant, - S3Compliant: &crv1alpha1.S3CompliantLocation{ - Region: "us-west-2"}, + Type: crv1alpha1.LocationTypeS3Compliant, + Region: "us-west-2", }, Credential: param.Credential{ Type: param.CredentialTypeKeyPair, diff --git a/pkg/kanctl/profile.go b/pkg/kanctl/profile.go index 21c98641de..6275485944 100644 --- a/pkg/kanctl/profile.go +++ b/pkg/kanctl/profile.go @@ -164,13 +164,11 @@ func constructS3CompliantProfile(s3P *s3CompliantParams, secret *v1.Secret) *v1a GenerateName: "s3-profile-", }, Location: v1alpha1.Location{ - Type: v1alpha1.LocationTypeS3Compliant, - S3Compliant: &v1alpha1.S3CompliantLocation{ - Bucket: s3P.bucket, - Endpoint: s3P.endpoint, - Prefix: s3P.prefix, - Region: s3P.region, - }, + Type: 
v1alpha1.LocationTypeS3Compliant, + Bucket: s3P.bucket, + Endpoint: s3P.endpoint, + Prefix: s3P.prefix, + Region: s3P.region, }, Credential: v1alpha1.Credential{ Type: v1alpha1.CredentialTypeKeyPair, diff --git a/pkg/location/location.go b/pkg/location/location.go index b2a6106bf4..68019e162b 100644 --- a/pkg/location/location.go +++ b/pkg/location/location.go @@ -159,11 +159,11 @@ const s3Prefix = "s3://" func s3CompliantPath(profile param.Profile, suffix string) string { path := filepath.Join( - profile.Location.S3Compliant.Bucket, - profile.Location.S3Compliant.Prefix, + profile.Location.Bucket, + profile.Location.Prefix, suffix, ) - if strings.HasPrefix(profile.Location.S3Compliant.Bucket, s3Prefix) { + if strings.HasPrefix(profile.Location.Bucket, s3Prefix) { return path } return s3Prefix + path @@ -214,8 +214,8 @@ func checkIfS3Dir(ctx context.Context, profile param.Profile, suffix string) (st } func s3CompliantFlags(profile param.Profile) (cmd []string) { - if profile.Location.S3Compliant.Endpoint != "" { - cmd = append(cmd, "--endpoint", profile.Location.S3Compliant.Endpoint) + if profile.Location.Endpoint != "" { + cmd = append(cmd, "--endpoint", profile.Location.Endpoint) } if profile.SkipSSLVerify { cmd = append(cmd, "--no-verify-ssl") diff --git a/pkg/restic/restic.go b/pkg/restic/restic.go index a9d59a1edf..80006182e3 100644 --- a/pkg/restic/restic.go +++ b/pkg/restic/restic.go @@ -76,8 +76,8 @@ const ( func resticArgs(profile *param.Profile, repository, encryptionKey string) []string { s3Endpoint := awsS3Endpoint - if profile.Location.S3Compliant.Endpoint != "" { - s3Endpoint = profile.Location.S3Compliant.Endpoint + if profile.Location.Endpoint != "" { + s3Endpoint = profile.Location.Endpoint } return []string{ fmt.Sprintf("export %s=%s\n", location.AWSAccessKeyID, profile.Credential.KeyPair.ID), diff --git a/pkg/testutil/fixture.go b/pkg/testutil/fixture.go index 7da7b8f920..419577fad0 100644 --- a/pkg/testutil/fixture.go +++ 
b/pkg/testutil/fixture.go @@ -23,11 +23,9 @@ func ObjectStoreProfileOrSkip(c *check.C) *param.Profile { skipIfEnvNotSet(c, objectStoreTestEnvVars) return ¶m.Profile{ Location: crv1alpha1.Location{ - Type: crv1alpha1.LocationTypeS3Compliant, - S3Compliant: &crv1alpha1.S3CompliantLocation{ - Bucket: os.Getenv(testBucketName), - Prefix: c.TestName(), - }, + Type: crv1alpha1.LocationTypeS3Compliant, + Bucket: os.Getenv(testBucketName), + Prefix: c.TestName(), }, Credential: param.Credential{ Type: param.CredentialTypeKeyPair, diff --git a/pkg/testutil/testutil.go b/pkg/testutil/testutil.go index 1ae71c40c5..7bbe87d604 100644 --- a/pkg/testutil/testutil.go +++ b/pkg/testutil/testutil.go @@ -112,8 +112,7 @@ func NewTestProfile(namespace string, secretName string) *crv1alpha1.Profile { Namespace: namespace, }, Location: crv1alpha1.Location{ - Type: crv1alpha1.LocationTypeS3Compliant, - S3Compliant: &crv1alpha1.S3CompliantLocation{}, + Type: crv1alpha1.LocationTypeS3Compliant, }, Credential: crv1alpha1.Credential{ Type: crv1alpha1.CredentialTypeKeyPair, diff --git a/pkg/validate/validate.go b/pkg/validate/validate.go index 999d6d2b54..7fa90ea38e 100644 --- a/pkg/validate/validate.go +++ b/pkg/validate/validate.go @@ -124,10 +124,10 @@ func ProfileSchema(p *crv1alpha1.Profile) error { if p.Credential.Type != crv1alpha1.CredentialTypeKeyPair { return errorf("unknown or unsupported credential type '%s'", p.Credential.Type) } - if p.Location.S3Compliant.Bucket == "" { + if p.Location.Bucket == "" { return errorf("S3 bucket not specified") } - if p.Location.S3Compliant.Endpoint == "" && p.Location.S3Compliant.Region == "" { + if p.Location.Endpoint == "" && p.Location.Region == "" { return errorf("S3 bucket region not specified") } if p.Credential.KeyPair.Secret.Name == "" { @@ -140,8 +140,8 @@ func ProfileSchema(p *crv1alpha1.Profile) error { } func ProfileBucket(ctx context.Context, p *crv1alpha1.Profile) error { - bucketName := p.Location.S3Compliant.Bucket - givenRegion := 
p.Location.S3Compliant.Region + bucketName := p.Location.Bucket + givenRegion := p.Location.Region if givenRegion != "" { actualRegion, err := objectstore.GetS3BucketRegion(ctx, bucketName, givenRegion) if err != nil { @@ -165,19 +165,19 @@ func ReadAccess(ctx context.Context, p *crv1alpha1.Profile, cli kubernetes.Inter } pc := objectstore.ProviderConfig{ Type: objectstore.ProviderTypeS3, - Endpoint: p.Location.S3Compliant.Endpoint, + Endpoint: p.Location.Endpoint, SkipSSLVerify: p.SkipSSLVerify, } provider, err := objectstore.NewProvider(ctx, pc, secret) if err != nil { return err } - bucket, err := provider.GetBucket(ctx, p.Location.S3Compliant.Bucket) + bucket, err := provider.GetBucket(ctx, p.Location.Bucket) if err != nil { return err } if _, err := bucket.ListDirectories(ctx); err != nil { - return errorf("failed to list directories in bucket '%s'", p.Location.S3Compliant.Bucket) + return errorf("failed to list directories in bucket '%s'", p.Location.Bucket) } return nil } @@ -195,23 +195,23 @@ func WriteAccess(ctx context.Context, p *crv1alpha1.Profile, cli kubernetes.Inte } pc := objectstore.ProviderConfig{ Type: objectstore.ProviderTypeS3, - Endpoint: p.Location.S3Compliant.Endpoint, + Endpoint: p.Location.Endpoint, SkipSSLVerify: p.SkipSSLVerify, } provider, err := objectstore.NewProvider(ctx, pc, secret) if err != nil { return err } - bucket, err := provider.GetBucket(ctx, p.Location.S3Compliant.Bucket) + bucket, err := provider.GetBucket(ctx, p.Location.Bucket) if err != nil { return err } data := []byte("sample content") if err := bucket.PutBytes(ctx, objName, data, nil); err != nil { - return errorf("failed to write contents to bucket '%s'", p.Location.S3Compliant.Bucket) + return errorf("failed to write contents to bucket '%s'", p.Location.Bucket) } if err := bucket.Delete(ctx, objName); err != nil { - return errorf("failed to delete contents in bucket '%s'", p.Location.S3Compliant.Bucket) + return errorf("failed to delete contents in bucket '%s'", 
p.Location.Bucket) } return nil }