diff --git a/Gopkg.lock b/Gopkg.lock index f839403a4..af2575304 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -6,6 +6,18 @@ packages = ["compute/metadata","internal"] revision = "3b1ae45394a234c385be014e9a488f2bb6eef821" +[[projects]] + name = "github.com/Azure/azure-sdk-for-go" + packages = ["storage"] + revision = "2d1d76c9013c4feb6695a2346f0e66ea0ef77aa6" + version = "v11.3.0-beta" + +[[projects]] + name = "github.com/Azure/go-autorest" + packages = ["autorest","autorest/adal","autorest/azure","autorest/date"] + revision = "809ed2ef5c4c9a60c3c2f3aa9cc11f3a7c2ce59d" + version = "v9.6.0" + [[projects]] name = "github.com/PuerkitoBio/purell" packages = ["."] @@ -50,6 +62,12 @@ packages = ["spew"] revision = "782f4967f2dc4564575ca782fe2d04090b5faca8" +[[projects]] + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29" + version = "v3.1.0" + [[projects]] name = "github.com/emicklei/go-restful" packages = [".","log"] @@ -231,6 +249,12 @@ packages = [".","xfs"] revision = "65c1f6f8f0fc1e2185eb9863a3bc751496404259" +[[projects]] + name = "github.com/satori/uuid" + packages = ["."] + revision = "879c5887cd475cd7864858769793b2ceb0d44feb" + version = "v1.1.0" + [[projects]] name = "github.com/sirupsen/logrus" packages = ["."] @@ -328,6 +352,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "bc96c486c694ef6b8f178f9290a6e5aa4f41003968a3b9e62e289d67408b9b46" + inputs-digest = "348ce70c891e3e31857fd45276c70cf2f54e6a63b76b889ee79525d8e8ea86a0" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 2e251afbd..857bc0599 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -40,3 +40,7 @@ [[constraint]] name = "golang.org/x/time" + +[[constraint]] + name = "github.com/Azure/azure-sdk-for-go" + version = "v11.3.0-beta" diff --git a/pkg/apis/etcd/v1beta2/backup_types.go b/pkg/apis/etcd/v1beta2/backup_types.go index e11f6b695..3802ef9f5 100644 --- a/pkg/apis/etcd/v1beta2/backup_types.go +++ b/pkg/apis/etcd/v1beta2/backup_types.go @@ -17,10 +17,15 @@ package v1beta2 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" const ( - BackupStorageTypeS3 BackupStorageType = "S3" + // AWS S3 related consts + BackupStorageTypeS3 BackupStorageType = "S3" + AWSSecretCredentialsFileName = "credentials" + AWSSecretConfigFileName = "config" - AWSSecretCredentialsFileName = "credentials" - AWSSecretConfigFileName = "config" + // Azure ABS related consts + BackupStorageTypeABS BackupStorageType = "ABS" + AzureSecretStorageAccount = "storage-account" + AzureSecretStorageKey = "storage-key" ) type BackupStorageType string @@ -71,6 +76,9 @@ type BackupSpec struct { type BackupSource struct { // S3 defines the S3 backup source spec. S3 *S3BackupSource `json:"s3,omitempty"` + + // ABS defines the ABS backup source spec. + ABS *ABSBackupSource `json:"abs,omitempty"` } // BackupStatus represents the status of the EtcdBackup Custom Resource. @@ -104,3 +112,14 @@ type S3BackupSource struct { // stores. Endpoint string `json:"endpoint,omitempty"` } + +// ABSBackupSource provides the spec how to store backups on ABS. +type ABSBackupSource struct { + // Path is the full abs path where the backup is saved. 
+ // The format of the path must be: "<abs-container-name>/<path-to-backup-file>" + // e.g: "myabscontainer/etcd.backup" + Path string `json:"path"` + + // The name of the secret object that stores the Azure storage credential. + ABSSecret string `json:"absSecret"` +} diff --git a/pkg/apis/etcd/v1beta2/restore_types.go b/pkg/apis/etcd/v1beta2/restore_types.go index 491b0d5d2..2e82be47a 100644 --- a/pkg/apis/etcd/v1beta2/restore_types.go +++ b/pkg/apis/etcd/v1beta2/restore_types.go @@ -63,6 +63,9 @@ type EtcdClusterRef struct { type RestoreSource struct { // S3 tells where on S3 the backup is saved and how to fetch the backup. S3 *S3RestoreSource `json:"s3,omitempty"` + + // ABS tells where on ABS the backup is saved and how to fetch the backup. + ABS *ABSRestoreSource `json:"abs,omitempty"` } type S3RestoreSource struct { @@ -84,6 +87,16 @@ type S3RestoreSource struct { Endpoint string `json:"endpoint"` } +type ABSRestoreSource struct { + // Path is the full abs path where the backup is saved. + // The format of the path must be: "<abs-container-name>/<path-to-backup-file>" + // e.g: "myabscontainer/etcd.backup" + Path string `json:"path"` + + // The name of the secret object that stores the Azure Blob Storage credential. + ABSSecret string `json:"absSecret"` +} + // RestoreStatus reports the status of this restore operation. type RestoreStatus struct { // Succeeded indicates if the backup has Succeeded. diff --git a/pkg/apis/etcd/v1beta2/zz_generated.deepcopy.go b/pkg/apis/etcd/v1beta2/zz_generated.deepcopy.go index 3957a5b27..60be479c3 100644 --- a/pkg/apis/etcd/v1beta2/zz_generated.deepcopy.go +++ b/pkg/apis/etcd/v1beta2/zz_generated.deepcopy.go @@ -32,6 +32,14 @@ import ( // Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { return []conversion.GeneratedDeepCopyFunc{ + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*ABSBackupSource).DeepCopyInto(out.(*ABSBackupSource)) + return nil + }, InType: reflect.TypeOf(&ABSBackupSource{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*ABSRestoreSource).DeepCopyInto(out.(*ABSRestoreSource)) + return nil + }, InType: reflect.TypeOf(&ABSRestoreSource{})}, {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { in.(*BackupSource).DeepCopyInto(out.(*BackupSource)) return nil @@ -131,6 +139,38 @@ func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ABSBackupSource) DeepCopyInto(out *ABSBackupSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ABSBackupSource. +func (in *ABSBackupSource) DeepCopy() *ABSBackupSource { + if in == nil { + return nil + } + out := new(ABSBackupSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ABSRestoreSource) DeepCopyInto(out *ABSRestoreSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ABSRestoreSource. +func (in *ABSRestoreSource) DeepCopy() *ABSRestoreSource { + if in == nil { + return nil + } + out := new(ABSRestoreSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
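Both unions now carry an optional ABS member next to S3. A minimal sketch of populating the new sources from Go, assuming the v1beta2 package is imported as api; the container, blob name, and secret name below are placeholders, not values taken from this change:

package main

import (
	"fmt"

	api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2"
)

func main() {
	// Backup side: mirrors how spec.abs is expected to be filled in.
	backup := api.BackupSource{
		ABS: &api.ABSBackupSource{
			Path:      "myabscontainer/etcd.backup", // "<abs-container-name>/<path-to-backup-file>"
			ABSSecret: "abs-credentials",            // secret carrying storage-account / storage-key
		},
	}

	// Restore side uses the same path format and secret reference.
	restore := api.RestoreSource{
		ABS: &api.ABSRestoreSource{
			Path:      "myabscontainer/etcd.backup",
			ABSSecret: "abs-credentials",
		},
	}

	fmt.Println(backup.ABS.Path, restore.ABS.Path)
}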
func (in *BackupSource) DeepCopyInto(out *BackupSource) { *out = *in @@ -143,6 +183,15 @@ func (in *BackupSource) DeepCopyInto(out *BackupSource) { **out = **in } } + if in.ABS != nil { + in, out := &in.ABS, &out.ABS + if *in == nil { + *out = nil + } else { + *out = new(ABSBackupSource) + **out = **in + } + } return } @@ -597,6 +646,15 @@ func (in *RestoreSource) DeepCopyInto(out *RestoreSource) { **out = **in } } + if in.ABS != nil { + in, out := &in.ABS, &out.ABS + if *in == nil { + *out = nil + } else { + *out = new(ABSRestoreSource) + **out = **in + } + } return } diff --git a/pkg/backup/backupapi/api.go b/pkg/backup/backupapi/api.go deleted file mode 100644 index 26e4e6c69..000000000 --- a/pkg/backup/backupapi/api.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The etcd-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backupapi - -import "path" - -const ( - APIV1 = "/v1" - // S3V1 indicates the version 1 of - // S3 backup format: //"v1"// - S3V1 = "v1" -) - -// ToS3Prefix concatenates s3Prefix, S3V1, namespace, clusterName to a single s3 prefix. -// the concatenated prefix determines the location of S3 backup files. -func ToS3Prefix(s3Prefix, namespace, clusterName string) string { - return path.Join(s3Prefix, S3V1, namespace, clusterName) -} diff --git a/pkg/backup/backupapi/http.go b/pkg/backup/backupapi/http.go index 20e679bb1..902482ff1 100644 --- a/pkg/backup/backupapi/http.go +++ b/pkg/backup/backupapi/http.go @@ -19,6 +19,10 @@ import ( "path" ) +const ( + APIV1 = "/v1" +) + // BackupURLForRestore creates a URL struct for retrieving an existing backup specified by a restore CR func BackupURLForRestore(scheme, host, restoreName string) *url.URL { return &url.URL{ diff --git a/pkg/backup/reader/abs_reader.go b/pkg/backup/reader/abs_reader.go new file mode 100644 index 000000000..dcb51b0a9 --- /dev/null +++ b/pkg/backup/reader/abs_reader.go @@ -0,0 +1,58 @@ +// Copyright 2017 The etcd-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reader + +import ( + "fmt" + "io" + + "github.com/coreos/etcd-operator/pkg/backup/util" + + "github.com/Azure/azure-sdk-for-go/storage" +) + +// ensure absReader satisfies reader interface. 
+var _ Reader = &absReader{} + +// absReader provides a Reader implementation for reading a file from ABS +type absReader struct { + abs *storage.BlobStorageClient +} + +// NewABSReader returns a Reader implementation to read a file from ABS in the form of absReader +func NewABSReader(abs *storage.BlobStorageClient) Reader { + return &absReader{abs} +} + +// Open opens the file on path where path must be in the format "<abs-container-name>/<path-to-backup-file>" +func (absr *absReader) Open(path string) (io.ReadCloser, error) { + container, key, err := util.ParseBucketAndKey(path) + if err != nil { + return nil, fmt.Errorf("failed to parse abs container and key: %v", err) + } + + containerRef := absr.abs.GetContainerReference(container) + containerExists, err := containerRef.Exists() + if err != nil { + return nil, err + } + + if !containerExists { + return nil, fmt.Errorf("container %v does not exist", container) + } + + blob := containerRef.GetBlobReference(key) + return blob.Get(&storage.GetBlobOptions{}) +} diff --git a/pkg/backup/writer/abs_writer.go b/pkg/backup/writer/abs_writer.go new file mode 100644 index 000000000..5b9d3398b --- /dev/null +++ b/pkg/backup/writer/abs_writer.go @@ -0,0 +1,100 @@ +// Copyright 2017 The etcd-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writer + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + + "github.com/coreos/etcd-operator/pkg/backup/util" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/pborman/uuid" +) + +var _ Writer = &absWriter{} + +type absWriter struct { + abs *storage.BlobStorageClient +} + +// NewABSWriter creates an ABS writer. +func NewABSWriter(abs *storage.BlobStorageClient) Writer { + return &absWriter{abs} +} + +const ( + // AzureBlobBlockChunkLimitInBytes is the maximum block size (100 MiB) for a block blob + AzureBlobBlockChunkLimitInBytes = 100 * 1024 * 1024 +) +
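The Write method that follows buffers the whole snapshot in memory and uploads it as a block blob in chunks of at most AzureBlobBlockChunkLimitInBytes, committing the block list at the end. A self-contained sketch of the same boundary arithmetic (the names here are illustrative, not part of the change):

package main

import "fmt"

// blockLimit mirrors AzureBlobBlockChunkLimitInBytes.
const blockLimit = 100 * 1024 * 1024

// blockRanges reproduces the chunking arithmetic used by absWriter.Write:
// size/blockLimit + 1 blocks, with the last block clamped to the payload end.
func blockRanges(size int) [][2]int {
	count := size/blockLimit + 1
	ranges := make([][2]int, 0, count)
	for i := 0; i < count; i++ {
		start := i * blockLimit
		end := (i + 1) * blockLimit
		if size < end {
			end = size
		}
		ranges = append(ranges, [2]int{start, end})
	}
	return ranges
}

func main() {
	// A 250 MiB snapshot yields three blocks: 100 MiB, 100 MiB, and 50 MiB.
	for _, r := range blockRanges(250 * 1024 * 1024) {
		fmt.Println(r[0], r[1], r[1]-r[0])
	}
}

Note that when the payload size is an exact multiple of the limit, this arithmetic produces a trailing zero-length range; the Write implementation below shares that edge case.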
+// Write writes the backup file to the given abs path, "<abs-container-name>/<path-to-backup-file>". +func (absw *absWriter) Write(path string, r io.Reader) (int64, error) { + container, key, err := util.ParseBucketAndKey(path) + if err != nil { + return 0, err + } + + containerRef := absw.abs.GetContainerReference(container) + containerExists, err := containerRef.Exists() + if err != nil { + return 0, err + } + + if !containerExists { + return 0, fmt.Errorf("container %v does not exist", container) + } + + blob := containerRef.GetBlobReference(key) + err = blob.CreateBlockBlob(&storage.PutBlobOptions{}) + if err != nil { + return 0, err + } + + buf := new(bytes.Buffer) + buf.ReadFrom(r) + len := len(buf.Bytes()) + chunckCount := len/AzureBlobBlockChunkLimitInBytes + 1 + blocks := make([]storage.Block, 0, chunckCount) + for i := 0; i < chunckCount; i++ { + blockID := base64.StdEncoding.EncodeToString([]byte(uuid.New())) + blocks = append(blocks, storage.Block{ID: blockID, Status: storage.BlockStatusLatest}) + start := i * AzureBlobBlockChunkLimitInBytes + end := (i + 1) * AzureBlobBlockChunkLimitInBytes + if len < end { + end = len + } + + chunk := buf.Bytes()[start:end] + err = blob.PutBlock(blockID, chunk, &storage.PutBlockOptions{}) + if err != nil { + return 0, err + } + } + + err = blob.PutBlockList(blocks, &storage.PutBlockListOptions{}) + if err != nil { + return 0, err + } + + _, err = blob.Get(&storage.GetBlobOptions{}) + if err != nil { + return 0, err + } + + return blob.Properties.ContentLength, nil +} diff --git a/pkg/controller/backup-operator/abs_backup.go b/pkg/controller/backup-operator/abs_backup.go new file mode 100644 index 000000000..1d339e20b --- /dev/null +++ b/pkg/controller/backup-operator/abs_backup.go @@ -0,0 +1,47 @@ +// Copyright 2017 The etcd-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "crypto/tls" + "fmt" + + api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2" + "github.com/coreos/etcd-operator/pkg/backup" + "github.com/coreos/etcd-operator/pkg/backup/writer" + "github.com/coreos/etcd-operator/pkg/util/azureutil/absfactory" + + "k8s.io/client-go/kubernetes" +) + +// handleABS saves etcd cluster's backup to specified ABS path.
+func handleABS(kubecli kubernetes.Interface, s *api.ABSBackupSource, endpoints []string, clientTLSSecret, namespace string) (*api.BackupStatus, error) { + cli, err := absfactory.NewClientFromSecret(kubecli, namespace, s.ABSSecret) + if err != nil { + return nil, err + } + + var tlsConfig *tls.Config + if tlsConfig, err = generateTLSConfig(kubecli, clientTLSSecret, namespace); err != nil { + return nil, err + } + + bm := backup.NewBackupManagerFromWriter(kubecli, writer.NewABSWriter(cli.ABS), tlsConfig, endpoints, namespace) + rev, etcdVersion, err := bm.SaveSnap(s.Path) + if err != nil { + return nil, fmt.Errorf("failed to save snapshot (%v)", err) + } + return &api.BackupStatus{EtcdVersion: etcdVersion, EtcdRevision: rev}, nil +} diff --git a/pkg/controller/backup-operator/s3_backup.go b/pkg/controller/backup-operator/s3_backup.go index e2cbba476..48bfd66a1 100644 --- a/pkg/controller/backup-operator/s3_backup.go +++ b/pkg/controller/backup-operator/s3_backup.go @@ -22,8 +22,6 @@ import ( "github.com/coreos/etcd-operator/pkg/backup" "github.com/coreos/etcd-operator/pkg/backup/writer" "github.com/coreos/etcd-operator/pkg/util/awsutil/s3factory" - "github.com/coreos/etcd-operator/pkg/util/etcdutil" - "github.com/coreos/etcd-operator/pkg/util/k8sutil" "k8s.io/client-go/kubernetes" ) @@ -38,15 +36,8 @@ func handleS3(kubecli kubernetes.Interface, s *api.S3BackupSource, endpoints []s defer cli.Close() var tlsConfig *tls.Config - if len(clientTLSSecret) != 0 { - d, err := k8sutil.GetTLSDataFromSecret(kubecli, namespace, clientTLSSecret) - if err != nil { - return nil, fmt.Errorf("failed to get TLS data from secret (%v): %v", clientTLSSecret, err) - } - tlsConfig, err = etcdutil.NewTLSConfig(d.CertData, d.KeyData, d.CAData) - if err != nil { - return nil, fmt.Errorf("failed to constructs tls config: %v", err) - } + if tlsConfig, err = generateTLSConfig(kubecli, clientTLSSecret, namespace); err != nil { + return nil, err } bm := backup.NewBackupManagerFromWriter(kubecli, writer.NewS3Writer(cli.S3), tlsConfig, endpoints, namespace) diff --git a/pkg/controller/backup-operator/sync.go b/pkg/controller/backup-operator/sync.go index c732c59bc..6b439193a 100644 --- a/pkg/controller/backup-operator/sync.go +++ b/pkg/controller/backup-operator/sync.go @@ -119,6 +119,12 @@ func (b *Backup) handleBackup(spec *api.BackupSpec) (*api.BackupStatus, error) { return nil, err } return bs, nil + case api.BackupStorageTypeABS: + bs, err := handleABS(b.kubecli, spec.ABS, spec.EtcdEndpoints, spec.ClientTLSSecret, b.namespace) + if err != nil { + return nil, err + } + return bs, nil default: logrus.Fatalf("unknown StorageType: %v", spec.StorageType) } diff --git a/pkg/controller/backup-operator/util.go b/pkg/controller/backup-operator/util.go new file mode 100644 index 000000000..5cceba083 --- /dev/null +++ b/pkg/controller/backup-operator/util.go @@ -0,0 +1,40 @@ +// Copyright 2017 The etcd-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller + +import ( + "crypto/tls" + "fmt" + + "github.com/coreos/etcd-operator/pkg/util/etcdutil" + "github.com/coreos/etcd-operator/pkg/util/k8sutil" + + "k8s.io/client-go/kubernetes" +) + +func generateTLSConfig(kubecli kubernetes.Interface, clientTLSSecret, namespace string) (*tls.Config, error) { + var tlsConfig *tls.Config + if len(clientTLSSecret) != 0 { + d, err := k8sutil.GetTLSDataFromSecret(kubecli, namespace, clientTLSSecret) + if err != nil { + return nil, fmt.Errorf("failed to get TLS data from secret (%v): %v", clientTLSSecret, err) + } + tlsConfig, err = etcdutil.NewTLSConfig(d.CertData, d.KeyData, d.CAData) + if err != nil { + return nil, fmt.Errorf("failed to constructs tls config: %v", err) + } + } + return tlsConfig, nil +} diff --git a/pkg/controller/restore-operator/http.go b/pkg/controller/restore-operator/http.go index 319695235..80724e86f 100644 --- a/pkg/controller/restore-operator/http.go +++ b/pkg/controller/restore-operator/http.go @@ -24,6 +24,7 @@ import ( "github.com/coreos/etcd-operator/pkg/backup/backupapi" "github.com/coreos/etcd-operator/pkg/backup/reader" "github.com/coreos/etcd-operator/pkg/util/awsutil/s3factory" + "github.com/coreos/etcd-operator/pkg/util/azureutil/absfactory" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -98,6 +99,24 @@ func (r *Restore) serveBackup(w http.ResponseWriter, req *http.Request) error { backupReader = reader.NewS3Reader(s3Cli.S3) path = s3RestoreSource.Path + case api.BackupStorageTypeABS: + restoreSource := cr.Spec.RestoreSource + if restoreSource.ABS == nil { + return errors.New("empty abs restore source") + } + absRestoreSource := restoreSource.ABS + if len(absRestoreSource.ABSSecret) == 0 || len(absRestoreSource.Path) == 0 { + return errors.New("invalid abs restore source field (spec.abs), must specify all required subfields") + } + + absCli, err := absfactory.NewClientFromSecret(r.kubecli, r.namespace, absRestoreSource.ABSSecret) + if err != nil { + return fmt.Errorf("failed to create ABS client: %v", err) + } + // Nothing to Close for absCli yet + + backupReader = reader.NewABSReader(absCli.ABS) + path = absRestoreSource.Path default: return fmt.Errorf("unknown backup storage type (%s) for restore CR (%v)", cr.Spec.BackupStorageType, restoreName) } diff --git a/pkg/util/azureutil/absfactory/client.go b/pkg/util/azureutil/absfactory/client.go new file mode 100644 index 000000000..b9260afb2 --- /dev/null +++ b/pkg/util/azureutil/absfactory/client.go @@ -0,0 +1,57 @@ +// Copyright 2017 The etcd-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package absfactory + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/storage" + api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// ABSClient is a wrapper of ABS client that provides cleanup functionality. 
+type ABSClient struct { + ABS *storage.BlobStorageClient +} + +// NewClientFromSecret returns an ABS client based on a given k8s secret containing Azure credentials. +func NewClientFromSecret(kubecli kubernetes.Interface, namespace, absSecret string) (w *ABSClient, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("new ABS client failed: %v", err) + } + }() + + se, err := kubecli.CoreV1().Secrets(namespace).Get(absSecret, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get k8s secret: %v", err) + } + + storageAccount := se.Data[api.AzureSecretStorageAccount] + storageKey := se.Data[api.AzureSecretStorageKey] + + bc, err := storage.NewBasicClient( + string(storageAccount), + string(storageKey)) + if err != nil { + return nil, fmt.Errorf("failed to create Azure storage client: %v", err) + } + + abs := bc.GetBlobService() + return &ABSClient{ABS: &abs}, nil +}
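Pulling the pieces together: the factory expects the referenced secret to carry the storage-account and storage-key fields defined in backup_types.go, and the resulting BlobStorageClient is what the new writer and reader consume. A hedged end-to-end sketch with placeholder account, key, container, and blob names (error handling shortened; real credentials are needed for the calls to succeed):

package main

import (
	"fmt"
	"os"
	"strings"

	api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2"
	"github.com/coreos/etcd-operator/pkg/backup/reader"
	"github.com/coreos/etcd-operator/pkg/backup/writer"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func fail(err error) {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}

func main() {
	// The referenced k8s secret is expected to carry these two keys
	// (the values below are placeholders; the key must be base64-encoded).
	secretData := map[string][]byte{
		api.AzureSecretStorageAccount: []byte("mystorageaccount"),
		api.AzureSecretStorageKey:     []byte("bXlzdG9yYWdla2V5"),
	}

	// The same steps NewClientFromSecret performs once it has the secret data.
	bc, err := storage.NewBasicClient(
		string(secretData[api.AzureSecretStorageAccount]),
		string(secretData[api.AzureSecretStorageKey]))
	if err != nil {
		fail(err)
	}
	abs := bc.GetBlobService()

	// Path format shared by writer and reader: "<abs-container-name>/<path-to-backup-file>".
	const path = "myabscontainer/etcd.backup"

	w := writer.NewABSWriter(&abs)
	if _, err := w.Write(path, strings.NewReader("snapshot bytes")); err != nil {
		fail(err)
	}

	r := reader.NewABSReader(&abs)
	rc, err := r.Open(path)
	if err != nil {
		fail(err)
	}
	defer rc.Close()
}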