backend-storage/backups/tag-on-upload #281

Merged · 5 commits · Nov 21, 2023
2 changes: 1 addition & 1 deletion Makefile
@@ -281,7 +281,7 @@ kind-deploy-backup-assets: export KUBECONFIG = $(PWD)/kubeconfig
kind-deploy-backup-assets: kind-load-redis-with-ssh
$(KUSTOMIZE) build config/test/redis-backups --load-restrictor LoadRestrictionsNone --enable-helm | kubectl apply -f -

REDIS_WITH_SSH_IMG = redis-with-ssh:4.0.11-alpine
REDIS_WITH_SSH_IMG = redis-with-ssh:6.2.13-alpine
kind-load-redis-with-ssh:
docker build -t $(REDIS_WITH_SSH_IMG) test/assets/redis-with-ssh
$(KIND) load docker-image $(REDIS_WITH_SSH_IMG)
26 changes: 26 additions & 0 deletions examples/backend/redis-v6/backup.yaml
@@ -0,0 +1,26 @@
apiVersion: saas.3scale.net/v1alpha1
kind: ShardedRedisBackup
metadata:
name: backup
namespace: default
spec:
timeout: 5m
schedule: "* * * * *"
sentinelRef: sentinel
historyLimit: 2
pollInterval: 10s
dbFile: /data/dump.rdb
sshOptions:
privateKeySecretRef:
name: redis-backup-ssh-private-key
user: docker
sudo: true
port: 2222
s3Options:
bucket: my-bucket
path: backups
region: us-east-1
credentialsSecretRef:
name: aws-credentials
serviceEndpoint: http://minio.default.svc.cluster.local:9000
pause: false
13 changes: 8 additions & 5 deletions examples/backend/redis-v6/redis.yaml
@@ -3,17 +3,20 @@ kind: RedisShard
metadata:
name: shard01
spec:
image:
tag: 6.2.13-alpine
command: /entrypoint.sh
image:
name: redis-with-ssh
tag: 6.2.13-alpine
---
apiVersion: saas.3scale.net/v1alpha1
kind: RedisShard
metadata:
name: shard02
spec:
image:
tag: 6.2.13-alpine

command: /entrypoint.sh
image:
name: redis-with-ssh
tag: 6.2.13-alpine
---
apiVersion: saas.3scale.net/v1alpha1
kind: RedisShard
46 changes: 46 additions & 0 deletions pkg/redis/backup/check.go
@@ -0,0 +1,46 @@
package backup

import (
"context"
"fmt"
"sort"

"github.com/3scale/saas-operator/pkg/util"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"sigs.k8s.io/controller-runtime/pkg/log"
)

func (br *Runner) CheckBackup(ctx context.Context) error {
logger := log.FromContext(ctx, "function", "(br *Runner) CheckBackup()")

awsconfig, err := util.AWSConfig(ctx, br.AWSAccessKeyID, br.AWSSecretAccessKey, br.AWSRegion, br.AWSS3Endpoint)
if err != nil {
return err
}

client := s3.NewFromConfig(*awsconfig)

// get backups of current hour
hourResult, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(br.S3Bucket),
Prefix: aws.String(br.S3Path + "/" + br.BackupFileBaseNameWithTimeSuffix(br.Timestamp.Format("2006-01-02T15"))),
})
if err != nil {
return err
}
sort.SliceStable(hourResult.Contents, func(i, j int) bool {
return hourResult.Contents[i].LastModified.Before(*hourResult.Contents[j].LastModified)
})

if len(hourResult.Contents) == 0 {
return fmt.Errorf("no backup found in s3 for the current hour")
}
latest := hourResult.Contents[len(hourResult.Contents)-1]
if br.BackupFileS3Path() != *latest.Key {
err := fmt.Errorf("latest backup %s has a different key than expected (%s)", *latest.Key, br.BackupFileS3Path())
logger.Error(err, "unable to find backup in s3")
return err
}
// store backup size
br.status.BackupSize = latest.Size

return nil
}
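
Note on the time layouts used for the S3 prefixes: Go's reference-time formatting truncates the timestamp, so `2006-01-02` matches every backup of the shard uploaded today while `2006-01-02T15` narrows it to the current hour. A standalone sketch (not part of the PR) showing the difference:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2023, time.November, 21, 15, 42, 7, 0, time.UTC)
	fmt.Println(ts.Format("2006-01-02"))    // 2023-11-21    -> day-level prefix (resolveTags)
	fmt.Println(ts.Format("2006-01-02T15")) // 2023-11-21T15 -> hour-level prefix (CheckBackup)
}
```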
2 changes: 1 addition & 1 deletion pkg/redis/backup/manager.go
@@ -94,7 +94,7 @@ func (br *Runner) Start(parentCtx context.Context, l logr.Logger) error {
errCh <- err
return
}
if err := br.TagBackup(ctx); err != nil {
if err := br.CheckBackup(ctx); err != nil {
errCh <- err
return
}
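
The only change here swaps the post-upload step: tagging now happens at upload time via the boto3 script in s3upload.go, so the runner only verifies the uploaded object afterwards. A rough, hand-condensed sketch of the intended sequence inside Start(), with the goroutine and error-channel plumbing from the diff context omitted:

```go
// Condensed illustration only; apart from UploadBackup and CheckBackup, the
// surrounding structure is simplified and not taken verbatim from the file.
if err := br.UploadBackup(ctx); err != nil {
	return err
}
// CheckBackup replaces TagBackup: it lists the current hour's objects and
// confirms the newest key matches BackupFileS3Path(), recording its size.
if err := br.CheckBackup(ctx); err != nil {
	return err
}
```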
182 changes: 118 additions & 64 deletions pkg/redis/backup/s3upload.go
@@ -1,16 +1,20 @@
package backup

import (
"bytes"
"context"
"fmt"
"net"
"net/url"
"path"
"strconv"
"strings"
"path/filepath"
"text/template"
"time"

"github.com/3scale/saas-operator/pkg/ssh"
"github.com/3scale/saas-operator/pkg/util"
"golang.org/x/crypto/ssh"
"github.com/MakeNowJust/heredoc"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"sigs.k8s.io/controller-runtime/pkg/log"
)

@@ -19,6 +23,14 @@ const (
backupFileExtension string = "rdb"
)

type Retention string

const (
Retention90d Retention = "90d"
Retention7d Retention = "7d"
Retention24h Retention = "24h"
)

func (br *Runner) BackupFileBaseName() string {
return fmt.Sprintf("%s_%s", backupFilePrefix, br.ShardName)
}
@@ -45,86 +57,128 @@ func (br *Runner) BackupFileS3Path() string {
func (br *Runner) UploadBackup(ctx context.Context) error {
logger := log.FromContext(ctx, "function", "(br *Runner) UploadBackup()")

var awsBaseCommand string
if br.AWSS3Endpoint != nil {
awsBaseCommand = strings.Join([]string{"aws", "--endpoint-url", *br.AWSS3Endpoint}, " ")
} else {
awsBaseCommand = "aws"
uploadScript, err := br.uploadScript(ctx)
if err != nil {
return err
}

var commands = []string{
// mv /data/dump.rdb /data/redis-backup-<shard>-<server>-<timestamp>.rdb
fmt.Sprintf("mv %s %s/%s",
br.RedisDBFile,
path.Dir(br.RedisDBFile), br.BackupFile(),
),
// gzip /data/redis-backup-<shard>-<server>-<timestamp>.rdb
fmt.Sprintf("gzip -1 %s/%s", path.Dir(br.RedisDBFile), br.BackupFile()),
// AWS_ACCESS_KEY_ID=*** AWS_SECRET_ACCESS_KEY=*** s3cmd put /data/redis-backup-<shard>-<server>-<timestamp>.rdb s3://<bucket>/<path>/redis-backup-<shard>-<server>-<timestamp>.rdb
fmt.Sprintf("%s=%s %s=%s %s=%s %s s3 cp --only-show-errors %s/%s s3://%s/%s/%s",
util.AWSRegionEnvvar, br.AWSRegion,
util.AWSAccessKeyEnvvar, br.AWSAccessKeyID,
util.AWSSecretKeyEnvvar, br.AWSSecretAccessKey,
awsBaseCommand,
path.Dir(br.RedisDBFile), br.BackupFileCompressed(),
br.S3Bucket, br.S3Path, br.BackupFileCompressed(),
),
fmt.Sprintf("rm -f %s/%s*", path.Dir(br.RedisDBFile), br.BackupFileBaseName()),
remoteExec := ssh.RemoteExecutor{
Host: br.Server.GetHost(),
User: br.SSHUser,
Port: br.SSHPort,
PrivateKey: br.SSHKey,
Logger: logger,
CmdTimeout: 0,
Commands: []ssh.Runnable{
ssh.NewCommand(fmt.Sprintf("mv %s %s/%s", br.RedisDBFile, path.Dir(br.RedisDBFile), br.BackupFile())),
ssh.NewCommand(fmt.Sprintf("gzip -1 %s/%s", path.Dir(br.RedisDBFile), br.BackupFile())),
ssh.NewScript(fmt.Sprintf("%s=%s %s=%s %s=%s python -",
util.AWSRegionEnvvar, br.AWSRegion,
util.AWSAccessKeyEnvvar, br.AWSAccessKeyID,
util.AWSSecretKeyEnvvar, br.AWSSecretAccessKey),
uploadScript,
br.AWSSecretAccessKey,
),
ssh.NewCommand(fmt.Sprintf("rm -f %s/%s*", path.Dir(br.RedisDBFile), br.BackupFileBaseName())),
},
}

for _, command := range commands {
if br.SSHSudo {
command = "sudo " + command
}
logger.V(1).Info(br.hideSensitive(fmt.Sprintf("running command '%s' on %s:%d", command, br.Server.GetHost(), br.SSHPort)))
output, err := remoteRun(ctx, br.SSHUser, br.Server.GetHost(), strconv.Itoa(int(br.SSHPort)), br.SSHKey, command)
if output != "" {
logger.V(1).Info(fmt.Sprintf("remote ssh command output: %s", output))
}
if err != nil {
logger.V(1).Info(fmt.Sprintf("remote ssh command error: %s", err.Error()))
return fmt.Errorf("remote ssh command failed: %w (%s)", err, output)
}
err = remoteExec.Run()
if err != nil {
return err
}

return nil
}

// e.g. output, err := remoteRun(ctx, "root", "MY_IP", "MY_PORT", "PRIVATE_KEY", "ls")
func remoteRun(ctx context.Context, user, addr, port, privateKey, cmd string) (string, error) {
func (br *Runner) resolveTags(ctx context.Context) (string, error) {
logger := log.FromContext(ctx, "function", "(br *Runner) resolveTags()")
var retention Retention

key, err := ssh.ParsePrivateKey([]byte(privateKey))
awsconfig, err := util.AWSConfig(ctx, br.AWSAccessKeyID, br.AWSSecretAccessKey, br.AWSRegion, br.AWSS3Endpoint)
if err != nil {
return "", err
return "{}", err
}
// Authentication
config := &ssh.ClientConfig{
User: user,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Auth: []ssh.AuthMethod{
ssh.PublicKeys(key),
},

client := s3.NewFromConfig(*awsconfig)

// get backups of current day
dayResult, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(br.S3Bucket),
Prefix: aws.String(br.S3Path + "/" + br.BackupFileBaseNameWithTimeSuffix(br.Timestamp.Format("2006-01-02"))),
})
if err != nil {
return "{}", err
}
client, err := ssh.Dial("tcp", net.JoinHostPort(addr, port), config)

// get backups of current hour
hourResult, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(br.S3Bucket),
Prefix: aws.String(br.S3Path + "/" + br.BackupFileBaseNameWithTimeSuffix(br.Timestamp.Format("2006-01-02T15"))),
})
if err != nil {
return "", err
return "{}", err
}

if len(dayResult.Contents) == 0 {
retention = Retention90d
logger.V(1).Info("backup tagged with 90d retention")
} else if len(hourResult.Contents) == 0 {
retention = Retention7d
logger.V(1).Info("backup tagged with 7d retention")
} else {
retention = Retention24h
logger.V(1).Info("backup tagged with 24h retention")
}
defer client.Close()

// Create a session. It is one session per command.
session, err := client.NewSession()
tags := url.Values{
"Layer": []string{"bck-storage"},
"App": []string{"Backend"},
"Shard": []string{br.ShardName},
"HostAddress": []string{br.Server.ID()},
"HostAlias": []string{br.Server.GetAlias()},
"Retention": []string{string(retention)},
}

return tags.Encode(), nil
}

func (br *Runner) uploadScript(ctx context.Context) (string, error) {
tags, err := br.resolveTags(ctx)
if err != nil {
return "", err
}
defer session.Close()

output, err := session.CombinedOutput(cmd)
return string(output), err
}
scriptTemplate := heredoc.Doc(`
import boto3
session = boto3.session.Session()
s3 = session.client(service_name="s3"{{if .Endpoint}},endpoint_url="{{.Endpoint}}"{{end}})
s3.upload_file(
"{{.File}}",
"{{.Bucket}}",
"{{.Key}}",
ExtraArgs={"Tagging": "{{.Tags}}"},
)
`)

templateVars := struct {
File, Bucket, Key, Endpoint, Tags string
}{
File: filepath.Join(path.Dir(br.RedisDBFile), br.BackupFileCompressed()),
Bucket: br.S3Bucket,
Key: filepath.Join(br.S3Path, br.BackupFileCompressed()),
Tags: tags,
}
if br.AWSS3Endpoint != nil {
templateVars.Endpoint = *br.AWSS3Endpoint
}

func (br *Runner) hideSensitive(msg string) string {
for _, ss := range []string{br.AWSSecretAccessKey, br.SSHKey} {
msg = strings.ReplaceAll(msg, ss, "*****")
t := template.Must(template.New("script").Parse(scriptTemplate))
script := new(bytes.Buffer)
err = t.Execute(script, templateVars)
if err != nil {
return "", err
}
return msg

return script.String(), nil
}
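
For context on the new tagging path: resolveTags picks the retention tier from what already exists in the bucket (no backup yet today → 90d, none yet this hour → 7d, otherwise 24h) and returns the tag set URL-encoded, which is the format boto3's `ExtraArgs={"Tagging": ...}` passes through to S3. A standalone sketch of the encoding with made-up host values (the real ones come from the Runner and the sentinel-discovered server):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Values below are hypothetical placeholders.
	tags := url.Values{
		"Layer":       []string{"bck-storage"},
		"App":         []string{"Backend"},
		"Shard":       []string{"shard01"},
		"HostAddress": []string{"10.0.0.15:6379"},
		"HostAlias":   []string{"rfr-shard01-0"},
		"Retention":   []string{"90d"},
	}
	// Encode sorts keys and percent-encodes values:
	// App=Backend&HostAddress=10.0.0.15%3A6379&HostAlias=rfr-shard01-0&Layer=bck-storage&Retention=90d&Shard=shard01
	fmt.Println(tags.Encode())
}
```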