BackupController: do as much as possible
When running a backup, try to do as much as possible, collecting errors
along the way, and return an aggregate at the end. This way, if a backup
fails for most reasons, we'll be able to upload the backup log file to
object storage, which wasn't happening before.

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
ncdc committed Dec 14, 2017
1 parent 6d5eeb2 commit 6740070
Showing 2 changed files with 81 additions and 54 deletions.
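The commit message describes a collect-and-aggregate error pattern: run every step even when an earlier one fails, remember each failure, and return them all as a single error at the end. Below is a minimal, hypothetical sketch of that pattern using the same apimachinery helper the diffs switch to (runSteps and the step functions are illustrative, not code from this commit):

package main

import (
    "fmt"

    kerrors "k8s.io/apimachinery/pkg/util/errors"
)

// runSteps runs every step even if earlier ones fail, collecting errors
// along the way, and returns a single aggregate error at the end.
func runSteps(steps ...func() error) error {
    var errs []error
    for _, step := range steps {
        if err := step(); err != nil {
            // Record the failure but keep going so later steps still run.
            errs = append(errs, err)
        }
    }
    // NewAggregate returns nil when errs is empty.
    return kerrors.NewAggregate(errs)
}

func main() {
    err := runSteps(
        func() error { return nil },
        func() error { return fmt.Errorf("step 2 failed") },
        func() error { return nil }, // still runs despite the earlier failure
    )
    fmt.Println(err)
}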
29 changes: 19 additions & 10 deletions pkg/cloudprovider/backup_service.go
@@ -117,6 +117,25 @@ func NewBackupService(objectStore ObjectStore, logger *logrus.Logger) BackupServ
 }
 
 func (br *backupService) UploadBackup(bucket, backupName string, metadata, backup, log io.Reader) error {
+    if log != nil {
+        // Uploading the log file is best-effort; if it fails, we log the error but it doesn't impact the
+        // backup's status.
+        logKey := getBackupLogKey(backupName)
+        if err := br.objectStore.PutObject(bucket, logKey, log); err != nil {
+            br.logger.WithError(err).WithFields(logrus.Fields{
+                "bucket": bucket,
+                "key":    logKey,
+            }).Error("Error uploading log file")
+        }
+    }
+
+    if metadata == nil || backup == nil {
+        // If we don't have metadata or backup, something failed, and there's no point in continuing. An object
+        // storage bucket that is missing either the metadata file or the backup tarball can't be
+        // restored.
+        return nil
+    }
+
     // upload metadata file
     metadataKey := getMetadataKey(backupName)
     if err := br.objectStore.PutObject(bucket, metadataKey, metadata); err != nil {
@@ -132,16 +151,6 @@ func (br *backupService) UploadBackup(bucket, backupName string, metadata, backu
         return kerrors.NewAggregate([]error{err, deleteErr})
     }
 
-    // uploading log file is best-effort; if it fails, we log the error but call the overall upload a
-    // success
-    logKey := getBackupLogKey(backupName)
-    if err := br.objectStore.PutObject(bucket, logKey, log); err != nil {
-        br.logger.WithError(err).WithFields(logrus.Fields{
-            "bucket": bucket,
-            "key":    logKey,
-        }).Error("Error uploading log file")
-    }
-
     return nil
 }
 
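Taken together with the controller changes below, the reordered UploadBackup means a failed backup can still get its log into object storage: the log goes up first on a best-effort basis, and nil metadata/backup readers short-circuit the rest instead of writing an unrestorable partial backup. A small, self-contained sketch of that flow (putObject and the object keys are stand-ins for illustration, not the real ObjectStore API):

package main

import (
    "fmt"
    "io"
    "strings"
)

// uploadAll mirrors the reworked UploadBackup flow: push the log first,
// best-effort, then bail out early if the metadata or backup reader is nil.
func uploadAll(metadata, backup, log io.Reader) error {
    if log != nil {
        if err := putObject("backup-log.gz", log); err != nil {
            // Best-effort: log upload failures are reported but not returned.
            fmt.Println("warning: could not upload log:", err)
        }
    }
    if metadata == nil || backup == nil {
        // Nothing restorable to upload; don't write a partial backup.
        return nil
    }
    if err := putObject("ark-backup.json", metadata); err != nil {
        return err
    }
    return putObject("backup.tar.gz", backup)
}

// putObject is a toy stand-in for ObjectStore.PutObject.
func putObject(key string, body io.Reader) error {
    data, err := io.ReadAll(body)
    if err != nil {
        return err
    }
    fmt.Printf("uploaded %s (%d bytes)\n", key, len(data))
    return nil
}

func main() {
    // A failed backup has only a log; it is still uploaded.
    _ = uploadAll(nil, nil, strings.NewReader("backup log contents"))
}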
106 changes: 62 additions & 44 deletions pkg/controller/backup_controller.go
@@ -20,6 +20,7 @@ import (
     "bytes"
     "context"
     "fmt"
+    "io"
     "io/ioutil"
     "os"
     "sync"
@@ -30,7 +31,7 @@ import (
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/clock"
-    kuberrs "k8s.io/apimachinery/pkg/util/errors"
+    kerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/util/workqueue"
@@ -285,71 +286,88 @@ func (controller *backupController) getValidationErrors(itm *api.Backup) []strin
     return validationErrors
 }
 
-func (controller *backupController) runBackup(backup *api.Backup, bucket string) error {
-    backupFile, err := ioutil.TempFile("", "")
-    if err != nil {
-        return errors.Wrap(err, "error creating temp file for Backup")
+func closeAndRemoveFile(file *os.File, log logrus.FieldLogger) {
+    if err := file.Close(); err != nil {
+        log.WithError(err).WithField("file", file.Name()).Error("error closing file")
     }
+    if err := os.Remove(file.Name()); err != nil {
+        log.WithError(err).WithField("file", file.Name()).Error("error removing file")
+    }
+}
 
+func (controller *backupController) runBackup(backup *api.Backup, bucket string) error {
+    log := controller.logger.WithField("backup", kubeutil.NamespaceAndName(backup))
+
     logFile, err := ioutil.TempFile("", "")
     if err != nil {
-        return errors.Wrap(err, "error creating temp file for Backup log")
+        return errors.Wrap(err, "error creating temp file for backup log")
     }
+    defer closeAndRemoveFile(logFile, log)
 
-    defer func() {
-        var errs []error
-        // TODO should this be wrapped?
-        errs = append(errs, err)
-
-        if err := backupFile.Close(); err != nil {
-            errs = append(errs, errors.Wrap(err, "error closing Backup temp file"))
-        }
+    log.Info("starting backup")
 
-        if err := os.Remove(backupFile.Name()); err != nil {
-            errs = append(errs, errors.Wrap(err, "error removing Backup temp file"))
-        }
-
-        if err := logFile.Close(); err != nil {
-            errs = append(errs, errors.Wrap(err, "error closing Backup log temp file"))
-        }
-
-        if err := os.Remove(logFile.Name()); err != nil {
-            errs = append(errs, errors.Wrap(err, "error removing Backup log temp file"))
-        }
-
-        err = kuberrs.NewAggregate(errs)
-    }()
+    backupFile, err := ioutil.TempFile("", "")
+    if err != nil {
+        return errors.Wrap(err, "error creating temp file for backup")
+    }
+    defer closeAndRemoveFile(backupFile, log)
 
     actions, err := controller.pluginManager.GetBackupItemActions(backup.Name)
     if err != nil {
         return err
     }
     defer controller.pluginManager.CloseBackupItemActions(backup.Name)
 
-    controller.logger.WithField("backup", kubeutil.NamespaceAndName(backup)).Info("starting backup")
+    var (
+        errs               []error
+        backupJsonToUpload io.Reader
+        backupFileToUpload io.Reader
+        logFileToUpload    io.Reader
+    )
 
     if err := controller.backupper.Backup(backup, backupFile, logFile, actions); err != nil {
-        return err
-    }
+        errs = append(errs, err)
 
-    controller.logger.WithField("backup", kubeutil.NamespaceAndName(backup)).Info("backup completed")
+        // TODO(ncdc): should the backupper be responsible for changing the phase?
+        backup.Status.Phase = api.BackupPhaseFailed
+    } else {
+        backup.Status.Phase = api.BackupPhaseCompleted
+
+        // re-set the file's offset to 0 for reading so we can upload it
+        if _, err = backupFile.Seek(0, 0); err != nil {
+            errs = append(errs, errors.Wrap(err, "unable to upload backup file - error resetting backup file offset"))
+        } else {
+            // There were no errors backing up or seeking back to the beginning of the file, so we can
+            // proceed with uploading the backup file.
+            backupFileToUpload = backupFile
+        }
+    }
 
-    // note: updating this here so the uploaded JSON shows "completed". If
-    // the upload fails, we'll alter the phase in the calling func.
-    backup.Status.Phase = api.BackupPhaseCompleted
+    log.Info("backup completed")
 
-    buf := new(bytes.Buffer)
-    if err := encode.EncodeTo(backup, "json", buf); err != nil {
-        return errors.Wrap(err, "error encoding Backup")
+    backupJson := new(bytes.Buffer)
+    if err := encode.EncodeTo(backup, "json", backupJson); err != nil {
+        errs = append(errs, errors.Wrap(err, "error encoding backup"))
+    } else {
+        backupJsonToUpload = backupJson
     }
 
-    // re-set the files' offset to 0 for reading
-    if _, err = backupFile.Seek(0, 0); err != nil {
-        return errors.Wrap(err, "error resetting Backup file offset")
-    }
+    // re-set the log file's offset to 0 for reading so we can upload it
     if _, err = logFile.Seek(0, 0); err != nil {
-        return errors.Wrap(err, "error resetting Backup log file offset")
+        errs = append(errs, errors.Wrap(err, "error resetting Backup log file offset"))
+    } else {
+        logFileToUpload = logFile
+    }
+
+    if err := controller.backupService.UploadBackup(
+        bucket,
+        backup.Name,
+        backupJsonToUpload,
+        backupFileToUpload,
+        logFileToUpload,
+    ); err != nil {
+        errs = append(errs, err)
     }
 
-    return controller.backupService.UploadBackup(bucket, backup.Name, buf, backupFile, logFile)
+    return kerrors.NewAggregate(errs)
 }
