fix(backups) #6091

Closed · wants to merge 4 commits
12 changes: 4 additions & 8 deletions @xen-orchestra/backups/_cleanVm.js
@@ -119,7 +119,7 @@ const listVhds = async (handler, vmDir) => {
const list = await handler.list(vdiDir, {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd))
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
@@ -249,15 +249,11 @@ exports.cleanVm = async function cleanVm(
}
}

// 2022-01-17 - FBP & JFT - Temporary disable aliases checking as it appears problematic
//
// check if alias are correct
// check if all vhd in data subfolder have a corresponding alias
// await asyncMap(Object.keys(aliases), async dir => {
// await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
// })
// Avoid a ESLint unused variable
noop(aliases)
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
})

// remove VHDs with missing ancestors
{
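Context for the two hunks above: the alias entries are now stored with their vdiDir prefix because the re-enabled checkAliases call runs outside listVhds, where a bare file name would no longer resolve against the remote handler. A minimal sketch of the idea, not the actual xen-orchestra implementation (handler.readFile and handler.list are used per the @xen-orchestra/fs API; the warning message is illustrative):

// minimal sketch, not the real checkAliases: verify that every alias resolves
// and that every VHD under the data subfolder is referenced by an alias
async function checkAliasesSketch(aliasPaths, dataDir, { handler }) {
  const referenced = new Set()
  for (const aliasPath of aliasPaths) {
    // an alias file contains the relative path of the VHD it points to
    const target = (await handler.readFile(aliasPath)).toString().trim()
    referenced.add(target.split('/').pop())
  }
  for (const file of await handler.list(dataDir)) {
    // any VHD in the data subfolder that no alias references is an orphan
    if (!referenced.has(file)) {
      console.warn(`orphan VHD without alias: ${dataDir}/${file}`)
    }
  }
}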
9 changes: 5 additions & 4 deletions @xen-orchestra/backups/writers/_MixinBackupWriter.js
@@ -44,13 +44,14 @@ exports.MixinBackupWriter = (BaseClass = Object) =>

async afterBackup() {
const { disableMergeWorker } = this._backup.config
// merge worker only compatible with local remotes
const { handler } = this._adapter
const willMergeInWorker = !disableMergeWorker && typeof handler._getRealPath === 'function'

const { merge } = await this._cleanVm({ remove: true, merge: disableMergeWorker })
const { merge } = await this._cleanVm({ remove: true, merge: !willMergeInWorker })
await this.#lock.dispose()

// merge worker only compatible with local remotes
const { handler } = this._adapter
if (merge && !disableMergeWorker && typeof handler._getRealPath === 'function') {
if (merge && willMergeInWorker) {
const taskFile =
join(MergeWorker.CLEAN_VM_QUEUE, formatFilenameDate(new Date())) +
'-' +
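The hunk above computes willMergeInWorker before calling _cleanVm, so the inline merge is only requested when the merge worker cannot take over (worker disabled or non-local remote). A hedged restatement of that decision as a standalone helper (the helper name is illustrative, not part of the PR):

// illustrative helper, not part of the PR: should cleanVm merge inline?
function shouldMergeInline({ disableMergeWorker, handler }) {
  // the merge worker is only compatible with local remotes, which expose _getRealPath
  const willMergeInWorker = !disableMergeWorker && typeof handler._getRealPath === 'function'
  // when the worker will handle the merge, cleanVm must not merge itself
  return !willMergeInWorker
}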
35 changes: 19 additions & 16 deletions @xen-orchestra/fs/src/s3.js
@@ -19,8 +19,25 @@ const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB

const { warn } = createLogger('xo:fs:s3')

// some objectstorage provider like backblaze, can answer a 500/503 routinely
// in this case we should retry, and let their load balancing do its magic
// https://www.backblaze.com/b2/docs/calling.html#error_handling
const retryOptions = {
delays: [100, 200, 500, 1000, 2000],
when: e => e.code === 'InternalError',
onRetry(error) {
warn('retrying writing file', {
attemptNumber: this.attemptNumber,
delay: this.delay,
error,
file: this.arguments[0],
})
},
}
export default class S3Handler extends RemoteHandlerAbstract {



constructor(remote, _opts) {
super(remote)
const { allowUnauthorized, host, path, username, password, protocol, region } = parse(remote.url)
@@ -123,21 +140,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}

// some objectstorage provider like backblaze, can answer a 500/503 routinely
// in this case we should retry, and let their load balancing do its magic
// https://www.backblaze.com/b2/docs/calling.html#error_handling
@decorateWith(pRetry.wrap, {
delays: [100, 200, 500, 1000, 2000],
when: e => e.code === 'InternalError',
onRetry(error) {
warn('retrying writing file', {
attemptNumber: this.attemptNumber,
delay: this.delay,
error,
file: this.arguments[0],
})
},
})
@decorateWith(pRetry.wrap, retryOptions)
async _writeFile(file, data, options) {
return this._s3.putObject({ ...this._createParams(file), Body: data })
}
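Hoisting retryOptions to module level keeps the decorator line short and lets the same policy be reused for other S3 calls later. A standalone sketch of the retry behaviour, assuming pRetry is promise-toolbox's retry helper (the flaky upload function and failure counter are illustrative, not part of the PR):

// standalone sketch of the retry policy; assumes promise-toolbox's retry is what
// this file imports as pRetry
const { retry: pRetry } = require('promise-toolbox')

let transientFailures = 2
const putObject = pRetry.wrap(
  async function (file) {
    if (transientFailures-- > 0) {
      const error = new Error('server busy') // what a Backblaze-style 500/503 surfaces as
      error.code = 'InternalError'
      throw error
    }
    return `uploaded ${file}`
  },
  {
    delays: [100, 200, 500, 1000, 2000],
    when: e => e.code === 'InternalError',
    onRetry(error) {
      // same context properties the real onRetry relies on
      console.warn('retrying writing file', {
        attemptNumber: this.attemptNumber,
        delay: this.delay,
        error: error.message,
        file: this.arguments[0],
      })
    },
  }
)

putObject('backup.vhd').then(console.log)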
4 changes: 4 additions & 0 deletions packages/vhd-lib/index.js
@@ -1,3 +1,4 @@

exports.chainVhd = require('./chain')
exports.checkFooter = require('./checkFooter')
exports.checkVhdChain = require('./checkChain')
@@ -12,3 +13,6 @@ exports.VhdDirectory = require('./Vhd/VhdDirectory').VhdDirectory
exports.VhdFile = require('./Vhd/VhdFile').VhdFile
exports.VhdSynthetic = require('./Vhd/VhdSynthetic').VhdSynthetic
exports.Constants = require('./_constants')
const {isVhdAlias, resolveAlias} = require('./_resolveAlias')
exports.isVhdAlias = isVhdAlias
exports.resolveAlias = resolveAlias
Comment on lines +16 to +18 (review comment from a project member):

I believe we should eventually remove this index in favor of specific submodule exports/imports.

Example for this commit:

import { isVhdAlias, resolveAlias } from 'vhd-lib/aliases.js'
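For reference, a hedged usage sketch of the two re-exported helpers as the index exposes them today (the resolveAlias(handler, path) signature is assumed from _resolveAlias; the wrapper function is illustrative):

// hedged usage sketch; the resolveAlias(handler, path) signature is assumed
const { isVhdAlias, resolveAlias } = require('vhd-lib')

async function toRealVhdPath(handler, path) {
  // follow the alias indirection only when the file actually is an alias
  return isVhdAlias(path) ? await resolveAlias(handler, path) : path
}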