Skip to content

Commit

Permalink
Feat(@xen-orchestra/backups): implement file cache for listing backup …
Browse files Browse the repository at this point in the history
…of a VM
  • Loading branch information
fbeauchamp committed May 6, 2022
1 parent 2518395 commit b2ca330
Show file tree
Hide file tree
Showing 5 changed files with 121 additions and 31 deletions.
124 changes: 113 additions & 11 deletions @xen-orchestra/backups/RemoteAdapter.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
Expand All @@ -17,6 +18,7 @@ const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')

const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
Expand Down Expand Up @@ -78,6 +80,7 @@ class RemoteAdapter {
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._createCacheListVmBackups = synchronized.withKey()(this._createCacheListVmBackups)
}

get handler() {
Expand Down Expand Up @@ -261,7 +264,8 @@ class RemoteAdapter {
}

async deleteVmBackups(files) {
const { delta, full, ...others } = groupBy(await asyncMap(files, file => this.readVmBackupMetadata(file)), 'mode')
const metadatas = await asyncMap(files, file => this.readVmBackupMetadata(file))
const { delta, full, ...others } = groupBy(metadatas, 'mode')

const unsupportedModes = Object.keys(others)
if (unsupportedModes.length !== 0) {
Expand All @@ -278,6 +282,7 @@ class RemoteAdapter {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
await asyncMap(metadatas, metadata => this.invalidateVmBackupListCache(metadata.vm.uuid))
}

#getCompressionType() {
Expand Down Expand Up @@ -448,9 +453,22 @@ class RemoteAdapter {
return backupsByPool
}

async listVmBackups(vmUuid, predicate) {
async invalidateVmBackupListCache(vmUuid) {
try {
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)

// remove any pending loc
} catch (error) {
if (error.code === 'ENOENT') {
return
}
throw error
}
}

async #getCachabledDataListVmBackups(vmUuid) {
const handler = this._handler
const backups = []
const backups = {}

try {
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
Expand All @@ -460,22 +478,103 @@ class RemoteAdapter {
await asyncMap(files, async file => {
try {
const metadata = await this.readVmBackupMetadata(file)
if (predicate === undefined || predicate(metadata)) {
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename

backups.push(metadata)
}
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups[file] = metadata
} catch (error) {
warn(`listVmBackups ${file}`, { error })
warn(`createCacheListVmBackups ${file}`, { error })
}
})
return backups
} catch (error) {
let code
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
throw error
}
}
}

// use _ to mark this method as private
// since we decorate it with syncrhonized.withKey in constructor
async _createCacheListVmBackups(vmUuid) {
const path = `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
try {
const cached = await this.#readCacheListVmBackups(vmUuid)
if (cached !== undefined) {
return cached
}
// file did not get created during lock acquisition

const backups = await this.#getCachabledDataListVmBackups(vmUuid)
if (backups === undefined) {
return
}
const text = JSON.stringify(backups)
const zipped = await new Promise((resolve, reject) => {
zlib.gzip(text, (err, buffer) => {
if (err !== null) {
reject(err)
} else {
resolve(buffer)
}
})
})
// some file systems don't supports lock reliably
// in this case let's overwrite any existing file
// if the cache file is broken, it will be removed by readCacheListVmBackups
await this.handler.writeFile(path, zipped, { flags: 'w' })

return backups
} catch (error) {
let code
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
throw error
}
}
}

async #readCacheListVmBackups(vmUuid) {
try {
const gzipped = await this.handler.readFile(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
const text = await new Promise((resolve, reject) => {
zlib.gunzip(gzipped, (err, buffer) => {
if (err !== null) {
reject(err)
} else {
resolve(buffer)
}
})
})
return JSON.parse(text)
} catch (error) {
if (error.code === 'ENOENT') {
return
}
// try to delete the cache if the file is broken
await this.invalidateVmBackupListCache(vmUuid).catch(noop)
throw error
}
}

async listVmBackups(vmUuid, predicate) {
const backups = []
// await this.invalidateVmBackupListCache(vmUuid)
let cached = await this.#readCacheListVmBackups(vmUuid)

// nothing cached, update cache
if (cached === undefined) {
cached = await this._createCacheListVmBackups(vmUuid)
}

if (cached === undefined) {
return []
}

Object.values(cached).forEach(metadata => {
if (predicate === undefined || predicate(metadata)) {
backups.push(metadata)
}
})

return backups.sort(compareTimestamp)
}
Expand Down Expand Up @@ -603,7 +702,10 @@ class RemoteAdapter {
}

async readVmBackupMetadata(path) {
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
// @todo : I really want to be able to stringify _filename
return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }

// Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
}
}

Expand Down
1 change: 1 addition & 0 deletions @xen-orchestra/backups/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^10.0.0",
"golike-defer": "^0.5.1",
Expand Down
1 change: 1 addition & 0 deletions @xen-orchestra/backups/writers/_MixinBackupWriter.js
Original file line number Diff line number Diff line change
Expand Up @@ -64,5 +64,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
}
}
7 changes: 6 additions & 1 deletion CHANGELOG.unreleased.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”
Expand All @@ -33,5 +35,8 @@
<!--packages-start-->


- @xen-orchestra/backups minor
- xo-server patch
- @xen-orchestra/backups-cli patch
- @xen-orchestra/proxy patch
<!--packages-end-->
19 changes: 0 additions & 19 deletions yarn.lock
Original file line number Diff line number Diff line change
Expand Up @@ -16353,25 +16353,6 @@ semver@^7.3.6:
dependencies:
lru-cache "^7.4.0"

send@0.17.2:
version "0.17.2"
resolved "https://registry.yarnpkg.com/send/-/send-0.17.2.tgz#926622f76601c41808012c8bf1688fe3906f7820"
integrity sha512-UJYB6wFSJE3G00nEivR5rgWp8c2xXvJ3OPWPhmuteU0IKj8nKbG3DrjiOmLwpnHGYWAVwA69zmTm++YG0Hmwww==
dependencies:
debug "2.6.9"
depd "~1.1.2"
destroy "~1.0.4"
encodeurl "~1.0.2"
escape-html "~1.0.3"
etag "~1.8.1"
fresh "0.5.2"
http-errors "1.8.1"
mime "1.6.0"
ms "2.1.3"
on-finished "~2.3.0"
range-parser "~1.2.1"
statuses "~1.5.0"

send@0.18.0:
version "0.18.0"
resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be"
Expand Down

0 comments on commit b2ca330

Please sign in to comment.