diff --git a/.eslintrc.js b/.eslintrc.js index 6d7afd31fae5ee..ca0e40e4b6ceee 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -54,6 +54,7 @@ module.exports = { { files: [ 'doc/api/esm.md', + 'doc/api/fs.md', 'doc/api/module.md', 'doc/api/modules.md', 'doc/api/packages.md', diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml index 26be5a7cab2f98..9bb9cd14bc1d0f 100644 --- a/.github/workflows/linters.yml +++ b/.github/workflows/linters.yml @@ -98,3 +98,12 @@ jobs: - uses: mszostok/codeowners-validator@v0.4.0 with: checks: "files,duppatterns" + lint-pr-url: + if: ${{ github.event.pull_request }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 2 + # GH Actions squashes all PR commits, HEAD^ refers to the base branch. + - run: git diff HEAD^ HEAD -G"pr-url:" -- "*.md" | ./tools/lint-pr-url.mjs ${{ github.event.pull_request.html_url }} diff --git a/BUILDING.md b/BUILDING.md index 44f56a5ba23854..f4ecd3d1f522bc 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -8,7 +8,7 @@ If you can reproduce a test failure, search for it in the [Node.js issue tracker](https://github.com/nodejs/node/issues) or file a new issue. -## Table of Contents +## Table of contents * [Supported platforms](#supported-platforms) * [Input](#input) @@ -309,7 +309,7 @@ To install this version of Node.js into a system directory: [sudo] make install ``` -#### Running Tests +#### Running tests To verify the build: @@ -379,7 +379,7 @@ You can use [node-code-ide-configs](https://github.com/nodejs/node-code-ide-configs) to run/debug tests, if your IDE configs are present. -#### Running Coverage +#### Running coverage It's good practice to ensure any code you add or change is covered by tests. You can do so by running the test suite with coverage enabled: diff --git a/CHANGELOG.md b/CHANGELOG.md index 4079b2f21294ff..0dc6061fc0f33a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,7 +32,8 @@ release.
npm ls promzard
in npm’s source tree will show:
-npm@7.5.1 /path/to/npm
+npm@7.5.3 /path/to/npm
└─┬ init-package-json@0.0.4
└── promzard@0.1.5
diff --git a/deps/npm/docs/output/commands/npm.html b/deps/npm/docs/output/commands/npm.html
index 727120c1b747aa..3eda5376a36313 100644
--- a/deps/npm/docs/output/commands/npm.html
+++ b/deps/npm/docs/output/commands/npm.html
@@ -148,7 +148,7 @@ Table of contents
npm <command> [args]
Version
-7.5.1
+7.5.3
Description
npm is the package manager for the Node JavaScript platform. It puts
modules in place so that node can find them, and manages dependency
diff --git a/deps/npm/lib/exec.js b/deps/npm/lib/exec.js
index d36dd87cfb9712..e90ec0866e4dd7 100644
--- a/deps/npm/lib/exec.js
+++ b/deps/npm/lib/exec.js
@@ -169,8 +169,12 @@ const exec = async args => {
return await readPackageJson(pj)
} catch (er) {}
}
+ // Force preferOnline to true so we are making sure to pull in the latest
+ // This is especially useful if the user didn't give us a version, and
+ // they expect to be running @latest
return await pacote.manifest(p, {
...npm.flatOptions,
+ preferOnline: true,
})
}))
@@ -193,9 +197,13 @@ const exec = async args => {
const arb = new Arborist({ ...npm.flatOptions, path: installDir })
const tree = await arb.loadActual()
- // any that don't match the manifest we have, install them
- // add installDir/node_modules/.bin to pathArr
- const add = manis.filter(mani => manifestMissing(tree, mani))
+ // at this point, we have to ensure that we get the exact same
+ // version, because it's something that has only ever been installed
+ // by npm exec in the cache install directory
+ const add = manis.filter(mani => manifestMissing(tree, {
+ ...mani,
+ _from: `${mani.name}@${mani.version}`,
+ }))
.map(mani => mani._from)
.sort((a, b) => a.localeCompare(b))
diff --git a/deps/npm/lib/help.js b/deps/npm/lib/help.js
index 171c52704df6c7..f6996166542f9b 100644
--- a/deps/npm/lib/help.js
+++ b/deps/npm/lib/help.js
@@ -8,22 +8,22 @@ help.completion = function (opts, cb) {
}
const npmUsage = require('./utils/npm-usage.js')
-var path = require('path')
-var spawn = require('./utils/spawn')
-var npm = require('./npm.js')
-var log = require('npmlog')
-var openUrl = require('./utils/open-url')
-var glob = require('glob')
-var output = require('./utils/output.js')
+const { spawn } = require('child_process')
+const path = require('path')
+const npm = require('./npm.js')
+const log = require('npmlog')
+const openUrl = require('./utils/open-url')
+const glob = require('glob')
+const output = require('./utils/output.js')
const usage = require('./utils/usage.js')
help.usage = usage('help', 'npm help []')
function help (args, cb) {
- var argv = npm.config.parsedArgv.cooked
+ const argv = npm.config.parsedArgv.cooked
- var argnum = 0
+ let argnum = 0
if (args.length === 2 && ~~args[0])
argnum = ~~args.shift()
@@ -34,7 +34,7 @@ function help (args, cb) {
const affordances = {
'find-dupes': 'dedupe',
}
- var section = affordances[args[0]] || npm.deref(args[0]) || args[0]
+ let section = affordances[args[0]] || npm.deref(args[0]) || args[0]
// npm help : show basic usage
if (!section) {
@@ -52,15 +52,12 @@ function help (args, cb) {
return cb()
}
- var pref = [1, 5, 7]
- if (argnum) {
- pref = [argnum].concat(pref.filter(function (n) {
- return n !== argnum
- }))
- }
+ let pref = [1, 5, 7]
+ if (argnum)
+ pref = [argnum].concat(pref.filter(n => n !== argnum))
// npm help : Try to find the path
- var manroot = path.resolve(__dirname, '..', 'man')
+ const manroot = path.resolve(__dirname, '..', 'man')
// legacy
if (section === 'global')
@@ -71,18 +68,18 @@ function help (args, cb) {
// find either /section.n or /npm-section.n
// The glob is used in the glob. The regexp is used much
// further down. Globs and regexps are different
- var compextglob = '.+(gz|bz2|lzma|[FYzZ]|xz)'
- var compextre = '\\.(gz|bz2|lzma|[FYzZ]|xz)$'
- var f = '+(npm-' + section + '|' + section + ').[0-9]?(' + compextglob + ')'
- return glob(manroot + '/*/' + f, function (er, mans) {
+ const compextglob = '.+(gz|bz2|lzma|[FYzZ]|xz)'
+ const compextre = '\\.(gz|bz2|lzma|[FYzZ]|xz)$'
+ const f = '+(npm-' + section + '|' + section + ').[0-9]?(' + compextglob + ')'
+ return glob(manroot + '/*/' + f, (er, mans) => {
if (er)
return cb(er)
if (!mans.length)
return npm.commands['help-search'](args, cb)
- mans = mans.map(function (man) {
- var ext = path.extname(man)
+ mans = mans.map((man) => {
+ const ext = path.extname(man)
if (man.match(new RegExp(compextre)))
man = path.basename(man, ext)
@@ -94,14 +91,12 @@ function help (args, cb) {
}
function pickMan (mans, pref_) {
- var nre = /([0-9]+)$/
- var pref = {}
- pref_.forEach(function (sect, i) {
- pref[sect] = i
- })
- mans = mans.sort(function (a, b) {
- var an = a.match(nre)[1]
- var bn = b.match(nre)[1]
+ const nre = /([0-9]+)$/
+ const pref = {}
+ pref_.forEach((sect, i) => pref[sect] = i)
+ mans = mans.sort((a, b) => {
+ const an = a.match(nre)[1]
+ const bn = b.match(nre)[1]
return an === bn ? (a > b ? -1 : 1)
: pref[an] < pref[bn] ? -1
: 1
@@ -110,48 +105,61 @@ function pickMan (mans, pref_) {
}
function viewMan (man, cb) {
- var nre = /([0-9]+)$/
- var num = man.match(nre)[1]
- var section = path.basename(man, '.' + num)
+ const nre = /([0-9]+)$/
+ const num = man.match(nre)[1]
+ const section = path.basename(man, '.' + num)
// at this point, we know that the specified man page exists
- var manpath = path.join(__dirname, '..', 'man')
- var env = {}
+ const manpath = path.join(__dirname, '..', 'man')
+ const env = {}
Object.keys(process.env).forEach(function (i) {
env[i] = process.env[i]
})
env.MANPATH = manpath
- var viewer = npm.config.get('viewer')
+ const viewer = npm.config.get('viewer')
+
+ const opts = {
+ env,
+ stdio: 'inherit',
+ }
- var conf
+ let bin = 'man'
+ const args = []
switch (viewer) {
case 'woman':
- var a = ['-e', '(woman-find-file \'' + man + '\')']
- conf = { env: env, stdio: 'inherit' }
- var woman = spawn('emacsclient', a, conf)
- woman.on('close', cb)
+ bin = 'emacsclient'
+ args.push('-e', `(woman-find-file '${man}')`)
break
case 'browser':
+ bin = false
try {
- var url = htmlMan(man)
+ const url = htmlMan(man)
+ openUrl(url, 'help available at the following URL', cb)
} catch (err) {
return cb(err)
}
- openUrl(url, 'help available at the following URL', cb)
break
default:
- conf = { env: env, stdio: 'inherit' }
- var manProcess = spawn('man', [num, section], conf)
- manProcess.on('close', cb)
+ args.push(num, section)
break
}
+
+ if (bin) {
+ const proc = spawn(bin, args, opts)
+ proc.on('exit', (code) => {
+ if (code)
+ return cb(new Error(`help process exited with code: ${code}`))
+
+ return cb()
+ })
+ }
}
function htmlMan (man) {
- var sect = +man.match(/([0-9]+)$/)[1]
- var f = path.basename(man).replace(/[.]([0-9]+)$/, '')
+ let sect = +man.match(/([0-9]+)$/)[1]
+ const f = path.basename(man).replace(/[.]([0-9]+)$/, '')
switch (sect) {
case 1:
sect = 'commands'
@@ -169,7 +177,7 @@ function htmlMan (man) {
}
function getSections (cb) {
- var g = path.resolve(__dirname, '../man/man[0-9]/*.[0-9]')
+ const g = path.resolve(__dirname, '../man/man[0-9]/*.[0-9]')
glob(g, function (er, files) {
if (er)
return cb(er)
diff --git a/deps/npm/lib/ls.js b/deps/npm/lib/ls.js
index 153759d83815e6..603c3b412ddc5e 100644
--- a/deps/npm/lib/ls.js
+++ b/deps/npm/lib/ls.js
@@ -163,7 +163,10 @@ const getJsonOutputItem = (node, { global, long }) => {
Object.assign(item, packageInfo)
item.extraneous = false
item.path = node.path
- item._dependencies = node.package.dependencies || {}
+ item._dependencies = {
+ ...node.package.dependencies,
+ ...node.package.optionalDependencies,
+ }
item.devDependencies = node.package.devDependencies || {}
item.peerDependencies = node.package.peerDependencies || {}
}
diff --git a/deps/npm/lib/npm.js b/deps/npm/lib/npm.js
index 7a15a29b2fed99..40aa9bbd9b5061 100644
--- a/deps/npm/lib/npm.js
+++ b/deps/npm/lib/npm.js
@@ -173,8 +173,8 @@ const npm = module.exports = new class extends EventEmitter {
if (node && node.toUpperCase() !== process.execPath.toUpperCase()) {
log.verbose('node symlink', node)
process.execPath = node
+ this.config.execPath = node
}
- this.config.execPath = node
await this.config.load()
this.argv = this.config.parsedArgv.remain
diff --git a/deps/npm/lib/publish.js b/deps/npm/lib/publish.js
index 49b2088070e7a5..190d381a8aeeb4 100644
--- a/deps/npm/lib/publish.js
+++ b/deps/npm/lib/publish.js
@@ -6,6 +6,7 @@ const libpub = require('libnpmpublish').publish
const runScript = require('@npmcli/run-script')
const pacote = require('pacote')
const npa = require('npm-package-arg')
+const npmFetch = require('npm-registry-fetch')
const npm = require('./npm.js')
const output = require('./utils/output.js')
@@ -71,27 +72,12 @@ const publish_ = async (arg, opts) => {
// you can publish name@version, ./foo.tgz, etc.
// even though the default is the 'file:.' cwd.
const spec = npa(arg)
- const manifest = await getManifest(spec, opts)
+
+ let manifest = await getManifest(spec, opts)
if (manifest.publishConfig)
Object.assign(opts, publishConfigToOpts(manifest.publishConfig))
- const { registry } = opts
- if (!registry) {
- throw Object.assign(new Error('No registry specified.'), {
- code: 'ENOREGISTRY',
- })
- }
-
- if (!dryRun) {
- const creds = npm.config.getCredentialsByURI(registry)
- if (!creds.token && !creds.username) {
- throw Object.assign(new Error('This command requires you to be logged in.'), {
- code: 'ENEEDAUTH',
- })
- }
- }
-
// only run scripts for directory type publishes
if (spec.type === 'directory') {
await runScript({
@@ -105,18 +91,27 @@ const publish_ = async (arg, opts) => {
const tarballData = await pack(spec, opts)
const pkgContents = await getContents(manifest, tarballData)
+ // The purpose of re-reading the manifest is in case it changed,
+ // so that we send the latest and greatest thing to the registry
+ // note that publishConfig might have changed as well!
+ manifest = await getManifest(spec, opts)
+ if (manifest.publishConfig)
+ Object.assign(opts, publishConfigToOpts(manifest.publishConfig))
+
// note that logTar calls npmlog.notice(), so if we ARE in silent mode,
// this will do nothing, but we still want it in the debuglog if it fails.
if (!json)
logTar(pkgContents, { log, unicode })
if (!dryRun) {
- // The purpose of re-reading the manifest is in case it changed,
- // so that we send the latest and greatest thing to the registry
- // note that publishConfig might have changed as well!
- const manifest = await getManifest(spec, opts)
- if (manifest.publishConfig)
- Object.assign(opts, publishConfigToOpts(manifest.publishConfig))
+ const resolved = npa.resolve(manifest.name, manifest.version)
+ const registry = npmFetch.pickRegistry(resolved, opts)
+ const creds = npm.config.getCredentialsByURI(registry)
+ if (!creds.token && !creds.username) {
+ throw Object.assign(new Error('This command requires you to be logged in.'), {
+ code: 'ENEEDAUTH',
+ })
+ }
await otplease(opts, opts => libpub(manifest, tarballData, opts))
}
diff --git a/deps/npm/lib/utils/no-progress-while-running.js b/deps/npm/lib/utils/no-progress-while-running.js
deleted file mode 100644
index c2e6a01b2396dd..00000000000000
--- a/deps/npm/lib/utils/no-progress-while-running.js
+++ /dev/null
@@ -1,25 +0,0 @@
-var log = require('npmlog')
-var progressEnabled
-var running = 0
-
-var startRunning = exports.startRunning = function () {
- if (progressEnabled == null)
- progressEnabled = log.progressEnabled
- if (progressEnabled)
- log.disableProgress()
- ++running
-}
-
-var stopRunning = exports.stopRunning = function () {
- --running
- if (progressEnabled && running === 0)
- log.enableProgress()
-}
-
-exports.tillDone = function noProgressTillDone (cb) {
- startRunning()
- return function () {
- stopRunning()
- cb.apply(this, arguments)
- }
-}
diff --git a/deps/npm/lib/utils/pulse-till-done.js b/deps/npm/lib/utils/pulse-till-done.js
index 13147bae166137..a88b8aacd862b8 100644
--- a/deps/npm/lib/utils/pulse-till-done.js
+++ b/deps/npm/lib/utils/pulse-till-done.js
@@ -1,41 +1,26 @@
const log = require('npmlog')
-let pulsers = 0
-let pulse
+let pulseTimer = null
+const withPromise = async (promise) => {
+ pulseStart()
+ try {
+ return await promise
+ } finally {
+ pulseStop()
+ }
+}
-function pulseStart (prefix) {
- if (++pulsers > 1)
- return
- pulse = setInterval(function () {
- log.gauge.pulse(prefix)
+const pulseStart = () => {
+ pulseTimer = pulseTimer || setInterval(() => {
+ log.gauge.pulse('')
}, 150)
}
-function pulseStop () {
- if (--pulsers > 0)
- return
- clearInterval(pulse)
-}
-module.exports = function (prefix, cb) {
- if (!prefix)
- prefix = 'network'
- pulseStart(prefix)
- return (er, ...args) => {
- pulseStop()
- cb(er, ...args)
- }
+const pulseStop = () => {
+ clearInterval(pulseTimer)
+ pulseTimer = null
}
-const pulseWhile = async (prefix, promise) => {
- if (!promise) {
- promise = prefix
- prefix = ''
- }
- pulseStart(prefix)
- try {
- return await promise
- } finally {
- pulseStop()
- }
+module.exports = {
+ withPromise,
}
-module.exports.withPromise = pulseWhile
diff --git a/deps/npm/lib/utils/read-user-info.js b/deps/npm/lib/utils/read-user-info.js
index b0166e18c90df2..e3c4a9fbe51cab 100644
--- a/deps/npm/lib/utils/read-user-info.js
+++ b/deps/npm/lib/utils/read-user-info.js
@@ -8,21 +8,21 @@ exports.password = readPassword
exports.username = readUsername
exports.email = readEmail
+const otpPrompt = `This command requires a one-time password (OTP) from your authenticator app.
+Enter one below. You can also pass one on the command line by appending --otp=123456.
+For more information, see:
+https://docs.npmjs.com/getting-started/using-two-factor-authentication
+Enter OTP: `
+const passwordPrompt = 'npm password: '
+const usernamePrompt = 'npm username: '
+const emailPrompt = 'email (this IS public): '
+
function read (opts) {
log.clearProgress()
return readAsync(opts).finally(() => log.showProgress())
}
-function readOTP (msg, otp, isRetry) {
- if (!msg) {
- msg = [
- 'This command requires a one-time password (OTP) from your authenticator app.',
- 'Enter one below. You can also pass one on the command line by appending --otp=123456.',
- 'For more information, see:',
- 'https://docs.npmjs.com/getting-started/using-two-factor-authentication',
- 'Enter OTP: ',
- ].join('\n')
- }
+function readOTP (msg = otpPrompt, otp, isRetry) {
if (isRetry && otp && /^[\d ]+$|^[A-Fa-f0-9]{64,64}$/.test(otp))
return otp.replace(/\s+/g, '')
@@ -30,9 +30,7 @@ function readOTP (msg, otp, isRetry) {
.then((otp) => readOTP(msg, otp, true))
}
-function readPassword (msg, password, isRetry) {
- if (!msg)
- msg = 'npm password: '
+function readPassword (msg = passwordPrompt, password, isRetry) {
if (isRetry && password)
return password
@@ -40,9 +38,7 @@ function readPassword (msg, password, isRetry) {
.then((password) => readPassword(msg, password, true))
}
-function readUsername (msg, username, opts, isRetry) {
- if (!msg)
- msg = 'npm username: '
+function readUsername (msg = usernamePrompt, username, opts = {}, isRetry) {
if (isRetry && username) {
const error = userValidate.username(username)
if (error)
@@ -55,9 +51,7 @@ function readUsername (msg, username, opts, isRetry) {
.then((username) => readUsername(msg, username, opts, true))
}
-function readEmail (msg, email, opts, isRetry) {
- if (!msg)
- msg = 'email (this IS public): '
+function readEmail (msg = emailPrompt, email, opts = {}, isRetry) {
if (isRetry && email) {
const error = userValidate.email(email)
if (error)
diff --git a/deps/npm/lib/utils/spawn.js b/deps/npm/lib/utils/spawn.js
deleted file mode 100644
index 3bbe18384bd3c1..00000000000000
--- a/deps/npm/lib/utils/spawn.js
+++ /dev/null
@@ -1,58 +0,0 @@
-module.exports = spawn
-
-var _spawn = require('child_process').spawn
-var EventEmitter = require('events').EventEmitter
-var npwr = require('./no-progress-while-running.js')
-
-function willCmdOutput (stdio) {
- if (stdio === 'inherit')
- return true
- if (!Array.isArray(stdio))
- return false
- for (var fh = 1; fh <= 2; ++fh) {
- if (stdio[fh] === 'inherit')
- return true
- if (stdio[fh] === 1 || stdio[fh] === 2)
- return true
- }
- return false
-}
-
-function spawn (cmd, args, options) {
- var cmdWillOutput = willCmdOutput(options && options.stdio)
-
- if (cmdWillOutput)
- npwr.startRunning()
- var raw = _spawn(cmd, args, options)
- var cooked = new EventEmitter()
-
- raw.on('error', function (er) {
- if (cmdWillOutput)
- npwr.stopRunning()
- er.file = cmd
- cooked.emit('error', er)
- }).on('close', function (code, signal) {
- if (cmdWillOutput)
- npwr.stopRunning()
- // Create ENOENT error because Node.js v0.8 will not emit
- // an `error` event if the command could not be found.
- if (code === 127) {
- var er = new Error('spawn ENOENT')
- er.code = 'ENOENT'
- er.errno = 'ENOENT'
- er.syscall = 'spawn'
- er.file = cmd
- cooked.emit('error', er)
- } else
- cooked.emit('close', code, signal)
- })
-
- cooked.stdin = raw.stdin
- cooked.stdout = raw.stdout
- cooked.stderr = raw.stderr
- cooked.kill = function (sig) {
- return raw.kill(sig)
- }
-
- return cooked
-}
diff --git a/deps/npm/man/man1/npm-ls.1 b/deps/npm/man/man1/npm-ls.1
index 450f21d7b757a1..09a0729591f1ce 100644
--- a/deps/npm/man/man1/npm-ls.1
+++ b/deps/npm/man/man1/npm-ls.1
@@ -26,7 +26,7 @@ example, running \fBnpm ls promzard\fP in npm's source tree will show:
.P
.RS 2
.nf
-npm@7\.5\.1 /path/to/npm
+npm@7\.5\.3 /path/to/npm
└─┬ init\-package\-json@0\.0\.4
└── promzard@0\.1\.5
.fi
diff --git a/deps/npm/man/man1/npm.1 b/deps/npm/man/man1/npm.1
index fc76aef29eb231..041838e698b496 100644
--- a/deps/npm/man/man1/npm.1
+++ b/deps/npm/man/man1/npm.1
@@ -10,7 +10,7 @@ npm [args]
.RE
.SS Version
.P
-7\.5\.1
+7\.5\.3
.SS Description
.P
npm is the package manager for the Node JavaScript platform\. It puts
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/actual.js b/deps/npm/node_modules/@npmcli/arborist/bin/actual.js
new file mode 100644
index 00000000000000..ef254e1d4133d0
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/actual.js
@@ -0,0 +1,21 @@
+const Arborist = require('../')
+const print = require('./lib/print-tree.js')
+const options = require('./lib/options.js')
+require('./lib/logging.js')
+require('./lib/timers.js')
+
+const start = process.hrtime()
+new Arborist(options).loadActual(options).then(tree => {
+ const end = process.hrtime(start)
+ if (!process.argv.includes('--quiet'))
+ print(tree)
+
+ console.error(`read ${tree.inventory.size} deps in ${end[0] * 1000 + end[1] / 1e6}ms`)
+ if (options.save)
+ tree.meta.save()
+ if (options.saveHidden) {
+ tree.meta.hiddenLockfile = true
+ tree.meta.filename = options.path + '/node_modules/.package-lock.json'
+ tree.meta.save()
+ }
+}).catch(er => console.error(er))
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/audit.js b/deps/npm/node_modules/@npmcli/arborist/bin/audit.js
new file mode 100644
index 00000000000000..5075724e2d471e
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/audit.js
@@ -0,0 +1,48 @@
+const Arborist = require('../')
+
+const print = require('./lib/print-tree.js')
+const options = require('./lib/options.js')
+require('./lib/timers.js')
+require('./lib/logging.js')
+
+const Vuln = require('../lib/vuln.js')
+const printReport = report => {
+ for (const vuln of report.values())
+ console.log(printVuln(vuln))
+ if (report.topVulns.size) {
+ console.log('\n# top-level vulnerabilities')
+ for (const vuln of report.topVulns.values())
+ console.log(printVuln(vuln))
+ }
+}
+
+const printVuln = vuln => {
+ return {
+ __proto__: { constructor: Vuln },
+ name: vuln.name,
+ issues: [...vuln.advisories].map(a => printAdvisory(a)),
+ range: vuln.simpleRange,
+ nodes: [...vuln.nodes].map(node => `${node.name} ${node.location || '#ROOT'}`),
+ ...(vuln.topNodes.size === 0 ? {} : {
+ topNodes: [...vuln.topNodes].map(node => `${node.location || '#ROOT'}`),
+ }),
+ }
+}
+
+const printAdvisory = a => `${a.title}${a.url ? ' ' + a.url : ''}`
+
+const start = process.hrtime()
+process.emit('time', 'audit script')
+const arb = new Arborist(options)
+arb.audit(options).then(tree => {
+ process.emit('timeEnd', 'audit script')
+ const end = process.hrtime(start)
+ if (options.fix)
+ print(tree)
+ if (!options.quiet)
+ printReport(arb.auditReport)
+ if (options.fix)
+ console.error(`resolved ${tree.inventory.size} deps in ${end[0] + end[1] / 1e9}s`)
+ if (tree.meta && options.save)
+ tree.meta.save()
+}).catch(er => console.error(er))
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/funding.js b/deps/npm/node_modules/@npmcli/arborist/bin/funding.js
new file mode 100644
index 00000000000000..fa1237e87e98a0
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/funding.js
@@ -0,0 +1,32 @@
+const options = require('./lib/options.js')
+require('./lib/logging.js')
+require('./lib/timers.js')
+
+const Arborist = require('../')
+const a = new Arborist(options)
+const query = options._.shift()
+const start = process.hrtime()
+a.loadVirtual().then(tree => {
+ // only load the actual tree if the virtual one doesn't have modern metadata
+ if (!tree.meta || !(tree.meta.originalLockfileVersion >= 2)) {
+ console.error('old metadata, load actual')
+ throw 'load actual'
+ } else {
+ console.error('meta ok, return virtual tree')
+ return tree
+ }
+}).catch(() => a.loadActual()).then(tree => {
+ const end = process.hrtime(start)
+ if (!query) {
+ for (const node of tree.inventory.values()) {
+ if (node.package.funding)
+ console.log(node.name, node.location, node.package.funding)
+ }
+ } else {
+ for (const node of tree.inventory.query('name', query)) {
+ if (node.package.funding)
+ console.log(node.name, node.location, node.package.funding)
+ }
+ }
+ console.error(`read ${tree.inventory.size} deps in ${end[0] * 1000 + end[1] / 1e6}ms`)
+})
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/ideal.js b/deps/npm/node_modules/@npmcli/arborist/bin/ideal.js
new file mode 100644
index 00000000000000..18a5b9eb310869
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/ideal.js
@@ -0,0 +1,68 @@
+const Arborist = require('../')
+
+const options = require('./lib/options.js')
+const print = require('./lib/print-tree.js')
+require('./lib/logging.js')
+require('./lib/timers.js')
+
+const c = require('chalk')
+
+const whichIsA = (name, dependents, indent = ' ') => {
+ if (!dependents || dependents.length === 0)
+ return ''
+ const str = `\nfor: ` +
+ dependents.map(dep => {
+ return dep.more ? `${dep.more} more (${dep.names.join(', ')})`
+ : `${dep.type} dependency ` +
+ `${c.bold(name)}@"${c.bold(dep.spec)}"` + `\nfrom:` +
+ (dep.from.location ? (dep.from.name
+ ? ` ${c.bold(dep.from.name)}@${c.bold(dep.from.version)} ` +
+ c.dim(`at ${dep.from.location}`)
+ : ' the root project')
+ : ` ${c.bold(dep.from.name)}@${c.bold(dep.from.version)}`) +
+ whichIsA(dep.from.name, dep.from.dependents, ' ')
+ }).join('\nand: ')
+
+ return str.split(/\n/).join(`\n${indent}`)
+}
+
+const explainEresolve = ({ dep, current, peerConflict, fixWithForce }) => {
+ return (!dep.whileInstalling ? '' : `While resolving: ` +
+ `${c.bold(dep.whileInstalling.name)}@${c.bold(dep.whileInstalling.version)}\n`) +
+
+ `Found: ` +
+ `${c.bold(current.name)}@${c.bold(current.version)} ` +
+ c.dim(`at ${current.location}`) +
+ `${whichIsA(current.name, current.dependents)}` +
+
+ `\n\nCould not add conflicting dependency: ` +
+ `${c.bold(dep.name)}@${c.bold(dep.version)} ` +
+ c.dim(`at ${dep.location}`) +
+ `${whichIsA(dep.name, dep.dependents)}\n` +
+
+ (!peerConflict ? '' :
+ `\nConflicting peer dependency: ` +
+ `${c.bold(peerConflict.name)}@${c.bold(peerConflict.version)} ` +
+ c.dim(`at ${peerConflict.location}`) +
+ `${whichIsA(peerConflict.name, peerConflict.dependents)}\n`
+ ) +
+
+ `\nFix the upstream dependency conflict, or
+run this command with --legacy-peer-deps${
+ fixWithForce ? ' or --force' : ''}
+to accept an incorrect (and potentially broken) dependency resolution.
+`
+}
+
+const start = process.hrtime()
+new Arborist(options).buildIdealTree(options).then(tree => {
+ const end = process.hrtime(start)
+ print(tree)
+  console.error(`resolved ${tree.inventory.size} deps in ${end[0] + end[1] / 1e9}s`)
+ if (tree.meta && options.save)
+ tree.meta.save()
+}).catch(er => {
+ console.error(er)
+ if (er.code === 'ERESOLVE')
+ console.error(explainEresolve(er))
+})
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/index.js b/deps/npm/node_modules/@npmcli/arborist/bin/index.js
new file mode 100755
index 00000000000000..3cedc91d735652
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/index.js
@@ -0,0 +1,77 @@
+#!/usr/bin/env node
+const [cmd] = process.argv.splice(2, 1)
+
+const usage = () => `Arborist - the npm tree doctor
+
+Version: ${require('../package.json').version}
+
+# USAGE
+ arborist [path] [options...]
+
+# COMMANDS
+
+* reify: reify ideal tree to node_modules (install, update, rm, ...)
+* ideal: generate and print the ideal tree
+* actual: read and print the actual tree in node_modules
+* virtual: read and print the virtual tree in the local shrinkwrap file
+* shrinkwrap: load a local shrinkwrap and print its data
+* audit: perform a security audit on project dependencies
+* funding: query funding information in the local package tree. A second
+ positional argument after the path name can limit to a package name.
+* license: query license information in the local package tree. A second
+ positional argument after the path name can limit to a license type.
+* help: print this text
+
+# OPTIONS
+
+Most npm options are supported, but in camelCase rather than css-case. For
+example, instead of '--dry-run', use '--dryRun'.
+
+Additionally:
+
+* --quiet will suppress the printing of package trees
+* Instead of 'npm install <pkg>', use 'arborist reify --add=<pkg>'.
+  The '--add=<pkg>' option can be specified multiple times.
+* Instead of 'npm rm <pkg>', use 'arborist reify --rm=<pkg>'.
+  The '--rm=<pkg>' option can be specified multiple times.
+* Instead of 'npm update', use 'arborist reify --update-all'.
+* 'npm audit fix' is 'arborist audit --fix'
+`
+
+const help = () => console.log(usage())
+
+switch (cmd) {
+ case 'actual':
+ require('./actual.js')
+ break
+ case 'virtual':
+ require('./virtual.js')
+ break
+ case 'ideal':
+ require('./ideal.js')
+ break
+ case 'reify':
+ require('./reify.js')
+ break
+ case 'audit':
+ require('./audit.js')
+ break
+ case 'funding':
+ require('./funding.js')
+ break
+ case 'license':
+ require('./license.js')
+ break
+ case 'shrinkwrap':
+ require('./shrinkwrap.js')
+ break
+ case 'help':
+ case '-h':
+ case '--help':
+ help()
+ break
+ default:
+ process.exitCode = 1
+ console.error(usage())
+ break
+}
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/lib/logging.js b/deps/npm/node_modules/@npmcli/arborist/bin/lib/logging.js
new file mode 100644
index 00000000000000..57597b2e509e44
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/lib/logging.js
@@ -0,0 +1,33 @@
+const options = require('./options.js')
+const { quiet = false } = options
+const { loglevel = quiet ? 'warn' : 'silly' } = options
+
+const levels = [
+ 'silly',
+ 'verbose',
+ 'info',
+ 'timing',
+ 'http',
+ 'notice',
+ 'warn',
+ 'error',
+ 'silent',
+]
+
+const levelMap = new Map(levels.reduce((set, level, index) => {
+ set.push([level, index], [index, level])
+ return set
+}, []))
+
+const { inspect, format } = require('util')
+if (loglevel !== 'silent') {
+ process.on('log', (level, ...args) => {
+ if (levelMap.get(level) < levelMap.get(loglevel))
+ return
+ const pref = `${process.pid} ${level} `
+ if (level === 'warn' && args[0] === 'ERESOLVE')
+ args[2] = inspect(args[2], { depth: Infinity })
+ const msg = pref + format(...args).trim().split('\n').join(`\n${pref}`)
+ console.error(msg)
+ })
+}
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/lib/options.js b/deps/npm/node_modules/@npmcli/arborist/bin/lib/options.js
new file mode 100644
index 00000000000000..8f0dc2f1203240
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/lib/options.js
@@ -0,0 +1,49 @@
+const options = module.exports = {
+ path: undefined,
+ cache: `${process.env.HOME}/.npm/_cacache`,
+ _: [],
+}
+
+for (const arg of process.argv.slice(2)) {
+ if (/^--add=/.test(arg)) {
+ options.add = options.add || []
+ options.add.push(arg.substr('--add='.length))
+ } else if (/^--rm=/.test(arg)) {
+ options.rm = options.rm || []
+ options.rm.push(arg.substr('--rm='.length))
+ } else if (arg === '--global')
+ options.global = true
+ else if (arg === '--global-style')
+ options.globalStyle = true
+ else if (arg === '--prefer-dedupe')
+ options.preferDedupe = true
+ else if (arg === '--legacy-peer-deps')
+ options.legacyPeerDeps = true
+ else if (arg === '--force')
+ options.force = true
+ else if (arg === '--update-all') {
+ options.update = options.update || {}
+ options.update.all = true
+ } else if (/^--update=/.test(arg)) {
+ options.update = options.update || {}
+ options.update.names = options.update.names || []
+ options.update.names.push(arg.substr('--update='.length))
+ } else if (/^--omit=/.test(arg)) {
+ options.omit = options.omit || []
+ options.omit.push(arg.substr('--omit='.length))
+ } else if (/^--[^=]+=/.test(arg)) {
+ const [key, ...v] = arg.replace(/^--/, '').split('=')
+ const val = v.join('=')
+ options[key] = val === 'false' ? false : val === 'true' ? true : val
+ } else if (/^--.+/.test(arg))
+ options[arg.replace(/^--/, '')] = true
+ else if (options.path === undefined)
+ options.path = arg
+ else
+ options._.push(arg)
+}
+
+if (options.path === undefined)
+ options.path = '.'
+
+console.error(options)
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/lib/print-tree.js b/deps/npm/node_modules/@npmcli/arborist/bin/lib/print-tree.js
new file mode 100644
index 00000000000000..1ea2a721873324
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/lib/print-tree.js
@@ -0,0 +1,5 @@
+const { inspect } = require('util')
+const { quiet } = require('./options.js')
+
+module.exports = quiet ? () => {}
+ : tree => console.log(inspect(tree.toJSON(), { depth: Infinity }))
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/lib/timers.js b/deps/npm/node_modules/@npmcli/arborist/bin/lib/timers.js
new file mode 100644
index 00000000000000..3b73c0bf6ddd32
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/lib/timers.js
@@ -0,0 +1,22 @@
+const timers = Object.create(null)
+
+process.on('time', name => {
+ if (timers[name])
+ throw new Error('conflicting timer! ' + name)
+ timers[name] = process.hrtime()
+})
+
+process.on('timeEnd', name => {
+ if (!timers[name])
+ throw new Error('timer not started! ' + name)
+ const res = process.hrtime(timers[name])
+ delete timers[name]
+ console.error(`${process.pid} ${name}`, res[0] * 1e3 + res[1] / 1e6)
+})
+
+process.on('exit', () => {
+ for (const name of Object.keys(timers)) {
+ console.error('Dangling timer: ', name)
+ process.exitCode = 1
+ }
+})
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/license.js b/deps/npm/node_modules/@npmcli/arborist/bin/license.js
new file mode 100644
index 00000000000000..4083ddc695d467
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/license.js
@@ -0,0 +1,34 @@
+const Arborist = require('../')
+const options = require('./lib/options.js')
+require('./lib/logging.js')
+require('./lib/timers.js')
+
+const a = new Arborist(options)
+const query = options._.shift()
+
+a.loadVirtual().then(tree => {
+ // only load the actual tree if the virtual one doesn't have modern metadata
+ if (!tree.meta || !(tree.meta.originalLockfileVersion >= 2))
+ throw 'load actual'
+ else
+ return tree
+}).catch((er) => {
+ console.error('loading actual tree', er)
+ return a.loadActual()
+}).then(tree => {
+ if (!query) {
+ const set = []
+ for (const license of tree.inventory.query('license'))
+ set.push([tree.inventory.query('license', license).size, license])
+
+ for (const [count, license] of set.sort((a, b) =>
+ a[1] && b[1] ? b[0] - a[0] || a[1].localeCompare(b[1])
+ : a[1] ? -1
+ : b[1] ? 1
+ : 0))
+ console.log(count, license)
+ } else {
+ for (const node of tree.inventory.query('license', query === 'undefined' ? undefined : query))
+ console.log(`${node.name} ${node.location} ${node.package.description || ''}`)
+ }
+})
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/reify.js b/deps/npm/node_modules/@npmcli/arborist/bin/reify.js
new file mode 100644
index 00000000000000..d17a0e03b3286a
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/reify.js
@@ -0,0 +1,46 @@
+const Arborist = require('../')
+
+const options = require('./lib/options.js')
+const print = require('./lib/print-tree.js')
+require('./lib/logging.js')
+require('./lib/timers.js')
+
+const printDiff = diff => {
+ const {depth} = require('treeverse')
+ depth({
+ tree: diff,
+ visit: d => {
+ if (d.location === '')
+ return
+ switch (d.action) {
+ case 'REMOVE':
+ console.error('REMOVE', d.actual.location)
+ break
+ case 'ADD':
+ console.error('ADD', d.ideal.location, d.ideal.resolved)
+ break
+ case 'CHANGE':
+ console.error('CHANGE', d.actual.location, {
+ from: d.actual.resolved,
+ to: d.ideal.resolved,
+ })
+ break
+ }
+ },
+ getChildren: d => d.children,
+ })
+}
+
+const start = process.hrtime()
+process.emit('time', 'install')
+const arb = new Arborist(options)
+arb.reify(options).then(tree => {
+ process.emit('timeEnd', 'install')
+ const end = process.hrtime(start)
+ print(tree)
+ if (options.dryRun)
+ printDiff(arb.diff)
+ console.error(`resolved ${tree.inventory.size} deps in ${end[0] + end[1] / 1e9}s`)
+ if (tree.meta && options.save)
+ tree.meta.save()
+}).catch(er => console.error(require('util').inspect(er, { depth: Infinity })))
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/shrinkwrap.js b/deps/npm/node_modules/@npmcli/arborist/bin/shrinkwrap.js
new file mode 100644
index 00000000000000..ee5ec24557947c
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/shrinkwrap.js
@@ -0,0 +1,12 @@
+const Shrinkwrap = require('../lib/shrinkwrap.js')
+const options = require('./lib/options.js')
+require('./lib/logging.js')
+require('./lib/timers.js')
+
+const { quiet } = options
+Shrinkwrap.load(options)
+ .then(s => quiet || console.log(JSON.stringify(s.data, 0, 2)))
+ .catch(er => {
+ console.error('shrinkwrap load failure', er)
+ process.exit(1)
+ })
diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/virtual.js b/deps/npm/node_modules/@npmcli/arborist/bin/virtual.js
new file mode 100644
index 00000000000000..7f90f20cf38176
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/arborist/bin/virtual.js
@@ -0,0 +1,15 @@
+const Arborist = require('../')
+
+const print = require('./lib/print-tree.js')
+const options = require('./lib/options.js')
+require('./lib/logging.js')
+require('./lib/timers.js')
+
+const start = process.hrtime()
+new Arborist(options).loadVirtual().then(tree => {
+ const end = process.hrtime(start)
+ print(tree)
+ if (options.save)
+ tree.meta.save()
+ console.error(`read ${tree.inventory.size} deps in ${end[0] * 1000 + end[1] / 1e6}ms`)
+}).catch(er => console.error(er))
diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js
index ae92b74cefd188..4c266502101a4c 100644
--- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js
+++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js
@@ -398,6 +398,7 @@ module.exports = cls => class IdealTreeBuilder extends cls {
if (this[_global] && (this[_updateAll] || this[_updateNames].length)) {
const nm = resolve(this.path, 'node_modules')
for (const name of await readdir(nm)) {
+ tree.package.dependencies = tree.package.dependencies || {}
if (this[_updateAll] || this[_updateNames].includes(name))
tree.package.dependencies[name] = '*'
}
diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js
index 6cc129a7cc0572..1dd4b4b0f19317 100644
--- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js
+++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js
@@ -907,7 +907,7 @@ module.exports = cls => class Reifier extends cls {
return Promise.all([
this[_saveLockFile](saveOpt),
- updateRootPackageJson({ tree: this.idealTree }),
+ updateRootPackageJson(this.idealTree),
]).then(() => process.emit('timeEnd', 'reify:save'))
}
diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/update-root-package-json.js b/deps/npm/node_modules/@npmcli/arborist/lib/update-root-package-json.js
index f5d62f7a5a713a..735ebd10ad16ff 100644
--- a/deps/npm/node_modules/@npmcli/arborist/lib/update-root-package-json.js
+++ b/deps/npm/node_modules/@npmcli/arborist/lib/update-root-package-json.js
@@ -15,7 +15,7 @@ const depTypes = new Set([
'peerDependencies',
])
-async function updateRootPackageJson ({ tree }) {
+const updateRootPackageJson = async tree => {
const filename = resolve(tree.path, 'package.json')
const originalContent = await readFile(filename, 'utf8')
.then(data => parseJSON(data))
@@ -25,6 +25,16 @@ async function updateRootPackageJson ({ tree }) {
...tree.package,
})
+ // optionalDependencies don't need to be repeated in two places
+ if (depsData.dependencies) {
+ if (depsData.optionalDependencies) {
+ for (const name of Object.keys(depsData.optionalDependencies))
+ delete depsData.dependencies[name]
+ }
+ if (Object.keys(depsData.dependencies).length === 0)
+ delete depsData.dependencies
+ }
+
// if there's no package.json, just use internal pkg info as source of truth
const packageJsonContent = originalContent || depsData
diff --git a/deps/npm/node_modules/@npmcli/arborist/package.json b/deps/npm/node_modules/@npmcli/arborist/package.json
index 2107652c6754d3..6e88b9cbb57a10 100644
--- a/deps/npm/node_modules/@npmcli/arborist/package.json
+++ b/deps/npm/node_modules/@npmcli/arborist/package.json
@@ -1,15 +1,15 @@
{
"name": "@npmcli/arborist",
- "version": "2.1.1",
+ "version": "2.2.1",
"description": "Manage node_modules trees",
"dependencies": {
- "@npmcli/installed-package-contents": "^1.0.5",
- "@npmcli/map-workspaces": "^1.0.1",
+ "@npmcli/installed-package-contents": "^1.0.6",
+ "@npmcli/map-workspaces": "^1.0.2",
"@npmcli/metavuln-calculator": "^1.0.1",
"@npmcli/move-file": "^1.1.0",
"@npmcli/name-from-folder": "^1.0.1",
"@npmcli/node-gyp": "^1.0.1",
- "@npmcli/run-script": "^1.8.1",
+ "@npmcli/run-script": "^1.8.2",
"bin-links": "^2.2.1",
"cacache": "^15.0.3",
"common-ancestor-path": "^1.0.1",
@@ -20,11 +20,11 @@
"npm-package-arg": "^8.1.0",
"npm-pick-manifest": "^6.1.0",
"npm-registry-fetch": "^9.0.0",
- "pacote": "^11.2.4",
+ "pacote": "^11.2.6",
"parse-conflict-json": "^1.1.1",
"promise-all-reject-late": "^1.0.0",
"promise-call-limit": "^1.0.1",
- "read-package-json-fast": "^1.2.1",
+ "read-package-json-fast": "^2.0.1",
"readdir-scoped-modules": "^1.1.0",
"semver": "^7.3.4",
"tar": "^6.1.0",
@@ -55,7 +55,7 @@
"postversion": "npm publish",
"prepublishOnly": "git push origin --follow-tags",
"eslint": "eslint",
- "lint": "npm run eslint -- \"lib/**/*.js\" \"test/arborist/*.js\" \"test/*.js\"",
+ "lint": "npm run eslint -- \"lib/**/*.js\" \"test/arborist/*.js\" \"test/*.js\" \"bin/**/*.js\"",
"lintfix": "npm run lint -- --fix",
"benchmark": "node scripts/benchmark.js",
"benchclean": "rm -rf scripts/benchmark/*/"
@@ -67,9 +67,13 @@
"author": "Isaac Z. Schlueter (http://blog.izs.me/)",
"license": "ISC",
"files": [
- "lib/**/*.js"
+ "lib/**/*.js",
+ "bin/**/*.js"
],
"main": "lib/index.js",
+ "bin": {
+ "arborist": "bin/index.js"
+ },
"tap": {
"100": true,
"node-arg": [
diff --git a/deps/npm/node_modules/@npmcli/config/lib/set-envs.js b/deps/npm/node_modules/@npmcli/config/lib/set-envs.js
index 089333796163ed..36d37145466e0d 100644
--- a/deps/npm/node_modules/@npmcli/config/lib/set-envs.js
+++ b/deps/npm/node_modules/@npmcli/config/lib/set-envs.js
@@ -53,13 +53,7 @@ const setEnvs = (config) => {
list: [cliConf, envConf],
} = config
- const { DESTDIR } = env
- if (platform !== 'win32' && DESTDIR && globalPrefix.indexOf(DESTDIR) === 0)
- env.PREFIX = globalPrefix.substr(DESTDIR.length)
- else
- env.PREFIX = globalPrefix
-
- env.INIT_CWD = env.INIT_CWD || process.cwd()
+ env.INIT_CWD = process.cwd()
// if the key is the default value,
// if the environ is NOT the default value,
diff --git a/deps/npm/node_modules/@npmcli/config/package.json b/deps/npm/node_modules/@npmcli/config/package.json
index a7050c73a03ef4..644544a49d8698 100644
--- a/deps/npm/node_modules/@npmcli/config/package.json
+++ b/deps/npm/node_modules/@npmcli/config/package.json
@@ -1,6 +1,6 @@
{
"name": "@npmcli/config",
- "version": "1.2.8",
+ "version": "1.2.9",
"files": [
"lib"
],
diff --git a/deps/npm/node_modules/@npmcli/installed-package-contents/index.js b/deps/npm/node_modules/@npmcli/installed-package-contents/index.js
index fa81551fed4bfe..30427fe28c1086 100755
--- a/deps/npm/node_modules/@npmcli/installed-package-contents/index.js
+++ b/deps/npm/node_modules/@npmcli/installed-package-contents/index.js
@@ -22,6 +22,7 @@ const fs = require('fs')
const readFile = promisify(fs.readFile)
const readdir = promisify(fs.readdir)
const stat = promisify(fs.stat)
+const lstat = promisify(fs.lstat)
const {relative, resolve, basename, dirname} = require('path')
const normalizePackageBin = require('npm-normalize-package-bin')
@@ -131,6 +132,18 @@ const pkgContents = async ({
const recursePromises = []
+ // if we didn't get withFileTypes support, tack that on
+ if (typeof dirEntries[0] === 'string') {
+ // use a map so we can return a promise, but we mutate dirEntries in place
+ // this is much slower than getting the entries from the readdir call,
+ // but polyfills support for node versions before 10.10
+ await Promise.all(dirEntries.map(async (name, index) => {
+ const p = resolve(path, name)
+ const st = await lstat(p)
+ dirEntries[index] = Object.assign(st, {name})
+ }))
+ }
+
for (const entry of dirEntries) {
const p = resolve(path, entry.name)
if (entry.isDirectory() === false) {
diff --git a/deps/npm/node_modules/@npmcli/installed-package-contents/package.json b/deps/npm/node_modules/@npmcli/installed-package-contents/package.json
index 5af7077b6ac989..13916308f99dbf 100644
--- a/deps/npm/node_modules/@npmcli/installed-package-contents/package.json
+++ b/deps/npm/node_modules/@npmcli/installed-package-contents/package.json
@@ -1,10 +1,12 @@
{
"name": "@npmcli/installed-package-contents",
- "version": "1.0.5",
+ "version": "1.0.7",
"description": "Get the list of files installed in a package in node_modules, including bundled dependencies",
"author": "Isaac Z. Schlueter (https://izs.me)",
"main": "index.js",
- "bin": "index.js",
+ "bin": {
+ "installed-package-contents": "index.js"
+ },
"license": "ISC",
"scripts": {
"test": "tap",
@@ -14,16 +16,16 @@
"postpublish": "git push origin --follow-tags"
},
"tap": {
- "check-coverage": true
+ "check-coverage": true,
+ "color": true
},
"devDependencies": {
- "tap": "^14.10.4"
+ "require-inject": "^1.4.4",
+ "tap": "^14.11.0"
},
"dependencies": {
"npm-bundled": "^1.1.1",
- "npm-normalize-package-bin": "^1.0.1",
- "read-package-json-fast": "^1.1.1",
- "readdir-scoped-modules": "^1.1.0"
+ "npm-normalize-package-bin": "^1.0.1"
},
"repository": "git+https://github.com/npm/installed-package-contents",
"files": [
diff --git a/deps/npm/node_modules/@npmcli/map-workspaces/package.json b/deps/npm/node_modules/@npmcli/map-workspaces/package.json
index 2a66a74240d6de..df509648db0c7b 100644
--- a/deps/npm/node_modules/@npmcli/map-workspaces/package.json
+++ b/deps/npm/node_modules/@npmcli/map-workspaces/package.json
@@ -1,6 +1,6 @@
{
"name": "@npmcli/map-workspaces",
- "version": "1.0.1",
+ "version": "1.0.2",
"files": [
"index.js"
],
@@ -52,6 +52,6 @@
"@npmcli/name-from-folder": "^1.0.1",
"glob": "^7.1.6",
"minimatch": "^3.0.4",
- "read-package-json-fast": "^1.2.1"
+ "read-package-json-fast": "^2.0.1"
}
}
diff --git a/deps/npm/node_modules/@npmcli/run-script/package.json b/deps/npm/node_modules/@npmcli/run-script/package.json
index 7adb5c76d82cc1..332f1e74df6573 100644
--- a/deps/npm/node_modules/@npmcli/run-script/package.json
+++ b/deps/npm/node_modules/@npmcli/run-script/package.json
@@ -1,6 +1,6 @@
{
"name": "@npmcli/run-script",
- "version": "1.8.1",
+ "version": "1.8.2",
"description": "Run a lifecycle script for a package (descendant of npm-lifecycle)",
"author": "Isaac Z. Schlueter (https://izs.me)",
"license": "ISC",
@@ -18,22 +18,22 @@
"coverage-map": "map.js"
},
"devDependencies": {
- "eslint": "^7.10.0",
+ "eslint": "^7.19.0",
"eslint-plugin-import": "^2.22.1",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-promise": "^4.2.1",
- "eslint-plugin-standard": "^4.0.1",
+ "eslint-plugin-standard": "^5.0.0",
"minipass": "^3.1.1",
"require-inject": "^1.4.4",
- "tap": "^14.10.6"
+ "tap": "^14.11.0"
},
"dependencies": {
- "@npmcli/node-gyp": "^1.0.0",
- "@npmcli/promise-spawn": "^1.3.0",
+ "@npmcli/node-gyp": "^1.0.1",
+ "@npmcli/promise-spawn": "^1.3.2",
"infer-owner": "^1.0.4",
"node-gyp": "^7.1.0",
"puka": "^1.0.1",
- "read-package-json-fast": "^1.1.3"
+ "read-package-json-fast": "^2.0.1"
},
"files": [
"lib/**/*.js",
diff --git a/deps/npm/node_modules/graceful-fs/clone.js b/deps/npm/node_modules/graceful-fs/clone.js
index 028356c96ed536..dff3cc8c504b4c 100644
--- a/deps/npm/node_modules/graceful-fs/clone.js
+++ b/deps/npm/node_modules/graceful-fs/clone.js
@@ -2,12 +2,16 @@
module.exports = clone
+var getPrototypeOf = Object.getPrototypeOf || function (obj) {
+ return obj.__proto__
+}
+
function clone (obj) {
if (obj === null || typeof obj !== 'object')
return obj
if (obj instanceof Object)
- var copy = { __proto__: obj.__proto__ }
+ var copy = { __proto__: getPrototypeOf(obj) }
else
var copy = Object.create(null)
diff --git a/deps/npm/node_modules/graceful-fs/graceful-fs.js b/deps/npm/node_modules/graceful-fs/graceful-fs.js
index de3df47fd55529..8218b1478a0033 100644
--- a/deps/npm/node_modules/graceful-fs/graceful-fs.js
+++ b/deps/npm/node_modules/graceful-fs/graceful-fs.js
@@ -170,6 +170,21 @@ function patch (fs) {
}
}
+ var fs$copyFile = fs.copyFile
+ if (fs$copyFile)
+ fs.copyFile = copyFile
+ function copyFile (src, dest, cb) {
+ return fs$copyFile(src, dest, function (err) {
+ if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
+ enqueue([fs$copyFile, [src, dest, cb]])
+ else {
+ if (typeof cb === 'function')
+ cb.apply(this, arguments)
+ retry()
+ }
+ })
+ }
+
var fs$readdir = fs.readdir
fs.readdir = readdir
function readdir (path, options, cb) {
diff --git a/deps/npm/node_modules/graceful-fs/package.json b/deps/npm/node_modules/graceful-fs/package.json
index 0a56eb73f37515..8eca6d66ff8d61 100644
--- a/deps/npm/node_modules/graceful-fs/package.json
+++ b/deps/npm/node_modules/graceful-fs/package.json
@@ -1,7 +1,7 @@
{
"name": "graceful-fs",
"description": "A drop-in replacement for fs, making various improvements.",
- "version": "4.2.4",
+ "version": "4.2.5",
"repository": {
"type": "git",
"url": "https://github.com/isaacs/node-graceful-fs"
@@ -14,7 +14,8 @@
"preversion": "npm test",
"postversion": "npm publish",
"postpublish": "git push origin --follow-tags",
- "test": "node test.js | tap -"
+ "test": "nyc --silent node test.js | tap -",
+ "posttest": "nyc report"
},
"keywords": [
"fs",
diff --git a/deps/npm/node_modules/graceful-fs/polyfills.js b/deps/npm/node_modules/graceful-fs/polyfills.js
index a5808d23f132e2..56d08d180017e8 100644
--- a/deps/npm/node_modules/graceful-fs/polyfills.js
+++ b/deps/npm/node_modules/graceful-fs/polyfills.js
@@ -19,6 +19,7 @@ process.chdir = function(d) {
cwd = null
chdir.call(process, d)
}
+if (Object.setPrototypeOf) Object.setPrototypeOf(process.chdir, chdir)
module.exports = patch
@@ -132,7 +133,7 @@ function patch (fs) {
}
// This ensures `util.promisify` works as it does for native `fs.read`.
- read.__proto__ = fs$read
+ if (Object.setPrototypeOf) Object.setPrototypeOf(read, fs$read)
return read
})(fs.read)
diff --git a/deps/npm/node_modules/init-package-json/README.md b/deps/npm/node_modules/init-package-json/README.md
index bd64c1230986fc..528acf355158ab 100644
--- a/deps/npm/node_modules/init-package-json/README.md
+++ b/deps/npm/node_modules/init-package-json/README.md
@@ -23,7 +23,7 @@ var dir = process.cwd()
var configData = { some: 'extra stuff' }
// Any existing stuff from the package.json file is also exposed in the
-// PromZard module as the `package` object. There will also be free
+// PromZard module as the `package` object. There will also be three
// vars for:
// * `filename` path to the package.json file
// * `basename` the tip of the package dir
diff --git a/deps/npm/node_modules/init-package-json/init-package-json.js b/deps/npm/node_modules/init-package-json/init-package-json.js
index 5b2889e55da6ba..83e7342d0aa4f4 100644
--- a/deps/npm/node_modules/init-package-json/init-package-json.js
+++ b/deps/npm/node_modules/init-package-json/init-package-json.js
@@ -103,7 +103,7 @@ function init (dir, input, config, cb) {
if (!pkg.description)
pkg.description = data.description
- var d = JSON.stringify(pkg, null, 2) + '\n'
+ var d = JSON.stringify(updateDeps(pkg), null, 2) + '\n'
function write (yes) {
fs.writeFile(packageFile, d, 'utf8', function (er) {
if (!er && yes && !config.get('silent')) {
@@ -132,6 +132,20 @@ function init (dir, input, config, cb) {
}
+function updateDeps(depsData) {
+ // optionalDependencies don't need to be repeated in two places
+ if (depsData.dependencies) {
+ if (depsData.optionalDependencies) {
+ for (const name of Object.keys(depsData.optionalDependencies))
+ delete depsData.dependencies[name]
+ }
+ if (Object.keys(depsData.dependencies).length === 0)
+ delete depsData.dependencies
+ }
+
+ return depsData
+}
+
// turn the objects into somewhat more humane strings.
function unParsePeople (data) {
if (data.author) data.author = unParsePerson(data.author)
diff --git a/deps/npm/node_modules/init-package-json/package.json b/deps/npm/node_modules/init-package-json/package.json
index abf06969264e49..91c6bfba82049d 100644
--- a/deps/npm/node_modules/init-package-json/package.json
+++ b/deps/npm/node_modules/init-package-json/package.json
@@ -1,6 +1,6 @@
{
"name": "init-package-json",
- "version": "2.0.1",
+ "version": "2.0.2",
"main": "init-package-json.js",
"scripts": {
"test": "tap",
diff --git a/deps/npm/node_modules/libnpmversion/package.json b/deps/npm/node_modules/libnpmversion/package.json
index d7e8d5fa586476..b19edd84171f15 100644
--- a/deps/npm/node_modules/libnpmversion/package.json
+++ b/deps/npm/node_modules/libnpmversion/package.json
@@ -1,6 +1,6 @@
{
"name": "libnpmversion",
- "version": "1.0.7",
+ "version": "1.0.8",
"main": "lib/index.js",
"files": [
"lib/*.js"
@@ -25,13 +25,13 @@
},
"devDependencies": {
"require-inject": "^1.4.4",
- "tap": "^14.10.6"
+ "tap": "^14.11.0"
},
"dependencies": {
- "@npmcli/git": "^2.0.1",
- "@npmcli/run-script": "^1.2.1",
- "read-package-json-fast": "^1.2.1",
- "semver": "^7.1.3",
+ "@npmcli/git": "^2.0.4",
+ "@npmcli/run-script": "^1.8.2",
+ "read-package-json-fast": "^2.0.1",
+ "semver": "^7.3.4",
"stringify-package": "^1.0.1"
}
}
diff --git a/deps/npm/node_modules/pacote/lib/fetcher.js b/deps/npm/node_modules/pacote/lib/fetcher.js
index ad3cacec89bf48..c9a3201f0ae4ab 100644
--- a/deps/npm/node_modules/pacote/lib/fetcher.js
+++ b/deps/npm/node_modules/pacote/lib/fetcher.js
@@ -110,7 +110,7 @@ class FetcherBase {
// going to be packing in the context of a publish, which may set
// a dist-tag, but certainly wants to keep defaulting to latest.
this.npmCliConfig = opts.npmCliConfig || [
- `--cache=${this.cache}`,
+ `--cache=${dirname(this.cache)}`,
`--prefer-offline=${!!this.preferOffline}`,
`--prefer-online=${!!this.preferOnline}`,
`--offline=${!!this.offline}`,
diff --git a/deps/npm/node_modules/pacote/lib/git.js b/deps/npm/node_modules/pacote/lib/git.js
index 406ab5c600221b..14d8a833659ce3 100644
--- a/deps/npm/node_modules/pacote/lib/git.js
+++ b/deps/npm/node_modules/pacote/lib/git.js
@@ -161,12 +161,28 @@ class GitFetcher extends Fetcher {
scripts.prepare))
return
+ // to avoid cases where we have an cycle of git deps that depend
+ // on one another, we only ever do preparation for one instance
+ // of a given git dep along the chain of installations.
+ // Note that this does mean that a dependency MAY in theory end up
+ // trying to run its prepare script using a dependency that has not
+ // been properly prepared itself, but that edge case is smaller
+ // and less hazardous than a fork bomb of npm and git commands.
+ const noPrepare = !process.env._PACOTE_NO_PREPARE_ ? []
+ : process.env._PACOTE_NO_PREPARE_.split('\n')
+ if (noPrepare.includes(this.resolved)) {
+ this.log.info('prepare', 'skip prepare, already seen', this.resolved)
+ return
+ }
+ noPrepare.push(this.resolved)
+
// the DirFetcher will do its own preparation to run the prepare scripts
// All we have to do is put the deps in place so that it can succeed.
return npm(
this.npmBin,
[].concat(this.npmInstallCmd).concat(this.npmCliConfig),
dir,
+ { ...process.env, _PACOTE_NO_PREPARE_: noPrepare.join('\n') },
{ message: 'git dep preparation failed' }
)
})
diff --git a/deps/npm/node_modules/pacote/lib/util/cache-dir.js b/deps/npm/node_modules/pacote/lib/util/cache-dir.js
index d5c0bf28fb81eb..abd24532320279 100644
--- a/deps/npm/node_modules/pacote/lib/util/cache-dir.js
+++ b/deps/npm/node_modules/pacote/lib/util/cache-dir.js
@@ -7,6 +7,6 @@ module.exports = (fakePlatform = false) => {
const home = os.homedir() || resolve(temp, 'npm-' + uidOrPid)
const platform = fakePlatform || process.platform
const cacheExtra = platform === 'win32' ? 'npm-cache' : '.npm'
- const cacheRoot = (platform === 'win32' && process.env.APPDATA) || home
- return resolve(cacheRoot, cacheExtra)
+ const cacheRoot = (platform === 'win32' && process.env.LOCALAPPDATA) || home
+ return resolve(cacheRoot, cacheExtra, '_cacache')
}
diff --git a/deps/npm/node_modules/pacote/lib/util/npm.js b/deps/npm/node_modules/pacote/lib/util/npm.js
index 293695525c7268..f2f29bd0acbd1f 100644
--- a/deps/npm/node_modules/pacote/lib/util/npm.js
+++ b/deps/npm/node_modules/pacote/lib/util/npm.js
@@ -1,9 +1,15 @@
// run an npm command
const spawn = require('@npmcli/promise-spawn')
+const {dirname} = require('path')
-module.exports = (npmBin, npmCommand, cwd, extra) => {
+module.exports = (npmBin, npmCommand, cwd, env, extra) => {
const isJS = npmBin.endsWith('.js')
const cmd = isJS ? process.execPath : npmBin
const args = (isJS ? [npmBin] : []).concat(npmCommand)
- return spawn(cmd, args, { cwd, stdioString: true }, extra)
+ // when installing to run the `prepare` script for a git dep, we need
+ // to ensure that we don't run into a cycle of checking out packages
+ // in temp directories. this lets us link previously-seen repos that
+ // are also being prepared.
+
+ return spawn(cmd, args, { cwd, stdioString: true, env }, extra)
}
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/.editorconfig b/deps/npm/node_modules/pacote/node_modules/err-code/.editorconfig
new file mode 100644
index 00000000000000..829280bee1ac31
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*]
+indent_style = space
+indent_size = 4
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[package.json]
+indent_size = 2
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/.eslintrc.json b/deps/npm/node_modules/pacote/node_modules/err-code/.eslintrc.json
new file mode 100644
index 00000000000000..4829595a424ed5
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/.eslintrc.json
@@ -0,0 +1,7 @@
+{
+ "root": true,
+ "extends": [
+ "@satazor/eslint-config/es6",
+ "@satazor/eslint-config/addons/node"
+ ]
+}
\ No newline at end of file
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/.travis.yml b/deps/npm/node_modules/pacote/node_modules/err-code/.travis.yml
new file mode 100644
index 00000000000000..b29cf66a2b3b3b
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/.travis.yml
@@ -0,0 +1,4 @@
+language: node_js
+node_js:
+ - "4"
+ - "6"
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/README.md b/deps/npm/node_modules/pacote/node_modules/err-code/README.md
new file mode 100644
index 00000000000000..5afdab00c93482
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/README.md
@@ -0,0 +1,70 @@
+# err-code
+
+[![NPM version][npm-image]][npm-url] [![Downloads][downloads-image]][npm-url] [![Build Status][travis-image]][travis-url] [![Dependency status][david-dm-image]][david-dm-url] [![Dev Dependency status][david-dm-dev-image]][david-dm-dev-url] [![Greenkeeper badge][greenkeeper-image]][greenkeeper-url]
+
+[npm-url]:https://npmjs.org/package/err-code
+[downloads-image]:http://img.shields.io/npm/dm/err-code.svg
+[npm-image]:http://img.shields.io/npm/v/err-code.svg
+[travis-url]:https://travis-ci.org/IndigoUnited/js-err-code
+[travis-image]:http://img.shields.io/travis/IndigoUnited/js-err-code/master.svg
+[david-dm-url]:https://david-dm.org/IndigoUnited/js-err-code
+[david-dm-image]:https://img.shields.io/david/IndigoUnited/js-err-code.svg
+[david-dm-dev-url]:https://david-dm.org/IndigoUnited/js-err-code?type=dev
+[david-dm-dev-image]:https://img.shields.io/david/dev/IndigoUnited/js-err-code.svg
+[greenkeeper-image]:https://badges.greenkeeper.io/IndigoUnited/js-err-code.svg
+[greenkeeper-url]:https://greenkeeper.io/
+
+Create new error instances with a code and additional properties.
+
+
+## Installation
+
+```console
+$ npm install err-code
+// or
+$ bower install err-code
+```
+
+The browser file is named index.umd.js which supports CommonJS, AMD and globals (errCode).
+
+
+## Why
+
+I find myself doing this repeatedly:
+
+```js
+var err = new Error('My message');
+err.code = 'SOMECODE';
+err.detail = 'Additional information about the error';
+throw err;
+```
+
+
+## Usage
+
+Simple usage.
+
+```js
+var errcode = require('err-code');
+
+// fill error with message + code
+throw errcode(new Error('My message'), 'ESOMECODE');
+// fill error with message + code + props
+throw errcode(new Error('My message'), 'ESOMECODE', { detail: 'Additional information about the error' });
+// fill error with message + props
+throw errcode(new Error('My message'), { detail: 'Additional information about the error' });
+```
+
+## Pre-existing fields
+
+If the passed `Error` already has a `.code` field, or fields specified in the third argument to `errcode` they will be overwritten, unless the fields are read only or otherwise throw during assignment in which case a new object will be created that shares a prototype chain with the original `Error`. The `.stack` and `.message` properties will be carried over from the original error and `.code` or any passed properties will be set on it.
+
+
+## Tests
+
+`$ npm test`
+
+
+## License
+
+Released under the [MIT License](http://www.opensource.org/licenses/mit-license.php).
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/bower.json b/deps/npm/node_modules/pacote/node_modules/err-code/bower.json
new file mode 100644
index 00000000000000..a39cb702cedb21
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/bower.json
@@ -0,0 +1,30 @@
+{
+ "name": "err-code",
+ "version": "1.1.1",
+ "description": "Create new error instances with a code and additional properties",
+ "main": "index.umd.js",
+ "homepage": "https://github.com/IndigoUnited/js-err-code",
+ "authors": [
+ "IndigoUnited (http://indigounited.com)"
+ ],
+ "moduleType": [
+ "amd",
+ "globals",
+ "node"
+ ],
+ "keywords": [
+ "error",
+ "err",
+ "code",
+ "properties",
+ "property"
+ ],
+ "license": "MIT",
+ "ignore": [
+ "**/.*",
+ "node_modules",
+ "bower_components",
+ "test",
+ "tests"
+ ]
+}
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/index.js b/deps/npm/node_modules/pacote/node_modules/err-code/index.js
new file mode 100644
index 00000000000000..9ff3e9c5de4c2c
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/index.js
@@ -0,0 +1,47 @@
+'use strict';
+
+function assign(obj, props) {
+ for (const key in props) {
+ Object.defineProperty(obj, key, {
+ value: props[key],
+ enumerable: true,
+ configurable: true,
+ });
+ }
+
+ return obj;
+}
+
+function createError(err, code, props) {
+ if (!err || typeof err === 'string') {
+ throw new TypeError('Please pass an Error to err-code');
+ }
+
+ if (!props) {
+ props = {};
+ }
+
+ if (typeof code === 'object') {
+ props = code;
+ code = undefined;
+ }
+
+ if (code != null) {
+ props.code = code;
+ }
+
+ try {
+ return assign(err, props);
+ } catch (_) {
+ props.message = err.message;
+ props.stack = err.stack;
+
+ const ErrClass = function () {};
+
+ ErrClass.prototype = Object.create(Object.getPrototypeOf(err));
+
+ return assign(new ErrClass(), props);
+ }
+}
+
+module.exports = createError;
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/index.umd.js b/deps/npm/node_modules/pacote/node_modules/err-code/index.umd.js
new file mode 100644
index 00000000000000..41007269d3d039
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/index.umd.js
@@ -0,0 +1,51 @@
+(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.errCode = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i index.umd.js"
+ },
+ "bugs": {
+ "url": "https://github.com/IndigoUnited/js-err-code/issues/"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git://github.com/IndigoUnited/js-err-code.git"
+ },
+ "keywords": [
+ "error",
+ "err",
+ "code",
+ "properties",
+ "property"
+ ],
+ "author": "IndigoUnited (http://indigounited.com)",
+ "license": "MIT",
+ "devDependencies": {
+ "@satazor/eslint-config": "^3.0.0",
+ "browserify": "^16.5.1",
+ "eslint": "^7.2.0",
+ "expect.js": "^0.3.1",
+ "mocha": "^8.0.1"
+ }
+}
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/test/.eslintrc.json b/deps/npm/node_modules/pacote/node_modules/err-code/test/.eslintrc.json
new file mode 100644
index 00000000000000..f9fbb2d6ce6ab8
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/test/.eslintrc.json
@@ -0,0 +1,5 @@
+{
+ "env": {
+ "mocha": true
+ }
+}
\ No newline at end of file
diff --git a/deps/npm/node_modules/pacote/node_modules/err-code/test/test.js b/deps/npm/node_modules/pacote/node_modules/err-code/test/test.js
new file mode 100644
index 00000000000000..22ba0a8a1a8c1f
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/err-code/test/test.js
@@ -0,0 +1,159 @@
+'use strict';
+
+const errcode = require('../index');
+const expect = require('expect.js');
+
+describe('errcode', () => {
+ describe('string as first argument', () => {
+ it('should throw an error', () => {
+ expect(() => { errcode('my message'); }).to.throwError((err) => {
+ expect(err).to.be.a(TypeError);
+ });
+ });
+ });
+
+ describe('error as first argument', () => {
+ it('should accept an error and do nothing', () => {
+ const myErr = new Error('my message');
+ const err = errcode(myErr);
+
+ expect(err).to.be(myErr);
+ expect(err.hasOwnProperty(err.code)).to.be(false);
+ });
+
+ it('should accept an error and add a code', () => {
+ const myErr = new Error('my message');
+ const err = errcode(myErr, 'ESOME');
+
+ expect(err).to.be(myErr);
+ expect(err.code).to.be('ESOME');
+ });
+
+ it('should accept an error object and add code & properties', () => {
+ const myErr = new Error('my message');
+ const err = errcode(myErr, 'ESOME', { foo: 'bar', bar: 'foo' });
+
+ expect(err).to.be.an(Error);
+ expect(err.code).to.be('ESOME');
+ expect(err.foo).to.be('bar');
+ expect(err.bar).to.be('foo');
+ });
+
+ it('should create an error object without code but with properties', () => {
+ const myErr = new Error('my message');
+ const err = errcode(myErr, { foo: 'bar', bar: 'foo' });
+
+ expect(err).to.be.an(Error);
+ expect(err.code).to.be(undefined);
+ expect(err.foo).to.be('bar');
+ expect(err.bar).to.be('foo');
+ });
+
+ it('should set a non-writable field', () => {
+ const myErr = new Error('my message');
+
+ Object.defineProperty(myErr, 'code', {
+ value: 'derp',
+ writable: false,
+ });
+ const err = errcode(myErr, 'ERR_WAT');
+
+ expect(err).to.be.an(Error);
+ expect(err.stack).to.equal(myErr.stack);
+ expect(err.code).to.be('ERR_WAT');
+ });
+
+ it('should add a code to frozen object', () => {
+ const myErr = new Error('my message');
+ const err = errcode(Object.freeze(myErr), 'ERR_WAT');
+
+ expect(err).to.be.an(Error);
+ expect(err.stack).to.equal(myErr.stack);
+ expect(err.code).to.be('ERR_WAT');
+ });
+
+ it('should to set a field that throws at assignment time', () => {
+ const myErr = new Error('my message');
+
+ Object.defineProperty(myErr, 'code', {
+ enumerable: true,
+ set() {
+ throw new Error('Nope!');
+ },
+ get() {
+ return 'derp';
+ },
+ });
+ const err = errcode(myErr, 'ERR_WAT');
+
+ expect(err).to.be.an(Error);
+ expect(err.stack).to.equal(myErr.stack);
+ expect(err.code).to.be('ERR_WAT');
+ });
+
+ it('should retain error type', () => {
+ const myErr = new TypeError('my message');
+
+ Object.defineProperty(myErr, 'code', {
+ value: 'derp',
+ writable: false,
+ });
+ const err = errcode(myErr, 'ERR_WAT');
+
+ expect(err).to.be.a(TypeError);
+ expect(err.stack).to.equal(myErr.stack);
+ expect(err.code).to.be('ERR_WAT');
+ });
+
+ it('should add a code to a class that extends Error', () => {
+ class CustomError extends Error {
+ set code(val) {
+ throw new Error('Nope!');
+ }
+ }
+
+ const myErr = new CustomError('my message');
+
+ Object.defineProperty(myErr, 'code', {
+ value: 'derp',
+ writable: false,
+ configurable: false,
+ });
+ const err = errcode(myErr, 'ERR_WAT');
+
+ expect(err).to.be.a(CustomError);
+ expect(err.stack).to.equal(myErr.stack);
+ expect(err.code).to.be('ERR_WAT');
+
+ // original prototype chain should be intact
+ expect(() => {
+ const otherErr = new CustomError('my message');
+
+ otherErr.code = 'derp';
+ }).to.throwError();
+ });
+
+ it('should support errors that are not Errors', () => {
+ const err = errcode({
+ message: 'Oh noes!',
+ }, 'ERR_WAT');
+
+ expect(err.message).to.be('Oh noes!');
+ expect(err.code).to.be('ERR_WAT');
+ });
+ });
+
+ describe('falsy first arguments', () => {
+ it('should not allow passing null as the first argument', () => {
+ expect(() => { errcode(null); }).to.throwError((err) => {
+ expect(err).to.be.a(TypeError);
+ });
+ });
+
+ it('should not allow passing undefined as the first argument', () => {
+ expect(() => { errcode(undefined); }).to.throwError((err) => {
+ expect(err).to.be.a(TypeError);
+ });
+ });
+ });
+});
diff --git a/deps/npm/node_modules/pacote/node_modules/promise-retry/.editorconfig b/deps/npm/node_modules/pacote/node_modules/promise-retry/.editorconfig
new file mode 100644
index 00000000000000..8bc4f108d549f1
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/promise-retry/.editorconfig
@@ -0,0 +1,15 @@
+root = true
+
+[*]
+indent_style = space
+indent_size = 4
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[*.md]
+trim_trailing_whitespace = false
+
+[package.json]
+indent_size = 2
diff --git a/deps/npm/node_modules/pacote/node_modules/promise-retry/.travis.yml b/deps/npm/node_modules/pacote/node_modules/promise-retry/.travis.yml
new file mode 100644
index 00000000000000..e2d26a9cad62b0
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/promise-retry/.travis.yml
@@ -0,0 +1,4 @@
+language: node_js
+node_js:
+ - "10"
+ - "12"
diff --git a/deps/npm/node_modules/pacote/node_modules/promise-retry/LICENSE b/deps/npm/node_modules/pacote/node_modules/promise-retry/LICENSE
new file mode 100644
index 00000000000000..db5e914de1f585
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/promise-retry/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 IndigoUnited
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/deps/npm/node_modules/pacote/node_modules/promise-retry/README.md b/deps/npm/node_modules/pacote/node_modules/promise-retry/README.md
new file mode 100644
index 00000000000000..10c2b6285b797b
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/promise-retry/README.md
@@ -0,0 +1,94 @@
+# node-promise-retry
+
+[![NPM version][npm-image]][npm-url] [![Downloads][downloads-image]][npm-url] [![Build Status][travis-image]][travis-url] [![Dependency status][david-dm-image]][david-dm-url] [![Dev Dependency status][david-dm-dev-image]][david-dm-dev-url] [![Greenkeeper badge][greenkeeper-image]][greenkeeper-url]
+
+[npm-url]:https://npmjs.org/package/promise-retry
+[downloads-image]:http://img.shields.io/npm/dm/promise-retry.svg
+[npm-image]:http://img.shields.io/npm/v/promise-retry.svg
+[travis-url]:https://travis-ci.org/IndigoUnited/node-promise-retry
+[travis-image]:http://img.shields.io/travis/IndigoUnited/node-promise-retry/master.svg
+[david-dm-url]:https://david-dm.org/IndigoUnited/node-promise-retry
+[david-dm-image]:https://img.shields.io/david/IndigoUnited/node-promise-retry.svg
+[david-dm-dev-url]:https://david-dm.org/IndigoUnited/node-promise-retry?type=dev
+[david-dm-dev-image]:https://img.shields.io/david/dev/IndigoUnited/node-promise-retry.svg
+[greenkeeper-image]:https://badges.greenkeeper.io/IndigoUnited/node-promise-retry.svg
+[greenkeeper-url]:https://greenkeeper.io/
+
+Retries a function that returns a promise, leveraging the power of the [retry](https://github.com/tim-kos/node-retry) module to the promises world.
+
+There's already some modules that are able to retry functions that return promises but
+they were rather difficult to use or do not offer an easy way to do conditional retries.
+
+
+## Installation
+
+`$ npm install promise-retry`
+
+
+## Usage
+
+### promiseRetry(fn, [options])
+
+Calls `fn` until the returned promise ends up fulfilled or rejected with an error different than
+a `retry` error.
+The `options` argument is an object which maps to the [retry](https://github.com/tim-kos/node-retry) module options:
+
+- `retries`: The maximum amount of times to retry the operation. Default is `10`.
+- `factor`: The exponential factor to use. Default is `2`.
+- `minTimeout`: The number of milliseconds before starting the first retry. Default is `1000`.
+- `maxTimeout`: The maximum number of milliseconds between two retries. Default is `Infinity`.
+- `randomize`: Randomizes the timeouts by multiplying with a factor between `1` to `2`. Default is `false`.
+
+
+The `fn` function will receive a `retry` function as its first argument that should be called with an error whenever you want to retry `fn`. The `retry` function will always throw an error.
+If there are retries left, it will throw a special `retry` error that will be handled internally to call `fn` again.
+If there are no retries left, it will throw the actual error passed to it.
+
+If you prefer, you can pass the options first using the alternative function signature `promiseRetry([options], fn)`.
+
+## Example
+```js
+var promiseRetry = require('promise-retry');
+
+// Simple example
+promiseRetry(function (retry, number) {
+ console.log('attempt number', number);
+
+ return doSomething()
+ .catch(retry);
+})
+.then(function (value) {
+ // ..
+}, function (err) {
+ // ..
+});
+
+// Conditional example
+promiseRetry(function (retry, number) {
+ console.log('attempt number', number);
+
+ return doSomething()
+ .catch(function (err) {
+ if (err.code === 'ETIMEDOUT') {
+ retry(err);
+ }
+
+ throw err;
+ });
+})
+.then(function (value) {
+ // ..
+}, function (err) {
+ // ..
+});
+```
+
+
+## Tests
+
+`$ npm test`
+
+
+## License
+
+Released under the [MIT License](http://www.opensource.org/licenses/mit-license.php).
diff --git a/deps/npm/node_modules/pacote/node_modules/promise-retry/index.js b/deps/npm/node_modules/pacote/node_modules/promise-retry/index.js
new file mode 100644
index 00000000000000..5df48ae91602d6
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/promise-retry/index.js
@@ -0,0 +1,52 @@
+'use strict';
+
+var errcode = require('err-code');
+var retry = require('retry');
+
+var hasOwn = Object.prototype.hasOwnProperty;
+
+function isRetryError(err) {
+ return err && err.code === 'EPROMISERETRY' && hasOwn.call(err, 'retried');
+}
+
+function promiseRetry(fn, options) {
+ var temp;
+ var operation;
+
+ if (typeof fn === 'object' && typeof options === 'function') {
+ // Swap options and fn when using alternate signature (options, fn)
+ temp = options;
+ options = fn;
+ fn = temp;
+ }
+
+ operation = retry.operation(options);
+
+ return new Promise(function (resolve, reject) {
+ operation.attempt(function (number) {
+ Promise.resolve()
+ .then(function () {
+ return fn(function (err) {
+ if (isRetryError(err)) {
+ err = err.retried;
+ }
+
+ throw errcode(new Error('Retrying'), 'EPROMISERETRY', { retried: err });
+ }, number);
+ })
+ .then(resolve, function (err) {
+ if (isRetryError(err)) {
+ err = err.retried;
+
+ if (operation.retry(err || new Error())) {
+ return;
+ }
+ }
+
+ reject(err);
+ });
+ });
+ });
+}
+
+module.exports = promiseRetry;
diff --git a/deps/npm/node_modules/pacote/node_modules/promise-retry/package.json b/deps/npm/node_modules/pacote/node_modules/promise-retry/package.json
new file mode 100644
index 00000000000000..6842de823fd198
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/promise-retry/package.json
@@ -0,0 +1,37 @@
+{
+ "name": "promise-retry",
+ "version": "2.0.1",
+ "description": "Retries a function that returns a promise, leveraging the power of the retry module.",
+ "main": "index.js",
+ "scripts": {
+ "test": "mocha --bail -t 10000"
+ },
+ "bugs": {
+ "url": "https://github.com/IndigoUnited/node-promise-retry/issues/"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git://github.com/IndigoUnited/node-promise-retry.git"
+ },
+ "keywords": [
+ "retry",
+ "promise",
+ "backoff",
+ "repeat",
+ "replay"
+ ],
+ "author": "IndigoUnited (http://indigounited.com)",
+ "license": "MIT",
+ "devDependencies": {
+ "expect.js": "^0.3.1",
+ "mocha": "^8.0.1",
+ "sleep-promise": "^8.0.1"
+ },
+ "dependencies": {
+ "err-code": "^2.0.2",
+ "retry": "^0.12.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+}
diff --git a/deps/npm/node_modules/pacote/node_modules/promise-retry/test/test.js b/deps/npm/node_modules/pacote/node_modules/promise-retry/test/test.js
new file mode 100644
index 00000000000000..466b0991e0f558
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/promise-retry/test/test.js
@@ -0,0 +1,263 @@
+'use strict';
+
+var expect = require('expect.js');
+var promiseRetry = require('../');
+var promiseDelay = require('sleep-promise');
+
+describe('promise-retry', function () {
+ it('should call fn again if retry was called', function () {
+ var count = 0;
+
+ return promiseRetry(function (retry) {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ if (count <= 2) {
+ retry(new Error('foo'));
+ }
+
+ return 'final';
+ });
+ }, { factor: 1 })
+ .then(function (value) {
+ expect(value).to.be('final');
+ expect(count).to.be(3);
+ }, function () {
+ throw new Error('should not fail');
+ });
+ });
+
+ it('should call fn with the attempt number', function () {
+ var count = 0;
+
+ return promiseRetry(function (retry, number) {
+ count += 1;
+ expect(count).to.equal(number);
+
+ return promiseDelay(10)
+ .then(function () {
+ if (count <= 2) {
+ retry(new Error('foo'));
+ }
+
+ return 'final';
+ });
+ }, { factor: 1 })
+ .then(function (value) {
+ expect(value).to.be('final');
+ expect(count).to.be(3);
+ }, function () {
+ throw new Error('should not fail');
+ });
+ });
+
+ it('should not retry on fulfillment if retry was not called', function () {
+ var count = 0;
+
+ return promiseRetry(function () {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ return 'final';
+ });
+ })
+ .then(function (value) {
+ expect(value).to.be('final');
+ expect(count).to.be(1);
+ }, function () {
+ throw new Error('should not fail');
+ });
+ });
+
+ it('should not retry on rejection if retry was not called', function () {
+ var count = 0;
+
+ return promiseRetry(function () {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ throw new Error('foo');
+ });
+ })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err.message).to.be('foo');
+ expect(count).to.be(1);
+ });
+ });
+
+ it('should not retry on rejection if nr of retries is 0', function () {
+ var count = 0;
+
+ return promiseRetry(function (retry) {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ throw new Error('foo');
+ })
+ .catch(retry);
+ }, { retries : 0 })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err.message).to.be('foo');
+ expect(count).to.be(1);
+ });
+ });
+
+ it('should reject the promise if the retries were exceeded', function () {
+ var count = 0;
+
+ return promiseRetry(function (retry) {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ throw new Error('foo');
+ })
+ .catch(retry);
+ }, { retries: 2, factor: 1 })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err.message).to.be('foo');
+ expect(count).to.be(3);
+ });
+ });
+
+ it('should pass options to the underlying retry module', function () {
+ var count = 0;
+
+ return promiseRetry(function (retry) {
+ return promiseDelay(10)
+ .then(function () {
+ if (count < 2) {
+ count += 1;
+ retry(new Error('foo'));
+ }
+
+ return 'final';
+ });
+ }, { retries: 1, factor: 1 })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err.message).to.be('foo');
+ });
+ });
+
+ it('should convert direct fulfillments into promises', function () {
+ return promiseRetry(function () {
+ return 'final';
+ }, { factor: 1 })
+ .then(function (value) {
+ expect(value).to.be('final');
+ }, function () {
+ throw new Error('should not fail');
+ });
+ });
+
+ it('should convert direct rejections into promises', function () {
+ promiseRetry(function () {
+ throw new Error('foo');
+ }, { retries: 1, factor: 1 })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err.message).to.be('foo');
+ });
+ });
+
+ it('should not crash on undefined rejections', function () {
+ return promiseRetry(function () {
+ throw undefined;
+ }, { retries: 1, factor: 1 })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err).to.be(undefined);
+ })
+ .then(function () {
+ return promiseRetry(function (retry) {
+ retry();
+ }, { retries: 1, factor: 1 });
+ })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err).to.be(undefined);
+ });
+ });
+
+ it('should retry if retry() was called with undefined', function () {
+ var count = 0;
+
+ return promiseRetry(function (retry) {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ if (count <= 2) {
+ retry();
+ }
+
+ return 'final';
+ });
+ }, { factor: 1 })
+ .then(function (value) {
+ expect(value).to.be('final');
+ expect(count).to.be(3);
+ }, function () {
+ throw new Error('should not fail');
+ });
+ });
+
+ it('should work with several retries in the same chain', function () {
+ var count = 0;
+
+ return promiseRetry(function (retry) {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ retry(new Error('foo'));
+ })
+ .catch(function (err) {
+ retry(err);
+ });
+ }, { retries: 1, factor: 1 })
+ .then(function () {
+ throw new Error('should not succeed');
+ }, function (err) {
+ expect(err.message).to.be('foo');
+ expect(count).to.be(2);
+ });
+ });
+
+ it('should allow options to be passed first', function () {
+ var count = 0;
+
+ return promiseRetry({ factor: 1 }, function (retry) {
+ count += 1;
+
+ return promiseDelay(10)
+ .then(function () {
+ if (count <= 2) {
+ retry(new Error('foo'));
+ }
+
+ return 'final';
+ });
+ }).then(function (value) {
+ expect(value).to.be('final');
+ expect(count).to.be(3);
+ }, function () {
+ throw new Error('should not fail');
+ });
+ });
+});
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/.npmignore b/deps/npm/node_modules/pacote/node_modules/retry/.npmignore
new file mode 100644
index 00000000000000..432f2855d6839d
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/.npmignore
@@ -0,0 +1,3 @@
+/node_modules/*
+npm-debug.log
+coverage
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/.travis.yml b/deps/npm/node_modules/pacote/node_modules/retry/.travis.yml
new file mode 100644
index 00000000000000..bcde2122b90065
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/.travis.yml
@@ -0,0 +1,15 @@
+language: node_js
+node_js:
+ - "4"
+before_install:
+ - pip install --user codecov
+after_success:
+ - codecov --file coverage/lcov.info --disable search
+# travis encrypt [subdomain]:[api token]@[room id]
+# notifications:
+# email: false
+# campfire:
+# rooms:
+# secure: xyz
+# on_failure: always
+# on_success: always
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/License b/deps/npm/node_modules/pacote/node_modules/retry/License
new file mode 100644
index 00000000000000..0b58de379fb308
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/License
@@ -0,0 +1,21 @@
+Copyright (c) 2011:
+Tim Koschützki (tim@debuggable.com)
+Felix Geisendörfer (felix@debuggable.com)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/Makefile b/deps/npm/node_modules/pacote/node_modules/retry/Makefile
new file mode 100644
index 00000000000000..1968d8ff8b07bc
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/Makefile
@@ -0,0 +1,18 @@
+SHELL := /bin/bash
+
+release-major: test
+ npm version major -m "Release %s"
+ git push
+ npm publish
+
+release-minor: test
+ npm version minor -m "Release %s"
+ git push
+ npm publish
+
+release-patch: test
+ npm version patch -m "Release %s"
+ git push
+ npm publish
+
+.PHONY: test release-major release-minor release-patch
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/README.md b/deps/npm/node_modules/pacote/node_modules/retry/README.md
new file mode 100644
index 00000000000000..1c888deee9c9d4
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/README.md
@@ -0,0 +1,227 @@
+
+[![Build Status](https://secure.travis-ci.org/tim-kos/node-retry.png?branch=master)](http://travis-ci.org/tim-kos/node-retry "Check this project's build status on TravisCI")
+[![codecov](https://codecov.io/gh/tim-kos/node-retry/branch/master/graph/badge.svg)](https://codecov.io/gh/tim-kos/node-retry)
+
+
+# retry
+
+Abstraction for exponential and custom retry strategies for failed operations.
+
+## Installation
+
+ npm install retry
+
+## Current Status
+
+This module has been tested and is ready to be used.
+
+## Tutorial
+
+The example below will retry a potentially failing `dns.resolve` operation
+`10` times using an exponential backoff strategy. With the default settings, this
+means the last attempt is made after `17 minutes and 3 seconds`.
+
+``` javascript
+var dns = require('dns');
+var retry = require('retry');
+
+function faultTolerantResolve(address, cb) {
+ var operation = retry.operation();
+
+ operation.attempt(function(currentAttempt) {
+ dns.resolve(address, function(err, addresses) {
+ if (operation.retry(err)) {
+ return;
+ }
+
+ cb(err ? operation.mainError() : null, addresses);
+ });
+ });
+}
+
+faultTolerantResolve('nodejs.org', function(err, addresses) {
+ console.log(err, addresses);
+});
+```
+
+Of course you can also configure the factors that go into the exponential
+backoff. See the API documentation below for all available settings.
+currentAttempt is an int representing the number of attempts so far.
+
+``` javascript
+var operation = retry.operation({
+ retries: 5,
+ factor: 3,
+ minTimeout: 1 * 1000,
+ maxTimeout: 60 * 1000,
+ randomize: true,
+});
+```
+
+## API
+
+### retry.operation([options])
+
+Creates a new `RetryOperation` object. `options` is the same as `retry.timeouts()`'s `options`, with two additions:
+
+* `forever`: Whether to retry forever, defaults to `false`.
+* `unref`: Whether to [unref](https://nodejs.org/api/timers.html#timers_unref) the setTimeout's, defaults to `false`.
+* `maxRetryTime`: The maximum time (in milliseconds) that the retried operation is allowed to run. Default is `Infinity`.
+
+### retry.timeouts([options])
+
+Returns an array of timeouts. All time `options` and return values are in
+milliseconds. If `options` is an array, a copy of that array is returned.
+
+`options` is a JS object that can contain any of the following keys:
+
+* `retries`: The maximum amount of times to retry the operation. Default is `10`. Setting this to `1` means `do it once, then retry it once`.
+* `factor`: The exponential factor to use. Default is `2`.
+* `minTimeout`: The number of milliseconds before starting the first retry. Default is `1000`.
+* `maxTimeout`: The maximum number of milliseconds between two retries. Default is `Infinity`.
+* `randomize`: Randomizes the timeouts by multiplying with a factor between `1` to `2`. Default is `false`.
+
+The formula used to calculate the individual timeouts is:
+
+```
+Math.min(random * minTimeout * Math.pow(factor, attempt), maxTimeout)
+```
+
+Have a look at [this article][article] for a better explanation of approach.
+
+If you want to tune your `factor` / `times` settings to attempt the last retry
+after a certain amount of time, you can use wolfram alpha. For example in order
+to tune for `10` attempts in `5 minutes`, you can use this equation:
+
+![screenshot](https://github.com/tim-kos/node-retry/raw/master/equation.gif)
+
+Explaining the various values from left to right:
+
+* `k = 0 ... 9`: The `retries` value (10)
+* `1000`: The `minTimeout` value in ms (1000)
+* `x^k`: No need to change this, `x` will be your resulting factor
+* `5 * 60 * 1000`: The desired total amount of time for retrying in ms (5 minutes)
+
+To make this a little easier for you, use wolfram alpha to do the calculations:
+
+
+
+[article]: http://dthain.blogspot.com/2009/02/exponential-backoff-in-distributed.html
+
+### retry.createTimeout(attempt, opts)
+
+Returns a new `timeout` (integer in milliseconds) based on the given parameters.
+
+`attempt` is an integer representing for which retry the timeout should be calculated. If your retry operation was executed 4 times you had one attempt and 3 retries. If you then want to calculate a new timeout, you should set `attempt` to 4 (attempts are zero-indexed).
+
+`opts` can include `factor`, `minTimeout`, `randomize` (boolean) and `maxTimeout`. They are documented above.
+
+`retry.createTimeout()` is used internally by `retry.timeouts()` and is public for you to be able to create your own timeouts for reinserting an item, see [issue #13](https://github.com/tim-kos/node-retry/issues/13).
+
+### retry.wrap(obj, [options], [methodNames])
+
+Wrap all functions of the `obj` with retry. Optionally you can pass operation options and
+an array of method names which need to be wrapped.
+
+```
+retry.wrap(obj)
+
+retry.wrap(obj, ['method1', 'method2'])
+
+retry.wrap(obj, {retries: 3})
+
+retry.wrap(obj, {retries: 3}, ['method1', 'method2'])
+```
+The `options` object can take any options that the usual call to `retry.operation` can take.
+
+### new RetryOperation(timeouts, [options])
+
+Creates a new `RetryOperation` where `timeouts` is an array where each value is
+a timeout given in milliseconds.
+
+Available options:
+* `forever`: Whether to retry forever, defaults to `false`.
+* `unref`: Whether to [unref](https://nodejs.org/api/timers.html#timers_unref) the setTimeout's, defaults to `false`.
+
+If `forever` is true, the following changes happen:
+* `RetryOperation.errors()` will only output an array of one item: the last error.
+* `RetryOperation` will repeatedly use the `timeouts` array. Once all of its timeouts have been used up, it restarts with the first timeout, then uses the second and so on.
+
+#### retryOperation.errors()
+
+Returns an array of all errors that have been passed to `retryOperation.retry()` so far. The
+returning array has the errors ordered chronologically based on when they were passed to
+`retryOperation.retry()`, which means the first passed error is at index zero and the last is
+at the last index.
+
+#### retryOperation.mainError()
+
+A reference to the error object that occurred most frequently. Errors are
+compared using the `error.message` property.
+
+If multiple error messages occurred the same amount of time, the last error
+object with that message is returned.
+
+If no errors occurred so far, the value is `null`.
+
+#### retryOperation.attempt(fn, timeoutOps)
+
+Defines the function `fn` that is to be retried and executes it for the first
+time right away. The `fn` function can receive an optional `currentAttempt` callback that represents the number of attempts to execute `fn` so far.
+
+Optionally defines `timeoutOps` which is an object having a property `timeout` in milliseconds and a property `cb` callback function.
+Whenever your retry operation takes longer than `timeout` to execute, the timeout callback function `cb` is called.
+
+
+#### retryOperation.try(fn)
+
+This is an alias for `retryOperation.attempt(fn)`. This is deprecated. Please use `retryOperation.attempt(fn)` instead.
+
+#### retryOperation.start(fn)
+
+This is an alias for `retryOperation.attempt(fn)`. This is deprecated. Please use `retryOperation.attempt(fn)` instead.
+
+#### retryOperation.retry(error)
+
+Returns `false` when no `error` value is given, or the maximum amount of retries
+has been reached.
+
+Otherwise it returns `true`, and retries the operation after the timeout for
+the current attempt number.
+
+#### retryOperation.stop()
+
+Allows you to stop the operation being retried. Useful for aborting the operation on a fatal error etc.
+
+#### retryOperation.reset()
+
+Resets the internal state of the operation object, so that you can call `attempt()` again as if this was a new operation object.
+
+#### retryOperation.attempts()
+
+Returns an int representing the number of attempts it took to call `fn` before it was successful.
+
+## License
+
+retry is licensed under the MIT license.
+
+
+# Changelog
+
+0.10.0 Adding `stop` functionality, thanks to @maxnachlinger.
+
+0.9.0 Adding `unref` functionality, thanks to @satazor.
+
+0.8.0 Implementing retry.wrap.
+
+0.7.0 Some bug fixes and made retry.createTimeout() public. Fixed issues [#10](https://github.com/tim-kos/node-retry/issues/10), [#12](https://github.com/tim-kos/node-retry/issues/12), and [#13](https://github.com/tim-kos/node-retry/issues/13).
+
+0.6.0 Introduced optional timeOps parameter for the attempt() function which is an object having a property timeout in milliseconds and a property cb callback function. Whenever your retry operation takes longer than timeout to execute, the timeout callback function cb is called.
+
+0.5.0 Some minor refactoring.
+
+0.4.0 Changed retryOperation.try() to retryOperation.attempt(). Deprecated the aliases start() and try() for it.
+
+0.3.0 Added retryOperation.start() which is an alias for retryOperation.try().
+
+0.2.0 Added attempts() function and parameter to retryOperation.try() representing the number of attempts it took to call fn().
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/equation.gif b/deps/npm/node_modules/pacote/node_modules/retry/equation.gif
new file mode 100644
index 00000000000000..97107237ba19f5
Binary files /dev/null and b/deps/npm/node_modules/pacote/node_modules/retry/equation.gif differ
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/example/dns.js b/deps/npm/node_modules/pacote/node_modules/retry/example/dns.js
new file mode 100644
index 00000000000000..446729b6f9af6b
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/example/dns.js
@@ -0,0 +1,31 @@
+var dns = require('dns');
+var retry = require('../lib/retry');
+
+function faultTolerantResolve(address, cb) {
+ var opts = {
+ retries: 2,
+ factor: 2,
+ minTimeout: 1 * 1000,
+ maxTimeout: 2 * 1000,
+ randomize: true
+ };
+ var operation = retry.operation(opts);
+
+ operation.attempt(function(currentAttempt) {
+ dns.resolve(address, function(err, addresses) {
+ if (operation.retry(err)) {
+ return;
+ }
+
+ cb(operation.mainError(), operation.errors(), addresses);
+ });
+ });
+}
+
+faultTolerantResolve('nodejs.org', function(err, errors, addresses) {
+ console.warn('err:');
+ console.log(err);
+
+ console.warn('addresses:');
+ console.log(addresses);
+});
\ No newline at end of file
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/example/stop.js b/deps/npm/node_modules/pacote/node_modules/retry/example/stop.js
new file mode 100644
index 00000000000000..e1ceafeebafc51
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/example/stop.js
@@ -0,0 +1,40 @@
+var retry = require('../lib/retry');
+
+function attemptAsyncOperation(someInput, cb) {
+ var opts = {
+ retries: 2,
+ factor: 2,
+ minTimeout: 1 * 1000,
+ maxTimeout: 2 * 1000,
+ randomize: true
+ };
+ var operation = retry.operation(opts);
+
+ operation.attempt(function(currentAttempt) {
+ failingAsyncOperation(someInput, function(err, result) {
+
+ if (err && err.message === 'A fatal error') {
+ operation.stop();
+ return cb(err);
+ }
+
+ if (operation.retry(err)) {
+ return;
+ }
+
+ cb(operation.mainError(), operation.errors(), result);
+ });
+ });
+}
+
+attemptAsyncOperation('test input', function(err, errors, result) {
+ console.warn('err:');
+ console.log(err);
+
+ console.warn('result:');
+ console.log(result);
+});
+
+function failingAsyncOperation(input, cb) {
+ return setImmediate(cb.bind(null, new Error('A fatal error')));
+}
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/index.js b/deps/npm/node_modules/pacote/node_modules/retry/index.js
new file mode 100644
index 00000000000000..ee62f3a112c28b
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/index.js
@@ -0,0 +1 @@
+module.exports = require('./lib/retry');
\ No newline at end of file
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/lib/retry.js b/deps/npm/node_modules/pacote/node_modules/retry/lib/retry.js
new file mode 100644
index 00000000000000..dcb57680727948
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/lib/retry.js
@@ -0,0 +1,100 @@
+var RetryOperation = require('./retry_operation');
+
+exports.operation = function(options) {
+ var timeouts = exports.timeouts(options);
+ return new RetryOperation(timeouts, {
+ forever: options && options.forever,
+ unref: options && options.unref,
+ maxRetryTime: options && options.maxRetryTime
+ });
+};
+
+exports.timeouts = function(options) {
+ if (options instanceof Array) {
+ return [].concat(options);
+ }
+
+ var opts = {
+ retries: 10,
+ factor: 2,
+ minTimeout: 1 * 1000,
+ maxTimeout: Infinity,
+ randomize: false
+ };
+ for (var key in options) {
+ opts[key] = options[key];
+ }
+
+ if (opts.minTimeout > opts.maxTimeout) {
+ throw new Error('minTimeout is greater than maxTimeout');
+ }
+
+ var timeouts = [];
+ for (var i = 0; i < opts.retries; i++) {
+ timeouts.push(this.createTimeout(i, opts));
+ }
+
+ if (options && options.forever && !timeouts.length) {
+ timeouts.push(this.createTimeout(i, opts));
+ }
+
+ // sort the array numerically ascending
+ timeouts.sort(function(a,b) {
+ return a - b;
+ });
+
+ return timeouts;
+};
+
+exports.createTimeout = function(attempt, opts) {
+ var random = (opts.randomize)
+ ? (Math.random() + 1)
+ : 1;
+
+ var timeout = Math.round(random * opts.minTimeout * Math.pow(opts.factor, attempt));
+ timeout = Math.min(timeout, opts.maxTimeout);
+
+ return timeout;
+};
+
+exports.wrap = function(obj, options, methods) {
+ if (options instanceof Array) {
+ methods = options;
+ options = null;
+ }
+
+ if (!methods) {
+ methods = [];
+ for (var key in obj) {
+ if (typeof obj[key] === 'function') {
+ methods.push(key);
+ }
+ }
+ }
+
+ for (var i = 0; i < methods.length; i++) {
+ var method = methods[i];
+ var original = obj[method];
+
+ obj[method] = function retryWrapper(original) {
+ var op = exports.operation(options);
+ var args = Array.prototype.slice.call(arguments, 1);
+ var callback = args.pop();
+
+ args.push(function(err) {
+ if (op.retry(err)) {
+ return;
+ }
+ if (err) {
+ arguments[0] = op.mainError();
+ }
+ callback.apply(this, arguments);
+ });
+
+ op.attempt(function() {
+ original.apply(obj, args);
+ });
+ }.bind(obj, original);
+ obj[method].options = options;
+ }
+};
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/lib/retry_operation.js b/deps/npm/node_modules/pacote/node_modules/retry/lib/retry_operation.js
new file mode 100644
index 00000000000000..1e564696fe7e07
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/lib/retry_operation.js
@@ -0,0 +1,158 @@
+function RetryOperation(timeouts, options) {
+ // Compatibility for the old (timeouts, retryForever) signature
+ if (typeof options === 'boolean') {
+ options = { forever: options };
+ }
+
+ this._originalTimeouts = JSON.parse(JSON.stringify(timeouts));
+ this._timeouts = timeouts;
+ this._options = options || {};
+ this._maxRetryTime = options && options.maxRetryTime || Infinity;
+ this._fn = null;
+ this._errors = [];
+ this._attempts = 1;
+ this._operationTimeout = null;
+ this._operationTimeoutCb = null;
+ this._timeout = null;
+ this._operationStart = null;
+
+ if (this._options.forever) {
+ this._cachedTimeouts = this._timeouts.slice(0);
+ }
+}
+module.exports = RetryOperation;
+
+RetryOperation.prototype.reset = function() {
+ this._attempts = 1;
+ this._timeouts = this._originalTimeouts;
+}
+
+RetryOperation.prototype.stop = function() {
+ if (this._timeout) {
+ clearTimeout(this._timeout);
+ }
+
+ this._timeouts = [];
+ this._cachedTimeouts = null;
+};
+
+RetryOperation.prototype.retry = function(err) {
+ if (this._timeout) {
+ clearTimeout(this._timeout);
+ }
+
+ if (!err) {
+ return false;
+ }
+ var currentTime = new Date().getTime();
+ if (err && currentTime - this._operationStart >= this._maxRetryTime) {
+ this._errors.unshift(new Error('RetryOperation timeout occurred'));
+ return false;
+ }
+
+ this._errors.push(err);
+
+ var timeout = this._timeouts.shift();
+ if (timeout === undefined) {
+ if (this._cachedTimeouts) {
+ // retry forever, only keep last error
+ this._errors.splice(this._errors.length - 1, this._errors.length);
+ this._timeouts = this._cachedTimeouts.slice(0);
+ timeout = this._timeouts.shift();
+ } else {
+ return false;
+ }
+ }
+
+ var self = this;
+ var timer = setTimeout(function() {
+ self._attempts++;
+
+ if (self._operationTimeoutCb) {
+ self._timeout = setTimeout(function() {
+ self._operationTimeoutCb(self._attempts);
+ }, self._operationTimeout);
+
+ if (self._options.unref) {
+ self._timeout.unref();
+ }
+ }
+
+ self._fn(self._attempts);
+ }, timeout);
+
+ if (this._options.unref) {
+ timer.unref();
+ }
+
+ return true;
+};
+
+RetryOperation.prototype.attempt = function(fn, timeoutOps) {
+ this._fn = fn;
+
+ if (timeoutOps) {
+ if (timeoutOps.timeout) {
+ this._operationTimeout = timeoutOps.timeout;
+ }
+ if (timeoutOps.cb) {
+ this._operationTimeoutCb = timeoutOps.cb;
+ }
+ }
+
+ var self = this;
+ if (this._operationTimeoutCb) {
+ this._timeout = setTimeout(function() {
+ self._operationTimeoutCb();
+ }, self._operationTimeout);
+ }
+
+ this._operationStart = new Date().getTime();
+
+ this._fn(this._attempts);
+};
+
+RetryOperation.prototype.try = function(fn) {
+ console.log('Using RetryOperation.try() is deprecated');
+ this.attempt(fn);
+};
+
+RetryOperation.prototype.start = function(fn) {
+ console.log('Using RetryOperation.start() is deprecated');
+ this.attempt(fn);
+};
+
+RetryOperation.prototype.start = RetryOperation.prototype.try;
+
+RetryOperation.prototype.errors = function() {
+ return this._errors;
+};
+
+RetryOperation.prototype.attempts = function() {
+ return this._attempts;
+};
+
+RetryOperation.prototype.mainError = function() {
+ if (this._errors.length === 0) {
+ return null;
+ }
+
+ var counts = {};
+ var mainError = null;
+ var mainErrorCount = 0;
+
+ for (var i = 0; i < this._errors.length; i++) {
+ var error = this._errors[i];
+ var message = error.message;
+ var count = (counts[message] || 0) + 1;
+
+ counts[message] = count;
+
+ if (count >= mainErrorCount) {
+ mainError = error;
+ mainErrorCount = count;
+ }
+ }
+
+ return mainError;
+};
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/package.json b/deps/npm/node_modules/pacote/node_modules/retry/package.json
new file mode 100644
index 00000000000000..73c7259707aeef
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/package.json
@@ -0,0 +1,32 @@
+{
+ "author": "Tim Koschützki (http://debuggable.com/)",
+ "name": "retry",
+ "description": "Abstraction for exponential and custom retry strategies for failed operations.",
+ "license": "MIT",
+ "version": "0.12.0",
+ "homepage": "https://github.com/tim-kos/node-retry",
+ "repository": {
+ "type": "git",
+ "url": "git://github.com/tim-kos/node-retry.git"
+ },
+ "directories": {
+ "lib": "./lib"
+ },
+ "main": "index",
+ "engines": {
+ "node": ">= 4"
+ },
+ "dependencies": {},
+ "devDependencies": {
+ "fake": "0.2.0",
+ "istanbul": "^0.4.5",
+ "tape": "^4.8.0"
+ },
+ "scripts": {
+ "test": "./node_modules/.bin/istanbul cover ./node_modules/tape/bin/tape ./test/integration/*.js",
+ "release:major": "env SEMANTIC=major npm run release",
+ "release:minor": "env SEMANTIC=minor npm run release",
+ "release:patch": "env SEMANTIC=patch npm run release",
+ "release": "npm version ${SEMANTIC:-patch} -m \"Release %s\" && git push && git push --tags && npm publish"
+ }
+}
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/test/common.js b/deps/npm/node_modules/pacote/node_modules/retry/test/common.js
new file mode 100644
index 00000000000000..224720696ebac8
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/test/common.js
@@ -0,0 +1,10 @@
+var common = module.exports;
+var path = require('path');
+
+var rootDir = path.join(__dirname, '..');
+common.dir = {
+ lib: rootDir + '/lib'
+};
+
+common.assert = require('assert');
+common.fake = require('fake');
\ No newline at end of file
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-forever.js b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-forever.js
new file mode 100644
index 00000000000000..b41307cb529f12
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-forever.js
@@ -0,0 +1,24 @@
+var common = require('../common');
+var assert = common.assert;
+var retry = require(common.dir.lib + '/retry');
+
+(function testForeverUsesFirstTimeout() {
+ var operation = retry.operation({
+ retries: 0,
+ minTimeout: 100,
+ maxTimeout: 100,
+ forever: true
+ });
+
+ operation.attempt(function(numAttempt) {
+ console.log('>numAttempt', numAttempt);
+ var err = new Error("foo");
+ if (numAttempt == 10) {
+ operation.stop();
+ }
+
+ if (operation.retry(err)) {
+ return;
+ }
+ });
+})();
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-retry-operation.js b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-retry-operation.js
new file mode 100644
index 00000000000000..e351bb683ed449
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-retry-operation.js
@@ -0,0 +1,258 @@
+var common = require('../common');
+var assert = common.assert;
+var fake = common.fake.create();
+var retry = require(common.dir.lib + '/retry');
+
+(function testReset() {
+ var error = new Error('some error');
+ var operation = retry.operation([1, 2, 3]);
+ var attempts = 0;
+
+ var finalCallback = fake.callback('finalCallback');
+ fake.expectAnytime(finalCallback);
+
+ var expectedFinishes = 1;
+ var finishes = 0;
+
+ var fn = function() {
+ operation.attempt(function(currentAttempt) {
+ attempts++;
+ assert.equal(currentAttempt, attempts);
+ if (operation.retry(error)) {
+ return;
+ }
+
+ finishes++
+ assert.equal(expectedFinishes, finishes);
+ assert.strictEqual(attempts, 4);
+ assert.strictEqual(operation.attempts(), attempts);
+ assert.strictEqual(operation.mainError(), error);
+
+ if (finishes < 2) {
+ attempts = 0;
+ expectedFinishes++;
+ operation.reset();
+ fn()
+ } else {
+ finalCallback();
+ }
+ });
+ };
+
+ fn();
+})();
+
+(function testErrors() {
+ var operation = retry.operation();
+
+ var error = new Error('some error');
+ var error2 = new Error('some other error');
+ operation._errors.push(error);
+ operation._errors.push(error2);
+
+ assert.deepEqual(operation.errors(), [error, error2]);
+})();
+
+(function testMainErrorReturnsMostFrequentError() {
+ var operation = retry.operation();
+ var error = new Error('some error');
+ var error2 = new Error('some other error');
+
+ operation._errors.push(error);
+ operation._errors.push(error2);
+ operation._errors.push(error);
+
+ assert.strictEqual(operation.mainError(), error);
+})();
+
+(function testMainErrorReturnsLastErrorOnEqualCount() {
+ var operation = retry.operation();
+ var error = new Error('some error');
+ var error2 = new Error('some other error');
+
+ operation._errors.push(error);
+ operation._errors.push(error2);
+
+ assert.strictEqual(operation.mainError(), error2);
+})();
+
+(function testAttempt() {
+ var operation = retry.operation();
+ var fn = new Function();
+
+ var timeoutOpts = {
+ timeout: 1,
+ cb: function() {}
+ };
+ operation.attempt(fn, timeoutOpts);
+
+ assert.strictEqual(fn, operation._fn);
+ assert.strictEqual(timeoutOpts.timeout, operation._operationTimeout);
+ assert.strictEqual(timeoutOpts.cb, operation._operationTimeoutCb);
+})();
+
+(function testRetry() {
+ var error = new Error('some error');
+ var operation = retry.operation([1, 2, 3]);
+ var attempts = 0;
+
+ var finalCallback = fake.callback('finalCallback');
+ fake.expectAnytime(finalCallback);
+
+ var fn = function() {
+ operation.attempt(function(currentAttempt) {
+ attempts++;
+ assert.equal(currentAttempt, attempts);
+ if (operation.retry(error)) {
+ return;
+ }
+
+ assert.strictEqual(attempts, 4);
+ assert.strictEqual(operation.attempts(), attempts);
+ assert.strictEqual(operation.mainError(), error);
+ finalCallback();
+ });
+ };
+
+ fn();
+})();
+
+(function testRetryForever() {
+ var error = new Error('some error');
+ var operation = retry.operation({ retries: 3, forever: true });
+ var attempts = 0;
+
+ var finalCallback = fake.callback('finalCallback');
+ fake.expectAnytime(finalCallback);
+
+ var fn = function() {
+ operation.attempt(function(currentAttempt) {
+ attempts++;
+ assert.equal(currentAttempt, attempts);
+ if (attempts !== 6 && operation.retry(error)) {
+ return;
+ }
+
+ assert.strictEqual(attempts, 6);
+ assert.strictEqual(operation.attempts(), attempts);
+ assert.strictEqual(operation.mainError(), error);
+ finalCallback();
+ });
+ };
+
+ fn();
+})();
+
+(function testRetryForeverNoRetries() {
+ var error = new Error('some error');
+ var delay = 50
+ var operation = retry.operation({
+ retries: null,
+ forever: true,
+ minTimeout: delay,
+ maxTimeout: delay
+ });
+
+ var attempts = 0;
+ var startTime = new Date().getTime();
+
+ var finalCallback = fake.callback('finalCallback');
+ fake.expectAnytime(finalCallback);
+
+ var fn = function() {
+ operation.attempt(function(currentAttempt) {
+ attempts++;
+ assert.equal(currentAttempt, attempts);
+ if (attempts !== 4 && operation.retry(error)) {
+ return;
+ }
+
+ var endTime = new Date().getTime();
+ var minTime = startTime + (delay * 3);
+ var maxTime = minTime + 20 // add a little headroom for code execution time
+ assert(endTime >= minTime)
+ assert(endTime < maxTime)
+ assert.strictEqual(attempts, 4);
+ assert.strictEqual(operation.attempts(), attempts);
+ assert.strictEqual(operation.mainError(), error);
+ finalCallback();
+ });
+ };
+
+ fn();
+})();
+
+(function testStop() {
+ var error = new Error('some error');
+ var operation = retry.operation([1, 2, 3]);
+ var attempts = 0;
+
+ var finalCallback = fake.callback('finalCallback');
+ fake.expectAnytime(finalCallback);
+
+ var fn = function() {
+ operation.attempt(function(currentAttempt) {
+ attempts++;
+ assert.equal(currentAttempt, attempts);
+
+ if (attempts === 2) {
+ operation.stop();
+
+ assert.strictEqual(attempts, 2);
+ assert.strictEqual(operation.attempts(), attempts);
+ assert.strictEqual(operation.mainError(), error);
+ finalCallback();
+ }
+
+ if (operation.retry(error)) {
+ return;
+ }
+ });
+ };
+
+ fn();
+})();
+
+(function testMaxRetryTime() {
+ var error = new Error('some error');
+ var maxRetryTime = 30;
+ var operation = retry.operation({
+ minTimeout: 1,
+ maxRetryTime: maxRetryTime
+ });
+ var attempts = 0;
+
+ var finalCallback = fake.callback('finalCallback');
+ fake.expectAnytime(finalCallback);
+
+ var longAsyncFunction = function (wait, callback){
+ setTimeout(callback, wait);
+ };
+
+ var fn = function() {
+ var startTime = new Date().getTime();
+ operation.attempt(function(currentAttempt) {
+ attempts++;
+ assert.equal(currentAttempt, attempts);
+
+ if (attempts !== 2) {
+ if (operation.retry(error)) {
+ return;
+ }
+ } else {
+ var curTime = new Date().getTime();
+ longAsyncFunction(maxRetryTime - (curTime - startTime - 1), function(){
+ if (operation.retry(error)) {
+ assert.fail('timeout should be occurred');
+ return;
+ }
+
+ assert.strictEqual(operation.mainError(), error);
+ finalCallback();
+ });
+ }
+ });
+ };
+
+ fn();
+})();
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-retry-wrap.js b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-retry-wrap.js
new file mode 100644
index 00000000000000..3d2b6bfa6436d2
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-retry-wrap.js
@@ -0,0 +1,101 @@
+var common = require('../common');
+var assert = common.assert;
+var fake = common.fake.create();
+var retry = require(common.dir.lib + '/retry');
+
+function getLib() {
+ return {
+ fn1: function() {},
+ fn2: function() {},
+ fn3: function() {}
+ };
+}
+
+(function wrapAll() {
+ var lib = getLib();
+ retry.wrap(lib);
+ assert.equal(lib.fn1.name, 'bound retryWrapper');
+ assert.equal(lib.fn2.name, 'bound retryWrapper');
+ assert.equal(lib.fn3.name, 'bound retryWrapper');
+}());
+
+(function wrapAllPassOptions() {
+ var lib = getLib();
+ retry.wrap(lib, {retries: 2});
+ assert.equal(lib.fn1.name, 'bound retryWrapper');
+ assert.equal(lib.fn2.name, 'bound retryWrapper');
+ assert.equal(lib.fn3.name, 'bound retryWrapper');
+ assert.equal(lib.fn1.options.retries, 2);
+ assert.equal(lib.fn2.options.retries, 2);
+ assert.equal(lib.fn3.options.retries, 2);
+}());
+
+(function wrapDefined() {
+ var lib = getLib();
+ retry.wrap(lib, ['fn2', 'fn3']);
+ assert.notEqual(lib.fn1.name, 'bound retryWrapper');
+ assert.equal(lib.fn2.name, 'bound retryWrapper');
+ assert.equal(lib.fn3.name, 'bound retryWrapper');
+}());
+
+(function wrapDefinedAndPassOptions() {
+ var lib = getLib();
+ retry.wrap(lib, {retries: 2}, ['fn2', 'fn3']);
+ assert.notEqual(lib.fn1.name, 'bound retryWrapper');
+ assert.equal(lib.fn2.name, 'bound retryWrapper');
+ assert.equal(lib.fn3.name, 'bound retryWrapper');
+ assert.equal(lib.fn2.options.retries, 2);
+ assert.equal(lib.fn3.options.retries, 2);
+}());
+
+(function runWrappedWithoutError() {
+ var callbackCalled;
+ var lib = {method: function(a, b, callback) {
+ assert.equal(a, 1);
+ assert.equal(b, 2);
+ assert.equal(typeof callback, 'function');
+ callback();
+ }};
+ retry.wrap(lib);
+ lib.method(1, 2, function() {
+ callbackCalled = true;
+ });
+ assert.ok(callbackCalled);
+}());
+
+(function runWrappedSeveralWithoutError() {
+ var callbacksCalled = 0;
+ var lib = {
+ fn1: function (a, callback) {
+ assert.equal(a, 1);
+ assert.equal(typeof callback, 'function');
+ callback();
+ },
+ fn2: function (a, callback) {
+ assert.equal(a, 2);
+ assert.equal(typeof callback, 'function');
+ callback();
+ }
+ };
+ retry.wrap(lib, {}, ['fn1', 'fn2']);
+ lib.fn1(1, function() {
+ callbacksCalled++;
+ });
+ lib.fn2(2, function() {
+ callbacksCalled++;
+ });
+ assert.equal(callbacksCalled, 2);
+}());
+
+(function runWrappedWithError() {
+ var callbackCalled;
+ var lib = {method: function(callback) {
+ callback(new Error('Some error'));
+ }};
+ retry.wrap(lib, {retries: 1});
+ lib.method(function(err) {
+ callbackCalled = true;
+ assert.ok(err instanceof Error);
+ });
+ assert.ok(!callbackCalled);
+}());
diff --git a/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-timeouts.js b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-timeouts.js
new file mode 100644
index 00000000000000..7206b0fb0b01d0
--- /dev/null
+++ b/deps/npm/node_modules/pacote/node_modules/retry/test/integration/test-timeouts.js
@@ -0,0 +1,69 @@
+var common = require('../common');
+var assert = common.assert;
+var retry = require(common.dir.lib + '/retry');
+
+(function testDefaultValues() {
+ var timeouts = retry.timeouts();
+
+ assert.equal(timeouts.length, 10);
+ assert.equal(timeouts[0], 1000);
+ assert.equal(timeouts[1], 2000);
+ assert.equal(timeouts[2], 4000);
+})();
+
+(function testDefaultValuesWithRandomize() {
+ var minTimeout = 5000;
+ var timeouts = retry.timeouts({
+ minTimeout: minTimeout,
+ randomize: true
+ });
+
+ assert.equal(timeouts.length, 10);
+ assert.ok(timeouts[0] > minTimeout);
+ assert.ok(timeouts[1] > timeouts[0]);
+ assert.ok(timeouts[2] > timeouts[1]);
+})();
+
+(function testPassedTimeoutsAreUsed() {
+ var timeoutsArray = [1000, 2000, 3000];
+ var timeouts = retry.timeouts(timeoutsArray);
+ assert.deepEqual(timeouts, timeoutsArray);
+ assert.notStrictEqual(timeouts, timeoutsArray);
+})();
+
+(function testTimeoutsAreWithinBoundaries() {
+ var minTimeout = 1000;
+ var maxTimeout = 10000;
+ var timeouts = retry.timeouts({
+ minTimeout: minTimeout,
+ maxTimeout: maxTimeout
+ });
+ for (var i = 0; i < timeouts; i++) {
+ assert.ok(timeouts[i] >= minTimeout);
+ assert.ok(timeouts[i] <= maxTimeout);
+ }
+})();
+
+(function testTimeoutsAreIncremental() {
+ var timeouts = retry.timeouts();
+ var lastTimeout = timeouts[0];
+ for (var i = 0; i < timeouts; i++) {
+ assert.ok(timeouts[i] > lastTimeout);
+ lastTimeout = timeouts[i];
+ }
+})();
+
+(function testTimeoutsAreIncrementalForFactorsLessThanOne() {
+ var timeouts = retry.timeouts({
+ retries: 3,
+ factor: 0.5
+ });
+
+ var expected = [250, 500, 1000];
+ assert.deepEqual(expected, timeouts);
+})();
+
+(function testRetries() {
+ var timeouts = retry.timeouts({retries: 2});
+ assert.strictEqual(timeouts.length, 2);
+})();
diff --git a/deps/npm/node_modules/pacote/package.json b/deps/npm/node_modules/pacote/package.json
index 959cb1ec488314..8c25e68330bdcc 100644
--- a/deps/npm/node_modules/pacote/package.json
+++ b/deps/npm/node_modules/pacote/package.json
@@ -1,6 +1,6 @@
{
"name": "pacote",
- "version": "11.2.4",
+ "version": "11.2.6",
"description": "JavaScript package downloader",
"author": "Isaac Z. Schlueter (https://izs.me)",
"bin": {
@@ -25,7 +25,7 @@
"mutate-fs": "^2.1.1",
"npm-registry-mock": "^1.3.1",
"require-inject": "^1.4.4",
- "tap": "^14.10.8"
+ "tap": "^14.11.0"
},
"files": [
"lib/**/*.js"
@@ -37,9 +37,9 @@
],
"dependencies": {
"@npmcli/git": "^2.0.1",
- "@npmcli/installed-package-contents": "^1.0.5",
+ "@npmcli/installed-package-contents": "^1.0.6",
"@npmcli/promise-spawn": "^1.2.0",
- "@npmcli/run-script": "^1.3.0",
+ "@npmcli/run-script": "^1.8.2",
"cacache": "^15.0.5",
"chownr": "^2.0.0",
"fs-minipass": "^2.1.0",
@@ -50,10 +50,10 @@
"npm-packlist": "^2.1.4",
"npm-pick-manifest": "^6.0.0",
"npm-registry-fetch": "^9.0.0",
- "promise-retry": "^1.1.1",
- "read-package-json-fast": "^1.1.3",
+ "promise-retry": "^2.0.1",
+ "read-package-json-fast": "^2.0.1",
"rimraf": "^3.0.2",
- "ssri": "^8.0.0",
+ "ssri": "^8.0.1",
"tar": "^6.1.0"
},
"engines": {
diff --git a/deps/npm/node_modules/read-package-json-fast/index.js b/deps/npm/node_modules/read-package-json-fast/index.js
index bfef5d6abcacc0..cf373029ddf686 100644
--- a/deps/npm/node_modules/read-package-json-fast/index.js
+++ b/deps/npm/node_modules/read-package-json-fast/index.js
@@ -13,7 +13,7 @@ const normalizePackageBin = require('npm-normalize-package-bin')
const normalize = data => {
add_id(data)
fixBundled(data)
- foldinOptionalDeps(data)
+ pruneRepeatedOptionals(data)
fixScripts(data)
fixFunding(data)
normalizePackageBin(data)
@@ -28,14 +28,20 @@ const add_id = data => {
return data
}
-const foldinOptionalDeps = data => {
+// it was once common practice to list deps both in optionalDependencies
+// and in dependencies, to support npm versions that did not know about
+// optionalDependencies. This is no longer a relevant need, so duplicating
+// the deps in two places is unnecessary and excessive.
+const pruneRepeatedOptionals = data => {
const od = data.optionalDependencies
+ const dd = data.dependencies || {}
if (od && typeof od === 'object') {
- data.dependencies = data.dependencies || {}
- for (const [name, spec] of Object.entries(od)) {
- data.dependencies[name] = spec
+ for (const name of Object.keys(od)) {
+ delete dd[name]
}
}
+ if (Object.keys(dd).length === 0)
+ delete data.dependencies
return data
}
diff --git a/deps/npm/node_modules/read-package-json-fast/package.json b/deps/npm/node_modules/read-package-json-fast/package.json
index a59a3b2e86e9bc..aa5f5d87007b86 100644
--- a/deps/npm/node_modules/read-package-json-fast/package.json
+++ b/deps/npm/node_modules/read-package-json-fast/package.json
@@ -1,6 +1,6 @@
{
"name": "read-package-json-fast",
- "version": "1.2.1",
+ "version": "2.0.1",
"description": "Like read-package-json, but faster",
"author": "Isaac Z. Schlueter (https://izs.me)",
"license": "ISC",
@@ -11,6 +11,9 @@
"postversion": "npm publish",
"postpublish": "git push origin --follow-tags"
},
+ "engines": {
+ "node": ">=10"
+ },
"tap": {
"check-coverage": true
},
diff --git a/deps/npm/package.json b/deps/npm/package.json
index 57b7f48964b816..8f88726ad1e04e 100644
--- a/deps/npm/package.json
+++ b/deps/npm/package.json
@@ -1,5 +1,5 @@
{
- "version": "7.5.1",
+ "version": "7.5.3",
"name": "npm",
"description": "a package manager for JavaScript",
"keywords": [
@@ -42,10 +42,11 @@
"./package.json": "./package.json"
},
"dependencies": {
- "@npmcli/arborist": "^2.1.1",
+ "@npmcli/arborist": "^2.2.1",
"@npmcli/ci-detect": "^1.2.0",
- "@npmcli/config": "^1.2.8",
- "@npmcli/run-script": "^1.8.1",
+ "@npmcli/config": "^1.2.9",
+ "@npmcli/installed-package-contents": "^1.0.7",
+ "@npmcli/run-script": "^1.8.2",
"abbrev": "~1.1.1",
"ansicolors": "~0.3.2",
"ansistyles": "~0.1.3",
@@ -58,10 +59,10 @@
"cli-table3": "^0.6.0",
"columnify": "~1.5.4",
"glob": "^7.1.4",
- "graceful-fs": "^4.2.3",
+ "graceful-fs": "^4.2.5",
"hosted-git-info": "^3.0.8",
"ini": "^2.0.0",
- "init-package-json": "^2.0.1",
+ "init-package-json": "^2.0.2",
"is-cidr": "^4.0.2",
"json-parse-even-better-errors": "^2.3.1",
"leven": "^3.1.0",
@@ -74,7 +75,7 @@
"libnpmpublish": "^4.0.0",
"libnpmsearch": "^3.1.0",
"libnpmteam": "^2.0.2",
- "libnpmversion": "^1.0.7",
+ "libnpmversion": "^1.0.8",
"make-fetch-happen": "^8.0.13",
"minipass": "^3.1.3",
"minipass-pipeline": "^1.2.4",
@@ -91,12 +92,12 @@
"npm-user-validate": "^1.0.1",
"npmlog": "~4.1.2",
"opener": "^1.5.2",
- "pacote": "^11.2.3",
+ "pacote": "^11.2.6",
"parse-conflict-json": "^1.1.1",
"qrcode-terminal": "^0.12.0",
"read": "~1.0.7",
"read-package-json": "^3.0.0",
- "read-package-json-fast": "^1.2.1",
+ "read-package-json-fast": "^2.0.1",
"readdir-scoped-modules": "^1.1.0",
"rimraf": "^3.0.2",
"semver": "^7.3.4",
@@ -181,7 +182,7 @@
],
"devDependencies": {
"cmark-gfm": "^0.8.5",
- "eslint": "^7.18.0",
+ "eslint": "^7.19.0",
"eslint-plugin-import": "^2.22.1",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-promise": "^4.2.1",
diff --git a/deps/npm/scripts/install.sh b/deps/npm/scripts/install.sh
index 4458de87faefbe..8c0ba3de72f12c 100755
--- a/deps/npm/scripts/install.sh
+++ b/deps/npm/scripts/install.sh
@@ -18,7 +18,7 @@ if [ "x$0" = "xsh" ]; then
# which is a bit cuter. But on others, &1 is already closed,
# so catting to another script file won't do anything.
# Follow Location: headers, and fail on errors
- curl -f -L -s https://www.npmjs.org/install.sh > npm-install-$$.sh
+ curl -q -f -L -s https://www.npmjs.org/install.sh > npm-install-$$.sh
ret=$?
if [ $ret -eq 0 ]; then
(exit 0)
@@ -134,7 +134,7 @@ fi
# need to echo "" after, because Posix sed doesn't treat EOF
# as an implied end of line.
-url=`(curl -SsL https://registry.npmjs.org/npm/$t; echo "") \
+url=`(curl -qSsL https://registry.npmjs.org/npm/$t; echo "") \
| sed -e 's/^.*tarball":"//' \
| sed -e 's/".*$//'`
@@ -142,7 +142,7 @@ ret=$?
if [ "x$url" = "x" ]; then
ret=125
# try without the -e arg to sed.
- url=`(curl -SsL https://registry.npmjs.org/npm/$t; echo "") \
+ url=`(curl -qSsL https://registry.npmjs.org/npm/$t; echo "") \
| sed 's/^.*tarball":"//' \
| sed 's/".*$//'`
ret=$?
@@ -159,7 +159,7 @@ fi
echo "fetching: $url" >&2
cd "$TMP" \
- && curl -SsL -o npm.tgz "$url" \
+ && curl -qSsL -o npm.tgz "$url" \
&& $tar -xzf npm.tgz \
&& cd "$TMP"/package \
&& echo "removing existing npm" \
diff --git a/deps/npm/tap-snapshots/test-lib-utils-open-url.js-TAP.test.js b/deps/npm/tap-snapshots/test-lib-utils-open-url.js-TAP.test.js
new file mode 100644
index 00000000000000..8c8159ebcfc04c
--- /dev/null
+++ b/deps/npm/tap-snapshots/test-lib-utils-open-url.js-TAP.test.js
@@ -0,0 +1,25 @@
+/* IMPORTANT
+ * This snapshot file is auto-generated, but designed for humans.
+ * It should be checked into source control and tracked carefully.
+ * Re-generate by setting TAP_SNAPSHOT=1 and running tests.
+ * Make sure to inspect the output below. Do not ignore changes!
+ */
+'use strict'
+exports[`test/lib/utils/open-url.js TAP prints where to go when browser is disabled > printed expected message 1`] = `
+npm home:
+ https://www.npmjs.com
+
+`
+
+exports[`test/lib/utils/open-url.js TAP prints where to go when browser is disabled and json is enabled > printed expected message 1`] = `
+{
+ "title": "npm home",
+ "url": "https://www.npmjs.com"
+}
+`
+
+exports[`test/lib/utils/open-url.js TAP prints where to go when given browser does not exist > printed expected message 1`] = `
+npm home:
+ https://www.npmjs.com
+
+`
diff --git a/deps/npm/test/lib/help.js b/deps/npm/test/lib/help.js
index 17018acc61620e..40a0354210b92e 100644
--- a/deps/npm/test/lib/help.js
+++ b/deps/npm/test/lib/help.js
@@ -55,12 +55,13 @@ const glob = (p, cb) => {
let spawnBin = null
let spawnArgs = null
+let spawnCode = 0
const spawn = (bin, args) => {
spawnBin = bin
spawnArgs = args
const spawnEmitter = new EventEmitter()
process.nextTick(() => {
- spawnEmitter.emit('close', 0)
+ spawnEmitter.emit('exit', spawnCode)
})
return spawnEmitter
}
@@ -76,7 +77,9 @@ const help = requireInject('../../lib/help.js', {
'../../lib/utils/npm-usage.js': npmUsage,
'../../lib/utils/open-url.js': openUrl,
'../../lib/utils/output.js': output,
- '../../lib/utils/spawn.js': spawn,
+ child_process: {
+ spawn,
+ },
glob,
})
@@ -339,6 +342,29 @@ test('npm help ?(un)star', t => {
})
})
+test('npm help - woman viewer propagates errors', t => {
+ npmConfig.viewer = 'woman'
+ spawnCode = 1
+ globResult = [
+ '/root/man/man1/npm-star.1',
+ '/root/man/man1/npm-unstar.1',
+ ]
+ t.teardown(() => {
+ npmConfig.viewer = undefined
+ spawnCode = 0
+ globResult = globDefaults
+ spawnBin = null
+ spawnArgs = null
+ })
+
+ return help(['?(un)star'], (err) => {
+ t.match(err, /help process exited with code: 1/, 'received the correct error')
+ t.equal(spawnBin, 'emacsclient', 'maps woman to emacs correctly')
+ t.strictSame(spawnArgs, ['-e', `(woman-find-file '/root/man/man1/npm-unstar.1')`], 'passes the correct arguments')
+ t.end()
+ })
+})
+
test('npm help un*', t => {
globResult = [
'/root/man/man1/npm-unstar.1',
@@ -360,3 +386,25 @@ test('npm help un*', t => {
t.end()
})
})
+
+test('npm help - man viewer propagates errors', t => {
+ spawnCode = 1
+ globResult = [
+ '/root/man/man1/npm-unstar.1',
+ '/root/man/man1/npm-uninstall.1',
+ '/root/man/man1/npm-unpublish.1',
+ ]
+ t.teardown(() => {
+ spawnCode = 0
+ globResult = globDefaults
+ spawnBin = null
+ spawnArgs = null
+ })
+
+ return help(['un*'], (err) => {
+ t.match(err, /help process exited with code: 1/, 'received correct error')
+ t.equal(spawnBin, 'man', 'calls man by default')
+ t.strictSame(spawnArgs, ['1', 'npm-unstar'], 'passes the correct arguments')
+ t.end()
+ })
+})
diff --git a/deps/npm/test/lib/publish.js b/deps/npm/test/lib/publish.js
index f0ce0b966533c0..6d5cebf5406988 100644
--- a/deps/npm/test/lib/publish.js
+++ b/deps/npm/test/lib/publish.js
@@ -4,19 +4,40 @@ const requireInject = require('require-inject')
// mock config
const {defaults} = require('../../lib/utils/config.js')
const credentials = {
- token: 'asdfasdf',
- alwaysAuth: false,
+ 'https://unauthed.registry': {
+ email: 'me@example.com',
+ },
+ 'https://scope.specific.registry': {
+ token: 'some.registry.token',
+ alwaysAuth: false,
+ },
+ 'https://some.registry': {
+ token: 'some.registry.token',
+ alwaysAuth: false,
+ },
+ 'https://registry.npmjs.org/': {
+ token: 'npmjs.registry.token',
+ alwaysAuth: false,
+ },
}
const config = {
list: [defaults],
- getCredentialsByURI: () => credentials,
}
+
+const registryCredentials = (t, registry) => {
+ return (uri) => {
+ t.same(uri, registry, 'gets credentials for expected registry')
+ return credentials[uri]
+ }
+}
+
const fs = require('fs')
t.test('should publish with libnpmpublish, respecting publishConfig', (t) => {
- t.plan(5)
+ t.plan(6)
- const publishConfig = { registry: 'https://some.registry' }
+ const registry = 'https://some.registry'
+ const publishConfig = { registry }
const testDir = t.testdir({
'package.json': JSON.stringify({
name: 'my-cool-pkg',
@@ -30,9 +51,12 @@ t.test('should publish with libnpmpublish, respecting publishConfig', (t) => {
flatOptions: {
json: true,
defaultTag: 'latest',
- registry: 'https://registry.npmjs.org',
+ registry,
+ },
+ config: {
+ ...config,
+ getCredentialsByURI: registryCredentials(t, registry),
},
- config,
},
'../../lib/utils/tar.js': {
getContents: () => ({
@@ -71,8 +95,9 @@ t.test('should publish with libnpmpublish, respecting publishConfig', (t) => {
})
t.test('re-loads publishConfig if added during script process', (t) => {
- t.plan(5)
- const publishConfig = { registry: 'https://some.registry' }
+ t.plan(6)
+ const registry = 'https://some.registry'
+ const publishConfig = { registry }
const testDir = t.testdir({
'package.json': JSON.stringify({
name: 'my-cool-pkg',
@@ -87,7 +112,10 @@ t.test('re-loads publishConfig if added during script process', (t) => {
defaultTag: 'latest',
registry: 'https://registry.npmjs.org/',
},
- config,
+ config: {
+ ...config,
+ getCredentialsByURI: registryCredentials(t, registry),
+ },
},
'../../lib/utils/tar.js': {
getContents: () => ({
@@ -112,7 +140,7 @@ t.test('re-loads publishConfig if added during script process', (t) => {
t.match(manifest, { name: 'my-cool-pkg', version: '1.0.0' }, 'gets manifest')
t.isa(tarData, Buffer, 'tarData is a buffer')
t.ok(opts, 'gets opts object')
- t.same(opts.registry, publishConfig.registry, 'publishConfig is passed through')
+ t.same(opts.registry, registry, 'publishConfig is passed through')
},
},
})
@@ -124,9 +152,10 @@ t.test('re-loads publishConfig if added during script process', (t) => {
})
})
-t.test('should not log if silent', (t) => {
+t.test('should not log if silent (dry run)', (t) => {
t.plan(2)
+ const registry = 'https://registry.npmjs.org'
const testDir = t.testdir({
'package.json': JSON.stringify({
name: 'my-cool-pkg',
@@ -140,9 +169,14 @@ t.test('should not log if silent', (t) => {
json: false,
defaultTag: 'latest',
dryRun: true,
- registry: 'https://registry.npmjs.org/',
+ registry,
+ },
+ config: {
+ ...config,
+ getCredentialsByURI: () => {
+ throw new Error('should not call getCredentialsByURI in dry run')
+ },
},
- config,
},
'../../lib/utils/tar.js': {
getContents: () => ({}),
@@ -164,7 +198,7 @@ t.test('should not log if silent', (t) => {
libnpmpack: async () => '',
libnpmpublish: {
publish: (manifest, tarData, opts) => {
- throw new Error('should not call libnpmpublish!')
+ throw new Error('should not call libnpmpublish in dry run')
},
},
})
@@ -176,8 +210,10 @@ t.test('should not log if silent', (t) => {
})
})
-t.test('should log tarball contents', (t) => {
+t.test('should log tarball contents (dry run)', (t) => {
t.plan(3)
+
+ const registry = 'https://registry.npmjs.org'
const testDir = t.testdir({
'package.json': JSON.stringify({
name: 'my-cool-pkg',
@@ -191,12 +227,12 @@ t.test('should log tarball contents', (t) => {
json: false,
defaultTag: 'latest',
dryRun: true,
- registry: 'https://registry.npmjs.org/',
+ registry,
},
config: {
...config,
getCredentialsByURI: () => {
- throw new Error('should not call getCredentialsByURI!')
+ throw new Error('should not call getCredentialsByURI in dry run')
}},
},
'../../lib/utils/tar.js': {
@@ -216,7 +252,7 @@ t.test('should log tarball contents', (t) => {
libnpmpack: async () => '',
libnpmpublish: {
publish: () => {
- throw new Error('should not call libnpmpublish!')
+ throw new Error('should not call libnpmpublish in dry run')
},
},
})
@@ -246,12 +282,15 @@ t.test('shows usage with wrong set of arguments', (t) => {
t.test('throws when invalid tag', (t) => {
t.plan(1)
+
+ const registry = 'https://registry.npmjs.org'
+
const publish = requireInject('../../lib/publish.js', {
'../../lib/npm.js': {
flatOptions: {
json: false,
defaultTag: '0.0.13',
- registry: 'https://registry.npmjs.org/',
+ registry,
},
config,
},
@@ -265,7 +304,9 @@ t.test('throws when invalid tag', (t) => {
})
t.test('can publish a tarball', t => {
- t.plan(3)
+ t.plan(4)
+
+ const registry = 'https://registry.npmjs.org/'
const testDir = t.testdir({
package: {
'package.json': JSON.stringify({
@@ -291,9 +332,12 @@ t.test('can publish a tarball', t => {
flatOptions: {
json: true,
defaultTag: 'latest',
- registry: 'https://registry.npmjs.org/',
+ registry,
+ },
+ config: {
+ ...config,
+ getCredentialsByURI: registryCredentials(t, registry),
},
- config,
},
'../../lib/utils/tar.js': {
getContents: () => ({
@@ -323,39 +367,25 @@ t.test('can publish a tarball', t => {
})
})
-t.test('throw if no registry', async t => {
- t.plan(1)
- const publish = requireInject('../../lib/publish.js', {
- '../../lib/npm.js': {
- flatOptions: {
- json: false,
- registry: null,
- },
- config,
- },
- })
-
- return publish([], (err) => {
- t.match(err, {
- message: 'No registry specified.',
- code: 'ENOREGISTRY',
- }, 'throws when registry unset')
- })
-})
-
t.test('throw if not logged in', async t => {
- t.plan(1)
+ t.plan(2)
+ const registry = 'https://unauthed.registry'
+
const publish = requireInject('../../lib/publish.js', {
+ '../../lib/utils/tar.js': {
+ getContents: () => ({
+ id: 'someid',
+ }),
+ logTar: () => {},
+ },
'../../lib/npm.js': {
flatOptions: {
json: false,
- registry: 'https://registry.npmjs.org/',
+ registry,
},
config: {
...config,
- getCredentialsByURI: () => ({
- email: 'me@example.com',
- }),
+ getCredentialsByURI: registryCredentials(t, registry),
},
},
})
@@ -369,9 +399,10 @@ t.test('throw if not logged in', async t => {
})
t.test('read registry only from publishConfig', t => {
- t.plan(3)
+ t.plan(4)
- const publishConfig = { registry: 'https://some.registry' }
+ const registry = 'https://some.registry'
+ const publishConfig = { registry }
const testDir = t.testdir({
'package.json': JSON.stringify({
name: 'my-cool-pkg',
@@ -385,7 +416,10 @@ t.test('read registry only from publishConfig', t => {
flatOptions: {
json: false,
},
- config,
+ config: {
+ ...config,
+ getCredentialsByURI: registryCredentials(t, registry),
+ },
},
'../../lib/utils/tar.js': {
getContents: () => ({
@@ -397,7 +431,7 @@ t.test('read registry only from publishConfig', t => {
libnpmpublish: {
publish: (manifest, tarData, opts) => {
t.match(manifest, { name: 'my-cool-pkg', version: '1.0.0' }, 'gets manifest')
- t.same(opts.registry, publishConfig.registry, 'publishConfig is passed through')
+ t.same(opts.registry, registry, 'publishConfig is passed through')
},
},
})
@@ -408,3 +442,44 @@ t.test('read registry only from publishConfig', t => {
t.pass('got to callback')
})
})
+
+t.test('should check auth for scope specific registry', t => {
+ const testDir = t.testdir({
+ 'package.json': JSON.stringify({
+ name: '@npm/my-cool-pkg',
+ version: '1.0.0',
+ }, null, 2),
+ })
+
+ const registry = 'https://scope.specific.registry'
+ const publish = requireInject('../../lib/publish.js', {
+ '../../lib/npm.js': {
+ flatOptions: {
+ json: false,
+ '@npm:registry': registry,
+ },
+ config: {
+ ...config,
+ getCredentialsByURI: registryCredentials(t, registry),
+ },
+ },
+ '../../lib/utils/tar.js': {
+ getContents: () => ({
+ id: 'someid',
+ }),
+ logTar: () => {},
+ },
+ '../../lib/utils/output.js': () => {},
+ '../../lib/utils/otplease.js': (opts, fn) => {
+ return Promise.resolve().then(() => fn(opts))
+ },
+ libnpmpublish: {
+ publish: () => '',
+ },
+ })
+ return publish([testDir], (er) => {
+ if (er)
+ throw er
+ t.pass('got to callback')
+ })
+})
diff --git a/deps/npm/test/lib/utils/open-url.js b/deps/npm/test/lib/utils/open-url.js
new file mode 100644
index 00000000000000..ce1783dadcd7bb
--- /dev/null
+++ b/deps/npm/test/lib/utils/open-url.js
@@ -0,0 +1,165 @@
+const { test } = require('tap')
+const requireInject = require('require-inject')
+
+const npm = {
+ _config: {
+ json: false,
+ browser: true,
+ },
+ config: {
+ get: (k) => npm._config[k],
+ set: (k, v) => {
+ npm._config[k] = v
+ },
+ },
+}
+
+const OUTPUT = []
+const output = (...args) => OUTPUT.push(args)
+
+let openerUrl = null
+let openerOpts = null
+let openerResult = null
+const opener = (url, opts, cb) => {
+ openerUrl = url
+ openerOpts = opts
+ return cb(openerResult)
+}
+
+const openUrl = requireInject('../../../lib/utils/open-url.js', {
+ '../../../lib/npm.js': npm,
+ '../../../lib/utils/output.js': output,
+ opener,
+})
+
+test('opens a url', (t) => {
+ t.teardown(() => {
+ openerUrl = null
+ openerOpts = null
+ OUTPUT.length = 0
+ })
+ openUrl('https://www.npmjs.com', 'npm home', (err) => {
+ if (err)
+ throw err
+
+ t.equal(openerUrl, 'https://www.npmjs.com', 'opened the given url')
+ t.same(openerOpts, { command: null }, 'passed command as null (the default)')
+ t.same(OUTPUT, [], 'printed no output')
+ t.done()
+ })
+})
+
+test('returns error for non-https and non-file url', (t) => {
+ t.teardown(() => {
+ openerUrl = null
+ openerOpts = null
+ OUTPUT.length = 0
+ })
+ openUrl('ftp://www.npmjs.com', 'npm home', (err) => {
+ t.match(err, /Invalid URL/, 'got the correct error')
+ t.equal(openerUrl, null, 'did not open')
+ t.same(openerOpts, null, 'did not open')
+ t.same(OUTPUT, [], 'printed no output')
+ t.done()
+ })
+})
+
+test('returns error for non-parseable url', (t) => {
+ t.teardown(() => {
+ openerUrl = null
+ openerOpts = null
+ OUTPUT.length = 0
+ })
+ openUrl('git+ssh://user@host:repo.git', 'npm home', (err) => {
+ t.match(err, /Invalid URL/, 'got the correct error')
+ t.equal(openerUrl, null, 'did not open')
+ t.same(openerOpts, null, 'did not open')
+ t.same(OUTPUT, [], 'printed no output')
+ t.done()
+ })
+})
+
+test('opens a url with the given browser', (t) => {
+ npm.config.set('browser', 'chrome')
+ t.teardown(() => {
+ openerUrl = null
+ openerOpts = null
+ OUTPUT.length = 0
+ npm.config.set('browser', true)
+ })
+ openUrl('https://www.npmjs.com', 'npm home', (err) => {
+ if (err)
+ throw err
+
+ t.equal(openerUrl, 'https://www.npmjs.com', 'opened the given url')
+ t.same(openerOpts, { command: 'chrome' }, 'passed the given browser as command')
+ t.same(OUTPUT, [], 'printed no output')
+ t.done()
+ })
+})
+
+test('prints where to go when browser is disabled', (t) => {
+ npm.config.set('browser', false)
+ t.teardown(() => {
+ openerUrl = null
+ openerOpts = null
+ OUTPUT.length = 0
+ npm.config.set('browser', true)
+ })
+ openUrl('https://www.npmjs.com', 'npm home', (err) => {
+ if (err)
+ throw err
+
+ t.equal(openerUrl, null, 'did not open')
+ t.same(openerOpts, null, 'did not open')
+ t.equal(OUTPUT.length, 1, 'got one logged message')
+ t.equal(OUTPUT[0].length, 1, 'logged message had one value')
+ t.matchSnapshot(OUTPUT[0][0], 'printed expected message')
+ t.done()
+ })
+})
+
+test('prints where to go when browser is disabled and json is enabled', (t) => {
+ npm.config.set('browser', false)
+ npm.config.set('json', true)
+ t.teardown(() => {
+ openerUrl = null
+ openerOpts = null
+ OUTPUT.length = 0
+ npm.config.set('browser', true)
+ npm.config.set('json', false)
+ })
+ openUrl('https://www.npmjs.com', 'npm home', (err) => {
+ if (err)
+ throw err
+
+ t.equal(openerUrl, null, 'did not open')
+ t.same(openerOpts, null, 'did not open')
+ t.equal(OUTPUT.length, 1, 'got one logged message')
+ t.equal(OUTPUT[0].length, 1, 'logged message had one value')
+ t.matchSnapshot(OUTPUT[0][0], 'printed expected message')
+ t.done()
+ })
+})
+
+test('prints where to go when given browser does not exist', (t) => {
+ npm.config.set('browser', 'firefox')
+ openerResult = Object.assign(new Error('failed'), { code: 'ENOENT' })
+ t.teardown(() => {
+ openerUrl = null
+ openerOpts = null
+ OUTPUT.length = 0
+ npm.config.set('browser', true)
+ })
+ openUrl('https://www.npmjs.com', 'npm home', (err) => {
+ if (err)
+ throw err
+
+ t.equal(openerUrl, 'https://www.npmjs.com', 'tried to open the correct url')
+ t.same(openerOpts, { command: 'firefox' }, 'tried to use the correct browser')
+ t.equal(OUTPUT.length, 1, 'got one logged message')
+ t.equal(OUTPUT[0].length, 1, 'logged message had one value')
+ t.matchSnapshot(OUTPUT[0][0], 'printed expected message')
+ t.done()
+ })
+})
diff --git a/deps/npm/test/lib/utils/otplease.js b/deps/npm/test/lib/utils/otplease.js
new file mode 100644
index 00000000000000..048856b4857707
--- /dev/null
+++ b/deps/npm/test/lib/utils/otplease.js
@@ -0,0 +1,94 @@
+const { test } = require('tap')
+const requireInject = require('require-inject')
+
+const readUserInfo = {
+ otp: async () => '1234',
+}
+
+const otplease = requireInject('../../../lib/utils/otplease.js', {
+ '../../../lib/utils/read-user-info.js': readUserInfo,
+})
+
+test('prompts for otp for EOTP', async (t) => {
+ const stdinTTY = process.stdin.isTTY
+ const stdoutTTY = process.stdout.isTTY
+ process.stdin.isTTY = true
+ process.stdout.isTTY = true
+ t.teardown(() => {
+ process.stdin.isTTY = stdinTTY
+ process.stdout.isTTY = stdoutTTY
+ })
+
+ let runs = 0
+ const fn = async (opts) => {
+ if (++runs === 1)
+ throw Object.assign(new Error('nope'), { code: 'EOTP' })
+
+ t.equal(opts.some, 'prop', 'carried original options')
+ t.equal(opts.otp, '1234', 'received the otp')
+ t.done()
+ }
+
+ await otplease({ some: 'prop' }, fn)
+})
+
+test('prompts for otp for 401', async (t) => {
+ const stdinTTY = process.stdin.isTTY
+ const stdoutTTY = process.stdout.isTTY
+ process.stdin.isTTY = true
+ process.stdout.isTTY = true
+ t.teardown(() => {
+ process.stdin.isTTY = stdinTTY
+ process.stdout.isTTY = stdoutTTY
+ })
+
+ let runs = 0
+ const fn = async (opts) => {
+ if (++runs === 1) {
+ throw Object.assign(new Error('nope'), {
+ code: 'E401',
+ body: 'one-time pass required',
+ })
+ }
+
+ t.equal(opts.some, 'prop', 'carried original options')
+ t.equal(opts.otp, '1234', 'received the otp')
+ t.done()
+ }
+
+ await otplease({ some: 'prop' }, fn)
+})
+
+test('does not prompt for non-otp errors', async (t) => {
+ const stdinTTY = process.stdin.isTTY
+ const stdoutTTY = process.stdout.isTTY
+ process.stdin.isTTY = true
+ process.stdout.isTTY = true
+ t.teardown(() => {
+ process.stdin.isTTY = stdinTTY
+ process.stdout.isTTY = stdoutTTY
+ })
+
+ const fn = async (opts) => {
+ throw new Error('nope')
+ }
+
+ t.rejects(otplease({ some: 'prop' }, fn), { message: 'nope' }, 'rejects with the original error')
+})
+
+test('does not prompt if stdin or stdout is not a tty', async (t) => {
+ const stdinTTY = process.stdin.isTTY
+ const stdoutTTY = process.stdout.isTTY
+ process.stdin.isTTY = false
+ process.stdout.isTTY = false
+ t.teardown(() => {
+ process.stdin.isTTY = stdinTTY
+ process.stdout.isTTY = stdoutTTY
+ })
+
+ const fn = async (opts) => {
+ throw Object.assign(new Error('nope'), { code: 'EOTP' })
+ }
+
+ t.rejects(otplease({ some: 'prop' }, fn), { message: 'nope' }, 'rejects with the original error')
+})
diff --git a/deps/npm/test/lib/utils/pulse-till-done.js b/deps/npm/test/lib/utils/pulse-till-done.js
new file mode 100644
index 00000000000000..16c2d521dad082
--- /dev/null
+++ b/deps/npm/test/lib/utils/pulse-till-done.js
@@ -0,0 +1,35 @@
+const { test } = require('tap')
+const requireInject = require('require-inject')
+
+let pulseStarted = null
+const npmlog = {
+ gauge: {
+ pulse: () => {
+ if (pulseStarted)
+ pulseStarted()
+ },
+ },
+}
+
+const pulseTillDone = requireInject('../../../lib/utils/pulse-till-done.js', {
+ npmlog,
+})
+
+test('pulses (with promise)', async (t) => {
+ t.teardown(() => {
+ pulseStarted = null
+ })
+
+ let resolver
+ const promise = new Promise(resolve => {
+ resolver = resolve
+ })
+
+ const result = pulseTillDone.withPromise(promise)
+ // wait until the gauge has fired at least once
+ await new Promise(resolve => {
+ pulseStarted = resolve
+ })
+ resolver('value')
+ t.resolveMatch(result, 'value', 'returned the resolved promise')
+})
diff --git a/deps/npm/test/lib/utils/read-user-info.js b/deps/npm/test/lib/utils/read-user-info.js
new file mode 100644
index 00000000000000..99d85d66c4feb6
--- /dev/null
+++ b/deps/npm/test/lib/utils/read-user-info.js
@@ -0,0 +1,116 @@
+const { test } = require('tap')
+const requireInject = require('require-inject')
+
+let readOpts = null
+let readResult = null
+const read = (opts, cb) => {
+ readOpts = opts
+ return cb(null, readResult)
+}
+
+const npmlog = {
+ clearProgress: () => {},
+ showProgress: () => {},
+}
+
+const npmUserValidate = {
+ username: (username) => {
+ if (username === 'invalid')
+ return new Error('invalid username')
+
+ return null
+ },
+ email: (email) => {
+ if (email.startsWith('invalid'))
+ return new Error('invalid email')
+
+ return null
+ },
+}
+
+const readUserInfo = requireInject('../../../lib/utils/read-user-info.js', {
+ read,
+ npmlog,
+ 'npm-user-validate': npmUserValidate,
+})
+
+test('otp', async (t) => {
+ readResult = '1234'
+ t.teardown(() => {
+ readResult = null
+ readOpts = null
+ })
+ const result = await readUserInfo.otp()
+ t.equal(result, '1234', 'received the otp')
+})
+
+test('password', async (t) => {
+ readResult = 'password'
+ t.teardown(() => {
+ readResult = null
+ readOpts = null
+ })
+ const result = await readUserInfo.password()
+ t.equal(result, 'password', 'received the password')
+ t.match(readOpts, {
+ silent: true,
+ }, 'got the correct options')
+})
+
+test('username', async (t) => {
+ readResult = 'username'
+ t.teardown(() => {
+ readResult = null
+ readOpts = null
+ })
+ const result = await readUserInfo.username()
+ t.equal(result, 'username', 'received the username')
+})
+
+test('username - invalid warns and retries', async (t) => {
+ readResult = 'invalid'
+ t.teardown(() => {
+ readResult = null
+ readOpts = null
+ })
+
+ let logMsg
+ const log = {
+ warn: (msg) => logMsg = msg,
+ }
+ const pResult = readUserInfo.username(null, null, { log })
+ // have to swap it to a valid username after execution starts
+ // or it will loop forever
+ readResult = 'valid'
+ const result = await pResult
+ t.equal(result, 'valid', 'received the username')
+ t.equal(logMsg, 'invalid username')
+})
+
+test('email', async (t) => {
+ readResult = 'foo@bar.baz'
+ t.teardown(() => {
+ readResult = null
+ readOpts = null
+ })
+ const result = await readUserInfo.email()
+ t.equal(result, 'foo@bar.baz', 'received the email')
+})
+
+test('email - invalid warns and retries', async (t) => {
+ readResult = 'invalid@bar.baz'
+ t.teardown(() => {
+ readResult = null
+ readOpts = null
+ })
+
+ let logMsg
+ const log = {
+ warn: (msg) => logMsg = msg,
+ }
+ const pResult = readUserInfo.email(null, null, { log })
+ readResult = 'foo@bar.baz'
+ const result = await pResult
+ t.equal(result, 'foo@bar.baz', 'received the email')
+ t.equal(logMsg, 'invalid email')
+})
diff --git a/deps/uv/.mailmap b/deps/uv/.mailmap
index 56a80f586b3c17..045b7702d958de 100644
--- a/deps/uv/.mailmap
+++ b/deps/uv/.mailmap
@@ -32,6 +32,7 @@ Nicholas Vavilov
Nick Logan
Rasmus Christian Pedersen
Rasmus Christian Pedersen
+Richard Lau
Robert Mustacchi
Ryan Dahl
Ryan Emery
@@ -47,6 +48,7 @@ Timothy J. Fontaine
Yasuhiro Matsumoto
Yazhong Liu
Yuki Okumura
+cjihrig
gengjiawen
jBarz
jBarz
diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS
index e7c789cfd1b81f..9f327af9f8303f 100644
--- a/deps/uv/AUTHORS
+++ b/deps/uv/AUTHORS
@@ -212,7 +212,7 @@ guworks
RossBencina
Roger A. Light
chenttuuvv
-Richard Lau
+Richard Lau
ronkorving
Corbin Simpson
Zachary Hamm
@@ -448,3 +448,14 @@ Aleksej Lebedev
Nikolay Mitev
Ulrik Strid
Elad Lahav
+Elad Nachmias
+Darshan Sen
+Simon Kadisch
+Momtchil Momtchev
+Ethel Weston <66453757+ethelweston@users.noreply.github.com>
+Drew DeVault
+Mark Klein
+schamberg97 <50446906+schamberg97@users.noreply.github.com>
+Bob Weinand
+Issam E. Maghni
+Juan Pablo Canepa
diff --git a/deps/uv/CMakeLists.txt b/deps/uv/CMakeLists.txt
index e648b00be6432f..c8e881d18f503e 100644
--- a/deps/uv/CMakeLists.txt
+++ b/deps/uv/CMakeLists.txt
@@ -30,6 +30,13 @@ if(QEMU)
add_definitions(-D__QEMU__=1)
endif()
+option(ASAN "Enable AddressSanitizer (ASan)" OFF)
+if(ASAN AND CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang")
+ add_definitions(-D__ASAN__=1)
+ set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address")
+ set (CMAKE_LINKER_FLAGS_DEBUG "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address")
+endif()
+
# Compiler check
string(CONCAT is-msvc $,
@@ -95,6 +102,9 @@ list(APPEND uv_cflags ${lint-no-conditional-assignment-msvc})
list(APPEND uv_cflags ${lint-no-unsafe-msvc})
list(APPEND uv_cflags ${lint-utf8-msvc} )
+check_c_compiler_flag(-fno-strict-aliasing UV_F_STRICT_ALIASING)
+list(APPEND uv_cflags $<$:-fno-strict-aliasing>)
+
set(uv_sources
src/fs-poll.c
src/idna.c
@@ -108,7 +118,7 @@ set(uv_sources
src/version.c)
if(WIN32)
- list(APPEND uv_defines WIN32_LEAN_AND_MEAN _WIN32_WINNT=0x0600)
+ list(APPEND uv_defines WIN32_LEAN_AND_MEAN _WIN32_WINNT=0x0602)
list(APPEND uv_libraries
psapi
user32
@@ -318,7 +328,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL "QNX")
src/unix/bsd-ifaddrs.c
src/unix/no-proctitle.c
src/unix/no-fsevents.c)
- list(APPEND uv_cflags -fno-strict-aliasing)
list(APPEND uv_libraries socket)
endif()
@@ -466,6 +475,7 @@ if(LIBUV_BUILD_TESTS)
test/test-poll-close-doesnt-corrupt-stack.c
test/test-poll-close.c
test/test-poll-closesocket.c
+ test/test-poll-multiple-handles.c
test/test-poll-oob.c
test/test-poll.c
test/test-process-priority.c
diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog
index 055dcaf9f18b4e..d0eaf9fe34a20f 100644
--- a/deps/uv/ChangeLog
+++ b/deps/uv/ChangeLog
@@ -1,3 +1,82 @@
+2021.02.14, Version 1.41.0 (Stable), 1dff88e5161cba5c59276d2070d2e304e4dcb242
+
+Changes since version 1.40.0:
+
+* mailmap: update contact information for richardlau (Richard Lau)
+
+* build: add asan checks (gengjiawen)
+
+* unix: report bind error in uv_tcp_connect() (Ben Noordhuis)
+
+* doc: uv_tcp_bind() never returns UV_EADDRINUSE (Ben Noordhuis)
+
+* test: fix pump and tcp_write_batch benchmarks (Santiago Gimeno)
+
+* doc: mark IBM i as Tier 2 support (Jesse Gorzinski)
+
+* doc,poll: add notes (repeated cb & cancel pending cb) (Elad Nachmias)
+
+* linux: fix -Wincompatible-pointer-types warning (Ben Noordhuis)
+
+* linux: fix -Wsign-compare warning (Ben Noordhuis)
+
+* android: add system call api guards (Ben Noordhuis)
+
+* unix,win: harmonize uv_read_start() error handling (Ben Noordhuis)
+
+* unix,win: more uv_read_start() argument validation (Ben Noordhuis)
+
+* build: turn on -fno-strict-aliasing (Ben Noordhuis)
+
+* stream: add uv_pipe and uv_socketpair to the API (Jameson Nash)
+
+* unix,win: initialize timer `timeout` field (Ben Noordhuis)
+
+* bsd-ifaddrs: improve comments (Darshan Sen)
+
+* test: remove unnecessary uv_fs_stat() calls (Ben Noordhuis)
+
+* fs: fix utime/futime timestamp rounding errors (Ben Noordhuis)
+
+* test: ensure reliable floating point comparison (Jameson Nash)
+
+* unix,fs: fix uv_fs_sendfile() (Santiago Gimeno)
+
+* unix: fix uv_fs_stat when using statx (Simon Kadisch)
+
+* linux,macos: fix uv_set_process_title regression (Momtchil Momtchev)
+
+* doc: clarify UDP errors and recvmmsg (Ethel Weston)
+
+* test-getaddrinfo: use example.invalid (Drew DeVault)
+
+* Revert "build: fix android autotools build" (Bernardo Ramos)
+
+* unix,fs: on DVS fs, statx returns EOPNOTSUPP (Mark Klein)
+
+* win, fs: mkdir really return UV_EINVAL for invalid names (Nicholas Vavilov)
+
+* tools: migrate tools/make_dist_html.py to python3 (Dominique Dumont)
+
+* unix: fix uv_uptime() on linux (schamberg97)
+
+* unix: check for partial copy_file_range support (Momtchil Momtchev)
+
+* win: bump minimum supported version to windows 8 (Ben Noordhuis)
+
+* poll,unix: ensure safety of rapid fd reuse (Bob Weinand)
+
+* test: fix some warnings (Issam E. Maghni)
+
+* unix: fix uv_uptime() regression (Santiago Gimeno)
+
+* doc: fix versionadded metadata (cjihrig)
+
+* test: fix 'incompatible pointer types' warnings (cjihrig)
+
+* unix: check for EXDEV in uv__fs_sendfile() (Darshan Sen)
+
+
2020.09.26, Version 1.40.0 (Stable), 4e69e333252693bd82d6338d6124f0416538dbfc
Changes since version 1.39.0:
diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am
index 46308eaae28ee4..e8bab4963dda78 100644
--- a/deps/uv/Makefile.am
+++ b/deps/uv/Makefile.am
@@ -56,7 +56,7 @@ if WINNT
uvinclude_HEADERS += include/uv/win.h include/uv/tree.h
AM_CPPFLAGS += -I$(top_srcdir)/src/win \
-DWIN32_LEAN_AND_MEAN \
- -D_WIN32_WINNT=0x0600
+ -D_WIN32_WINNT=0x0602
libuv_la_SOURCES += src/win/async.c \
src/win/atomicops-inl.h \
src/win/core.c \
@@ -225,6 +225,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-poll-close.c \
test/test-poll-close-doesnt-corrupt-stack.c \
test/test-poll-closesocket.c \
+ test/test-poll-multiple-handles.c \
test/test-poll-oob.c \
test/test-process-priority.c \
test/test-process-title.c \
@@ -385,10 +386,6 @@ if ANDROID
uvinclude_HEADERS += include/uv/android-ifaddrs.h
libuv_la_CFLAGS += -D_GNU_SOURCE
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
- src/unix/linux-core.c \
- src/unix/linux-inotify.c \
- src/unix/linux-syscalls.c \
- src/unix/procfs-exepath.c \
src/unix/pthread-fixes.c \
src/unix/random-getrandom.c \
src/unix/random-sysctl-linux.c
diff --git a/deps/uv/README.md b/deps/uv/README.md
index 98007c5e7d21de..f6c73709cc5b82 100644
--- a/deps/uv/README.md
+++ b/deps/uv/README.md
@@ -286,6 +286,16 @@ listed in `test/benchmark-list.h`.
Check the [SUPPORTED_PLATFORMS file](SUPPORTED_PLATFORMS.md).
+### `-fno-strict-aliasing`
+
+It is recommended to turn on the `-fno-strict-aliasing` compiler flag in
+projects that use libuv. The use of ad hoc "inheritance" in the libuv API
+may not be safe in the presence of compiler optimizations that depend on
+strict aliasing.
+
+MSVC does not have an equivalent flag but it also does not appear to need it
+at the time of writing (December 2019.)
+
### AIX Notes
AIX compilation using IBM XL C/C++ requires version 12.1 or greater.
diff --git a/deps/uv/SUPPORTED_PLATFORMS.md b/deps/uv/SUPPORTED_PLATFORMS.md
index 72e054eba067ec..30e0ea617a6fca 100644
--- a/deps/uv/SUPPORTED_PLATFORMS.md
+++ b/deps/uv/SUPPORTED_PLATFORMS.md
@@ -4,14 +4,14 @@
|---|---|---|---|
| GNU/Linux | Tier 1 | Linux >= 2.6.32 with glibc >= 2.12 | |
| macOS | Tier 1 | macOS >= 10.7 | |
-| Windows | Tier 1 | >= Windows 7 | MSVC 2008 and later are supported |
+| Windows | Tier 1 | >= Windows 8 | VS 2015 and later are supported |
| FreeBSD | Tier 1 | >= 10 | |
| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix |
+| IBM i | Tier 2 | >= IBM i 7.2 | Maintainers: @libuv/ibmi |
| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos |
| Linux with musl | Tier 2 | musl >= 1.0 | |
| SmartOS | Tier 2 | >= 14.4 | Maintainers: @libuv/smartos |
| Android | Tier 3 | NDK >= r15b | |
-| IBM i | Tier 3 | >= IBM i 7.2 | Maintainers: @libuv/ibmi |
| MinGW | Tier 3 | MinGW32 and MinGW-w64 | |
| SunOS | Tier 3 | Solaris 121 and later | |
| Other | Tier 3 | N/A | |
diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac
index 1a66b74d28357a..4bdc7fd3f532b8 100644
--- a/deps/uv/configure.ac
+++ b/deps/uv/configure.ac
@@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
-AC_INIT([libuv], [1.40.0], [https://github.com/libuv/libuv/issues])
+AC_INIT([libuv], [1.41.0], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
@@ -25,6 +25,7 @@ AC_ENABLE_STATIC
AC_PROG_CC
AM_PROG_CC_C_O
CC_FLAG_VISIBILITY #[-fvisibility=hidden]
+CC_CHECK_CFLAGS_APPEND([-fno-strict-aliasing])
CC_CHECK_CFLAGS_APPEND([-g])
CC_CHECK_CFLAGS_APPEND([-std=gnu89])
CC_CHECK_CFLAGS_APPEND([-Wall])
diff --git a/deps/uv/docs/src/pipe.rst b/deps/uv/docs/src/pipe.rst
index 6437a9d9948148..5fa83b80d36543 100644
--- a/deps/uv/docs/src/pipe.rst
+++ b/deps/uv/docs/src/pipe.rst
@@ -118,3 +118,21 @@ API
function is blocking.
.. versionadded:: 1.16.0
+
+.. c:function:: int uv_pipe(uv_file fds[2], int read_flags, int write_flags)
+
+ Create a pair of connected pipe handles.
+ Data may be written to `fds[1]` and read from `fds[0]`.
+ The resulting handles can be passed to `uv_pipe_open`, used with `uv_spawn`,
+ or for any other purpose.
+
+ Valid values for `flags` are:
+
+ - UV_NONBLOCK_PIPE: Opens the specified socket handle for `OVERLAPPED`
+ or `FIONBIO`/`O_NONBLOCK` I/O usage.
+ This is recommended for handles that will be used by libuv,
+ and not usually recommended otherwise.
+
+ Equivalent to :man:`pipe(2)` with the `O_CLOEXEC` flag set.
+
+ .. versionadded:: 1.41.0
diff --git a/deps/uv/docs/src/poll.rst b/deps/uv/docs/src/poll.rst
index aba8915886bb5f..93a101ec686c53 100644
--- a/deps/uv/docs/src/poll.rst
+++ b/deps/uv/docs/src/poll.rst
@@ -86,36 +86,63 @@ API
.. c:function:: int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb)
Starts polling the file descriptor. `events` is a bitmask made up of
- UV_READABLE, UV_WRITABLE, UV_PRIORITIZED and UV_DISCONNECT. As soon as an
- event is detected the callback will be called with `status` set to 0, and the
- detected events set on the `events` field.
+ `UV_READABLE`, `UV_WRITABLE`, `UV_PRIORITIZED` and `UV_DISCONNECT`. As soon
+ as an event is detected the callback will be called with `status` set to 0,
+ and the detected events set on the `events` field.
- The UV_PRIORITIZED event is used to watch for sysfs interrupts or TCP out-of-band
- messages.
+ The `UV_PRIORITIZED` event is used to watch for sysfs interrupts or TCP
+ out-of-band messages.
- The UV_DISCONNECT event is optional in the sense that it may not be
- reported and the user is free to ignore it, but it can help optimize the shutdown
- path because an extra read or write call might be avoided.
+ The `UV_DISCONNECT` event is optional in the sense that it may not be
+ reported and the user is free to ignore it, but it can help optimize the
+ shutdown path because an extra read or write call might be avoided.
If an error happens while polling, `status` will be < 0 and corresponds
- with one of the UV_E* error codes (see :ref:`errors`). The user should
+ with one of the `UV_E*` error codes (see :ref:`errors`). The user should
not close the socket while the handle is active. If the user does that
- anyway, the callback *may* be called reporting an error status, but this
- is **not** guaranteed.
+ anyway, the callback *may* be called reporting an error status, but this is
+ **not** guaranteed.
.. note::
- Calling :c:func:`uv_poll_start` on a handle that is already active is fine. Doing so
- will update the events mask that is being watched for.
+ Calling :c:func:`uv_poll_start` on a handle that is already active is
+ fine. Doing so will update the events mask that is being watched for.
.. note::
- Though UV_DISCONNECT can be set, it is unsupported on AIX and as such will not be set
- on the `events` field in the callback.
+ Though `UV_DISCONNECT` can be set, it is unsupported on AIX and as such
+ will not be set on the `events` field in the callback.
- .. versionchanged:: 1.9.0 Added the UV_DISCONNECT event.
- .. versionchanged:: 1.14.0 Added the UV_PRIORITIZED event.
+ .. note::
+ If one of the events `UV_READABLE` or `UV_WRITABLE` are set, the
+ callback will be called again, as long as the given fd/socket remains
+ readable or writable accordingly. Particularly in each of the following
+ scenarios:
+
+ * The callback has been called because the socket became
+ readable/writable and the callback did not conduct a read/write on
+ this socket at all.
+ * The callback committed a read on the socket, and has not read all the
+ available data (when `UV_READABLE` is set).
+ * The callback committed a write on the socket, but it remained
+ writable afterwards (when `UV_WRITABLE` is set).
+ * The socket has already became readable/writable before calling
+ :c:func:`uv_poll_start` on a poll handle associated with this socket,
+ and since then the state of the socket did not changed.
+
+ In all of the above listed scenarios, the socket remains readable or
+ writable and hence the callback will be called again (depending on the
+ events set in the bitmask). This behaviour is known as level
+ triggering.
+
+ .. versionchanged:: 1.9.0 Added the `UV_DISCONNECT` event.
+ .. versionchanged:: 1.14.0 Added the `UV_PRIORITIZED` event.
.. c:function:: int uv_poll_stop(uv_poll_t* poll)
Stop polling the file descriptor, the callback will no longer be called.
+ .. note::
+ Calling :c:func:`uv_poll_stop` is effective immediately: any pending
+ callback is also canceled, even if the socket state change notification
+ was already pending.
+
.. seealso:: The :c:type:`uv_handle_t` API functions also apply.
diff --git a/deps/uv/docs/src/process.rst b/deps/uv/docs/src/process.rst
index 8ff19add57849f..ea6c4b9ad2811d 100644
--- a/deps/uv/docs/src/process.rst
+++ b/deps/uv/docs/src/process.rst
@@ -119,12 +119,14 @@ Data types
* flags may be specified to create a duplex data stream.
*/
UV_READABLE_PIPE = 0x10,
- UV_WRITABLE_PIPE = 0x20
+ UV_WRITABLE_PIPE = 0x20,
/*
- * Open the child pipe handle in overlapped mode on Windows.
- * On Unix it is silently ignored.
- */
- UV_OVERLAPPED_PIPE = 0x40
+ * When UV_CREATE_PIPE is specified, specifying UV_NONBLOCK_PIPE opens the
+ * handle in non-blocking mode in the child. This may cause loss of data,
+ * if the child is not designed to handle to encounter this mode,
+ * but can also be significantly more efficient.
+ */
+ UV_NONBLOCK_PIPE = 0x40
} uv_stdio_flags;
diff --git a/deps/uv/docs/src/stream.rst b/deps/uv/docs/src/stream.rst
index 2ccb59b51cb432..429ebdab28f5f1 100644
--- a/deps/uv/docs/src/stream.rst
+++ b/deps/uv/docs/src/stream.rst
@@ -139,6 +139,11 @@ API
be made several times until there is no more data to read or
:c:func:`uv_read_stop` is called.
+ .. versionchanged:: 1.38.0 :c:func:`uv_read_start()` now consistently
+ returns `UV_EALREADY` when called twice, and `UV_EINVAL` when the
+ stream is closing. With older libuv versions, it returns `UV_EALREADY`
+ on Windows but not UNIX, and `UV_EINVAL` on UNIX but not Windows.
+
.. c:function:: int uv_read_stop(uv_stream_t*)
Stop reading data from the stream. The :c:type:`uv_read_cb` callback will
diff --git a/deps/uv/docs/src/tcp.rst b/deps/uv/docs/src/tcp.rst
index 3cc8efaac10bf6..cccc86bbfc0335 100644
--- a/deps/uv/docs/src/tcp.rst
+++ b/deps/uv/docs/src/tcp.rst
@@ -81,10 +81,9 @@ API
initialized ``struct sockaddr_in`` or ``struct sockaddr_in6``.
When the port is already taken, you can expect to see an ``UV_EADDRINUSE``
- error from either :c:func:`uv_tcp_bind`, :c:func:`uv_listen` or
- :c:func:`uv_tcp_connect`. That is, a successful call to this function does
- not guarantee that the call to :c:func:`uv_listen` or :c:func:`uv_tcp_connect`
- will succeed as well.
+ error from :c:func:`uv_listen` or :c:func:`uv_tcp_connect`. That is,
+ a successful call to this function does not guarantee that the call
+ to :c:func:`uv_listen` or :c:func:`uv_tcp_connect` will succeed as well.
`flags` can contain ``UV_TCP_IPV6ONLY``, in which case dual-stack support
is disabled and only IPv6 is used.
@@ -128,3 +127,20 @@ API
:c:func:`uv_tcp_close_reset` calls is not allowed.
.. versionadded:: 1.32.0
+
+.. c:function:: int uv_socketpair(int type, int protocol, uv_os_sock_t socket_vector[2], int flags0, int flags1)
+
+ Create a pair of connected sockets with the specified properties.
+ The resulting handles can be passed to `uv_tcp_open`, used with `uv_spawn`,
+ or for any other purpose.
+
+ Valid values for `flags0` and `flags1` are:
+
+ - UV_NONBLOCK_PIPE: Opens the specified socket handle for `OVERLAPPED`
+ or `FIONBIO`/`O_NONBLOCK` I/O usage.
+ This is recommended for handles that will be used by libuv,
+ and not usually recommended otherwise.
+
+ Equivalent to :man:`socketpair(2)` with a domain of AF_UNIX.
+
+ .. versionadded:: 1.41.0
diff --git a/deps/uv/docs/src/udp.rst b/deps/uv/docs/src/udp.rst
index 30aa4593f01936..827fbaad6c0476 100644
--- a/deps/uv/docs/src/udp.rst
+++ b/deps/uv/docs/src/udp.rst
@@ -73,7 +73,8 @@ Data types
* `nread`: Number of bytes that have been received.
0 if there is no more data to read. Note that 0 may also mean that an
empty datagram was received (in this case `addr` is not NULL). < 0 if
- a transmission error was detected.
+ a transmission error was detected; if using :man:`recvmmsg(2)` no more
+ chunks will be received and the buffer can be freed safely.
* `buf`: :c:type:`uv_buf_t` with the received data.
* `addr`: ``struct sockaddr*`` containing the address of the sender.
Can be NULL. Valid for the duration of the callback only.
@@ -84,10 +85,11 @@ Data types
on error.
When using :man:`recvmmsg(2)`, chunks will have the `UV_UDP_MMSG_CHUNK` flag set,
- those must not be freed. There will be a final callback with `nread` set to 0,
- `addr` set to NULL and the buffer pointing at the initially allocated data with
- the `UV_UDP_MMSG_CHUNK` flag cleared and the `UV_UDP_MMSG_FREE` flag set.
- The callee can now safely free the provided buffer.
+ those must not be freed. If no errors occur, there will be a final callback with
+ `nread` set to 0, `addr` set to NULL and the buffer pointing at the initially
+ allocated data with the `UV_UDP_MMSG_CHUNK` flag cleared and the `UV_UDP_MMSG_FREE`
+ flag set. If a UDP socket error occurs, `nread` will be < 0. In either scenario,
+ the callee can now safely free the provided buffer.
.. versionchanged:: 1.40.0 added the `UV_UDP_MMSG_FREE` flag.
diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h
index 2557961eedba7f..1e1fc94bfcc3dc 100644
--- a/deps/uv/include/uv.h
+++ b/deps/uv/include/uv.h
@@ -475,6 +475,12 @@ UV_EXTERN int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd);
UV_EXTERN uv_buf_t uv_buf_init(char* base, unsigned int len);
+UV_EXTERN int uv_pipe(uv_file fds[2], int read_flags, int write_flags);
+UV_EXTERN int uv_socketpair(int type,
+ int protocol,
+ uv_os_sock_t socket_vector[2],
+ int flags0,
+ int flags1);
#define UV_STREAM_FIELDS \
/* number of bytes queued for writing */ \
@@ -933,10 +939,13 @@ typedef enum {
UV_WRITABLE_PIPE = 0x20,
/*
- * Open the child pipe handle in overlapped mode on Windows.
- * On Unix it is silently ignored.
+ * When UV_CREATE_PIPE is specified, specifying UV_NONBLOCK_PIPE opens the
+ * handle in non-blocking mode in the child. This may cause loss of data,
+ * if the child is not designed to handle to encounter this mode,
+ * but can also be significantly more efficient.
*/
- UV_OVERLAPPED_PIPE = 0x40
+ UV_NONBLOCK_PIPE = 0x40,
+ UV_OVERLAPPED_PIPE = 0x40 /* old name, for compatibility */
} uv_stdio_flags;
typedef struct uv_stdio_container_s {
diff --git a/deps/uv/include/uv/version.h b/deps/uv/include/uv/version.h
index 5272008a3434b5..e94f1e02e15354 100644
--- a/deps/uv/include/uv/version.h
+++ b/deps/uv/include/uv/version.h
@@ -31,7 +31,7 @@
*/
#define UV_VERSION_MAJOR 1
-#define UV_VERSION_MINOR 40
+#define UV_VERSION_MINOR 41
#define UV_VERSION_PATCH 0
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""
diff --git a/deps/uv/src/timer.c b/deps/uv/src/timer.c
index 1bea2a8bd29cdf..bc680e71a9ef04 100644
--- a/deps/uv/src/timer.c
+++ b/deps/uv/src/timer.c
@@ -58,6 +58,7 @@ static int timer_less_than(const struct heap_node* ha,
int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
handle->timer_cb = NULL;
+ handle->timeout = 0;
handle->repeat = 0;
return 0;
}
diff --git a/deps/uv/src/unix/async.c b/deps/uv/src/unix/async.c
index 5f58fb88d628ec..e1805c323795e5 100644
--- a/deps/uv/src/unix/async.c
+++ b/deps/uv/src/unix/async.c
@@ -214,7 +214,7 @@ static int uv__async_start(uv_loop_t* loop) {
pipefd[0] = err;
pipefd[1] = -1;
#else
- err = uv__make_pipe(pipefd, UV__F_NONBLOCK);
+ err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
if (err < 0)
return err;
#endif
diff --git a/deps/uv/src/unix/bsd-ifaddrs.c b/deps/uv/src/unix/bsd-ifaddrs.c
index 5223ab4879677e..e48934bce2b65d 100644
--- a/deps/uv/src/unix/bsd-ifaddrs.c
+++ b/deps/uv/src/unix/bsd-ifaddrs.c
@@ -42,8 +42,8 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return 1;
#if !defined(__CYGWIN__) && !defined(__MSYS__)
/*
- * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, just see whether `sa_family`
- * equals to `AF_LINK` or not. Otherwise, the result depends on the operation
+ * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, return whether `sa_family`
+ * equals `AF_LINK`. Otherwise, the result depends on the operating
* system with `AF_LINK` or `PF_INET`.
*/
if (exclude_type == UV__EXCLUDE_IFPHYS)
@@ -53,7 +53,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
defined(__HAIKU__)
/*
* On BSD getifaddrs returns information related to the raw underlying
- * devices. We're not interested in this information.
+ * devices. We're not interested in this information.
*/
if (ent->ifa_addr->sa_family == AF_LINK)
return 1;
diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c
index 1597828c868b38..63f268f795f100 100644
--- a/deps/uv/src/unix/core.c
+++ b/deps/uv/src/unix/core.c
@@ -925,13 +925,12 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
if (w->pevents == 0) {
QUEUE_REMOVE(&w->watcher_queue);
QUEUE_INIT(&w->watcher_queue);
+ w->events = 0;
- if (loop->watchers[w->fd] != NULL) {
- assert(loop->watchers[w->fd] == w);
+ if (w == loop->watchers[w->fd]) {
assert(loop->nfds > 0);
loop->watchers[w->fd] = NULL;
loop->nfds--;
- w->events = 0;
}
}
else if (QUEUE_EMPTY(&w->watcher_queue))
diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c
index 556fd103c3a954..fd7ae08755f519 100644
--- a/deps/uv/src/unix/fs.c
+++ b/deps/uv/src/unix/fs.c
@@ -58,6 +58,7 @@
#if defined(__linux__) || defined(__sun)
# include
+# include
#endif
#if defined(__APPLE__)
@@ -212,14 +213,30 @@ static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
struct timespec ts;
ts.tv_sec = time;
- ts.tv_nsec = (uint64_t)(time * 1000000) % 1000000 * 1000;
+ ts.tv_nsec = (time - ts.tv_sec) * 1e9;
+
+ /* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
+ * stick to microsecond resolution for the sake of consistency with other
+ * platforms. I'm the original author of this compatibility hack but I'm
+ * less convinced it's useful nowadays.
+ */
+ ts.tv_nsec -= ts.tv_nsec % 1000;
+
+ if (ts.tv_nsec < 0) {
+ ts.tv_nsec += 1e9;
+ ts.tv_sec -= 1;
+ }
return ts;
}
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
struct timeval tv;
tv.tv_sec = time;
- tv.tv_usec = (uint64_t)(time * 1000000) % 1000000;
+ tv.tv_usec = (time - tv.tv_sec) * 1e6;
+ if (tv.tv_usec < 0) {
+ tv.tv_usec += 1e6;
+ tv.tv_sec -= 1;
+ }
return tv;
}
@@ -227,9 +244,6 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__) \
|| defined(_AIX71) \
|| defined(__HAIKU__)
- /* utimesat() has nanosecond resolution but we stick to microseconds
- * for the sake of consistency with other platforms.
- */
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@@ -906,11 +920,17 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
static int copy_file_range_support = 1;
if (copy_file_range_support) {
- r = uv__fs_copy_file_range(in_fd, NULL, out_fd, &off, req->bufsml[0].len, 0);
+ r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
if (r == -1 && errno == ENOSYS) {
+ /* ENOSYS - it will never work */
errno = 0;
copy_file_range_support = 0;
+ } else if (r == -1 && (errno == ENOTSUP || errno == EXDEV)) {
+ /* ENOTSUP - it could work on another file system type */
+ /* EXDEV - it will not work when in_fd and out_fd are not on the same
+ mounted filesystem (pre Linux 5.3) */
+ errno = 0;
} else {
goto ok;
}
@@ -1010,9 +1030,6 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
|| defined(_AIX71) \
|| defined(__sun) \
|| defined(__HAIKU__)
- /* utimesat() has nanosecond resolution but we stick to microseconds
- * for the sake of consistency with other platforms.
- */
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@@ -1220,7 +1237,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
if (fstatfs(dstfd, &s) == -1)
goto out;
- if (s.f_type != /* CIFS */ 0xFF534D42u)
+ if ((unsigned) s.f_type != /* CIFS */ 0xFF534D42u)
goto out;
}
@@ -1420,8 +1437,9 @@ static int uv__fs_statx(int fd,
case -1:
/* EPERM happens when a seccomp filter rejects the system call.
* Has been observed with libseccomp < 2.3.3 and docker < 18.04.
+ * EOPNOTSUPP is used on DVS exported filesystems
*/
- if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
+ if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
return -1;
/* Fall through. */
default:
@@ -1434,12 +1452,12 @@ static int uv__fs_statx(int fd,
return UV_ENOSYS;
}
- buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
+ buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
buf->st_mode = statxbuf.stx_mode;
buf->st_nlink = statxbuf.stx_nlink;
buf->st_uid = statxbuf.stx_uid;
buf->st_gid = statxbuf.stx_gid;
- buf->st_rdev = statxbuf.stx_rdev_major;
+ buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
buf->st_ino = statxbuf.stx_ino;
buf->st_size = statxbuf.stx_size;
buf->st_blksize = statxbuf.stx_blksize;
diff --git a/deps/uv/src/unix/internal.h b/deps/uv/src/unix/internal.h
index 570274ed60bebc..3bdf7283bd4fd8 100644
--- a/deps/uv/src/unix/internal.h
+++ b/deps/uv/src/unix/internal.h
@@ -282,12 +282,6 @@ int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
#endif /* defined(__APPLE__) */
-#ifdef O_NONBLOCK
-# define UV__F_NONBLOCK O_NONBLOCK
-#else
-# define UV__F_NONBLOCK 1
-#endif
-
int uv__make_pipe(int fds[2], int flags);
#if defined(__APPLE__)
diff --git a/deps/uv/src/unix/linux-core.c b/deps/uv/src/unix/linux-core.c
index 4db2f05053a1cc..c356e96d2dec5b 100644
--- a/deps/uv/src/unix/linux-core.c
+++ b/deps/uv/src/unix/linux-core.c
@@ -602,22 +602,53 @@ int uv_resident_set_memory(size_t* rss) {
return UV_EINVAL;
}
+static int uv__slurp(const char* filename, char* buf, size_t len) {
+ ssize_t n;
+ int fd;
+
+ assert(len > 0);
+
+ fd = uv__open_cloexec(filename, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ do
+ n = read(fd, buf, len - 1);
+ while (n == -1 && errno == EINTR);
+
+ if (uv__close_nocheckstdio(fd))
+ abort();
+
+ if (n < 0)
+ return UV__ERR(errno);
+
+ buf[n] = '\0';
+
+ return 0;
+}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
+ char buf[128];
struct timespec now;
int r;
+ /* Try /proc/uptime first, then fallback to clock_gettime(). */
+
+ if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
+ if (1 == sscanf(buf, "%lf", uptime))
+ return 0;
+
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
- retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
+ retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
}
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
- goto retry;
+ goto retry_clock_gettime;
}
if (r)
@@ -1025,32 +1056,6 @@ void uv__set_process_title(const char* title) {
}
-static int uv__slurp(const char* filename, char* buf, size_t len) {
- ssize_t n;
- int fd;
-
- assert(len > 0);
-
- fd = uv__open_cloexec(filename, O_RDONLY);
- if (fd < 0)
- return fd;
-
- do
- n = read(fd, buf, len - 1);
- while (n == -1 && errno == EINTR);
-
- if (uv__close_nocheckstdio(fd))
- abort();
-
- if (n < 0)
- return UV__ERR(errno);
-
- buf[n] = '\0';
-
- return 0;
-}
-
-
static uint64_t uv__read_proc_meminfo(const char* what) {
uint64_t rc;
char* p;
diff --git a/deps/uv/src/unix/linux-syscalls.c b/deps/uv/src/unix/linux-syscalls.c
index 44daaf12d49810..5071cd56d1fcb2 100644
--- a/deps/uv/src/unix/linux-syscalls.c
+++ b/deps/uv/src/unix/linux-syscalls.c
@@ -194,37 +194,37 @@ int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
-#if defined(__NR_preadv)
- return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
-#else
+#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
-#if defined(__NR_pwritev)
- return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
-#else
+#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
int uv__dup3(int oldfd, int newfd, int flags) {
-#if defined(__NR_dup3)
- return syscall(__NR_dup3, oldfd, newfd, flags);
-#else
+#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_dup3, oldfd, newfd, flags);
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
- ssize_t* off_in,
+ off_t* off_in,
int fd_out,
- ssize_t* off_out,
+ off_t* off_out,
size_t len,
unsigned int flags)
{
@@ -247,21 +247,18 @@ int uv__statx(int dirfd,
int flags,
unsigned int mask,
struct uv__statx* statxbuf) {
- /* __NR_statx make Android box killed by SIGSYS.
- * That looks like a seccomp2 sandbox filter rejecting the system call.
- */
-#if defined(__NR_statx) && !defined(__ANDROID__)
- return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
-#else
+#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
#endif
}
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
-#if defined(__NR_getrandom)
- return syscall(__NR_getrandom, buf, buflen, flags);
-#else
+#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_getrandom, buf, buflen, flags);
#endif
}
diff --git a/deps/uv/src/unix/linux-syscalls.h b/deps/uv/src/unix/linux-syscalls.h
index 761ff32e21bc53..c85231f6bf4436 100644
--- a/deps/uv/src/unix/linux-syscalls.h
+++ b/deps/uv/src/unix/linux-syscalls.h
@@ -66,9 +66,9 @@ ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset)
int uv__dup3(int oldfd, int newfd, int flags);
ssize_t
uv__fs_copy_file_range(int fd_in,
- ssize_t* off_in,
+ off_t* off_in,
int fd_out,
- ssize_t* off_out,
+ off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
diff --git a/deps/uv/src/unix/pipe.c b/deps/uv/src/unix/pipe.c
index 040d57817fa5b1..788e038e8aaae9 100644
--- a/deps/uv/src/unix/pipe.c
+++ b/deps/uv/src/unix/pipe.c
@@ -379,3 +379,57 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
return r != -1 ? 0 : UV__ERR(errno);
}
+
+
+int uv_pipe(uv_os_fd_t fds[2], int read_flags, int write_flags) {
+ uv_os_fd_t temp[2];
+ int err;
+#if defined(__FreeBSD__) || defined(__linux__)
+ int flags = O_CLOEXEC;
+
+ if ((read_flags & UV_NONBLOCK_PIPE) && (write_flags & UV_NONBLOCK_PIPE))
+ flags |= UV_FS_O_NONBLOCK;
+
+ if (pipe2(temp, flags))
+ return UV__ERR(errno);
+
+ if (flags & UV_FS_O_NONBLOCK) {
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+ }
+#else
+ if (pipe(temp))
+ return UV__ERR(errno);
+
+ if ((err = uv__cloexec(temp[0], 1)))
+ goto fail;
+
+ if ((err = uv__cloexec(temp[1], 1)))
+ goto fail;
+#endif
+
+ if (read_flags & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[0], 1)))
+ goto fail;
+
+ if (write_flags & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[1], 1)))
+ goto fail;
+
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+
+fail:
+ uv__close(temp[0]);
+ uv__close(temp[1]);
+ return err;
+}
+
+
+int uv__make_pipe(int fds[2], int flags) {
+ return uv_pipe(fds,
+ flags & UV_NONBLOCK_PIPE,
+ flags & UV_NONBLOCK_PIPE);
+}
diff --git a/deps/uv/src/unix/poll.c b/deps/uv/src/unix/poll.c
index 3d5022b22e85b6..7a1bc7b9dd58d8 100644
--- a/deps/uv/src/unix/poll.c
+++ b/deps/uv/src/unix/poll.c
@@ -116,12 +116,21 @@ int uv_poll_stop(uv_poll_t* handle) {
int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
+ uv__io_t** watchers;
+ uv__io_t* w;
int events;
assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
UV_PRIORITIZED)) == 0);
assert(!uv__is_closing(handle));
+ watchers = handle->loop->watchers;
+ w = &handle->io_watcher;
+
+ if (uv__fd_exists(handle->loop, w->fd))
+ if (watchers[w->fd] != w)
+ return UV_EEXIST;
+
uv__poll_stop(handle);
if (pevents == 0)
diff --git a/deps/uv/src/unix/process.c b/deps/uv/src/unix/process.c
index b021aaeba87d0b..8f94c53b249978 100644
--- a/deps/uv/src/unix/process.c
+++ b/deps/uv/src/unix/process.c
@@ -111,68 +111,6 @@ static void uv__chld(uv_signal_t* handle, int signum) {
assert(QUEUE_EMPTY(&pending));
}
-
-static int uv__make_socketpair(int fds[2]) {
-#if defined(__FreeBSD__) || defined(__linux__)
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, fds))
- return UV__ERR(errno);
-
- return 0;
-#else
- int err;
-
- if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
- return UV__ERR(errno);
-
- err = uv__cloexec(fds[0], 1);
- if (err == 0)
- err = uv__cloexec(fds[1], 1);
-
- if (err != 0) {
- uv__close(fds[0]);
- uv__close(fds[1]);
- return UV__ERR(errno);
- }
-
- return 0;
-#endif
-}
-
-
-int uv__make_pipe(int fds[2], int flags) {
-#if defined(__FreeBSD__) || defined(__linux__)
- if (pipe2(fds, flags | O_CLOEXEC))
- return UV__ERR(errno);
-
- return 0;
-#else
- if (pipe(fds))
- return UV__ERR(errno);
-
- if (uv__cloexec(fds[0], 1))
- goto fail;
-
- if (uv__cloexec(fds[1], 1))
- goto fail;
-
- if (flags & UV__F_NONBLOCK) {
- if (uv__nonblock(fds[0], 1))
- goto fail;
-
- if (uv__nonblock(fds[1], 1))
- goto fail;
- }
-
- return 0;
-
-fail:
- uv__close(fds[0]);
- uv__close(fds[1]);
- return UV__ERR(errno);
-#endif
-}
-
-
/*
* Used for initializing stdio streams like options.stdin_stream. Returns
* zero on success. See also the cleanup section in uv_spawn().
@@ -192,7 +130,7 @@ static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
if (container->data.stream->type != UV_NAMED_PIPE)
return UV_EINVAL;
else
- return uv__make_socketpair(fds);
+ return uv_socketpair(SOCK_STREAM, 0, fds, 0, 0);
case UV_INHERIT_FD:
case UV_INHERIT_STREAM:
diff --git a/deps/uv/src/unix/proctitle.c b/deps/uv/src/unix/proctitle.c
index 9ffe5b629c2554..9e39545e44a0ff 100644
--- a/deps/uv/src/unix/proctitle.c
+++ b/deps/uv/src/unix/proctitle.c
@@ -119,6 +119,7 @@ int uv_set_process_title(const char* title) {
memcpy(pt->str, title, len);
memset(pt->str + len, '\0', pt->cap - len);
pt->len = len;
+ uv__set_process_title(pt->str);
uv_mutex_unlock(&process_title_mutex);
diff --git a/deps/uv/src/unix/signal.c b/deps/uv/src/unix/signal.c
index f40a3e54ebb74e..1133c73a955525 100644
--- a/deps/uv/src/unix/signal.c
+++ b/deps/uv/src/unix/signal.c
@@ -265,7 +265,7 @@ static int uv__signal_loop_once_init(uv_loop_t* loop) {
if (loop->signal_pipefd[0] != -1)
return 0;
- err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK);
+ err = uv__make_pipe(loop->signal_pipefd, UV_NONBLOCK_PIPE);
if (err)
return err;
diff --git a/deps/uv/src/unix/stream.c b/deps/uv/src/unix/stream.c
index 8327f9ccfcea75..106785e4574ea0 100644
--- a/deps/uv/src/unix/stream.c
+++ b/deps/uv/src/unix/stream.c
@@ -1552,18 +1552,12 @@ int uv_try_write(uv_stream_t* stream,
}
-int uv_read_start(uv_stream_t* stream,
- uv_alloc_cb alloc_cb,
- uv_read_cb read_cb) {
+int uv__read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
stream->type == UV_TTY);
- if (stream->flags & UV_HANDLE_CLOSING)
- return UV_EINVAL;
-
- if (!(stream->flags & UV_HANDLE_READABLE))
- return UV_ENOTCONN;
-
/* The UV_HANDLE_READING flag is irrelevant of the state of the tcp - it just
* expresses the desired state of the user.
*/
diff --git a/deps/uv/src/unix/tcp.c b/deps/uv/src/unix/tcp.c
index 18acd20df14e79..bc0fb661f1c520 100644
--- a/deps/uv/src/unix/tcp.c
+++ b/deps/uv/src/unix/tcp.c
@@ -214,14 +214,15 @@ int uv__tcp_connect(uv_connect_t* req,
if (handle->connect_req != NULL)
return UV_EALREADY; /* FIXME(bnoordhuis) UV_EINVAL or maybe UV_EBUSY. */
+ if (handle->delayed_error != 0)
+ goto out;
+
err = maybe_new_socket(handle,
addr->sa_family,
UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
if (err)
return err;
- handle->delayed_error = 0;
-
do {
errno = 0;
r = connect(uv__stream_fd(handle), addr, addrlen);
@@ -249,6 +250,8 @@ int uv__tcp_connect(uv_connect_t* req,
return UV__ERR(errno);
}
+out:
+
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
@@ -459,3 +462,49 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
void uv__tcp_close(uv_tcp_t* handle) {
uv__stream_close((uv_stream_t*)handle);
}
+
+
+int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
+ uv_os_sock_t temp[2];
+ int err;
+#if defined(__FreeBSD__) || defined(__linux__)
+ int flags;
+
+ flags = type | SOCK_CLOEXEC;
+ if ((flags0 & UV_NONBLOCK_PIPE) && (flags1 & UV_NONBLOCK_PIPE))
+ flags |= SOCK_NONBLOCK;
+
+ if (socketpair(AF_UNIX, flags, protocol, temp))
+ return UV__ERR(errno);
+
+ if (flags & UV_FS_O_NONBLOCK) {
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+ }
+#else
+ if (socketpair(AF_UNIX, type, protocol, temp))
+ return UV__ERR(errno);
+
+ if ((err = uv__cloexec(temp[0], 1)))
+ goto fail;
+ if ((err = uv__cloexec(temp[1], 1)))
+ goto fail;
+#endif
+
+ if (flags0 & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[0], 1)))
+ goto fail;
+ if (flags1 & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[1], 1)))
+ goto fail;
+
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+
+fail:
+ uv__close(temp[0]);
+ uv__close(temp[1]);
+ return err;
+}
diff --git a/deps/uv/src/uv-common.c b/deps/uv/src/uv-common.c
index 602e5f492fd2be..dd559a11d11b6a 100644
--- a/deps/uv/src/uv-common.c
+++ b/deps/uv/src/uv-common.c
@@ -832,6 +832,25 @@ void uv_loop_delete(uv_loop_t* loop) {
}
+int uv_read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
+ return UV_EINVAL;
+
+ if (stream->flags & UV_HANDLE_CLOSING)
+ return UV_EINVAL;
+
+ if (stream->flags & UV_HANDLE_READING)
+ return UV_EALREADY;
+
+ if (!(stream->flags & UV_HANDLE_READABLE))
+ return UV_ENOTCONN;
+
+ return uv__read_start(stream, alloc_cb, read_cb);
+}
+
+
void uv_os_free_environ(uv_env_item_t* envitems, int count) {
int i;
diff --git a/deps/uv/src/uv-common.h b/deps/uv/src/uv-common.h
index e851291cc06e01..a92912fdb1a72e 100644
--- a/deps/uv/src/uv-common.h
+++ b/deps/uv/src/uv-common.h
@@ -136,6 +136,10 @@ int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap);
void uv__loop_close(uv_loop_t* loop);
+int uv__read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb);
+
int uv__tcp_bind(uv_tcp_t* tcp,
const struct sockaddr* addr,
unsigned int addrlen,
diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c
index 8a801749d472b0..a083b5e82c8c42 100644
--- a/deps/uv/src/win/fs.c
+++ b/deps/uv/src/win/fs.c
@@ -92,30 +92,24 @@
return; \
}
-#define MILLIONu (1000U * 1000U)
-#define BILLIONu (1000U * 1000U * 1000U)
+#define MILLION ((int64_t) 1000 * 1000)
+#define BILLION ((int64_t) 1000 * 1000 * 1000)
-#define FILETIME_TO_UINT(filetime) \
- (*((uint64_t*) &(filetime)) - (uint64_t) 116444736 * BILLIONu)
-
-#define FILETIME_TO_TIME_T(filetime) \
- (FILETIME_TO_UINT(filetime) / (10u * MILLIONu))
-
-#define FILETIME_TO_TIME_NS(filetime, secs) \
- ((FILETIME_TO_UINT(filetime) - (secs * (uint64_t) 10 * MILLIONu)) * 100U)
-
-#define FILETIME_TO_TIMESPEC(ts, filetime) \
- do { \
- (ts).tv_sec = (long) FILETIME_TO_TIME_T(filetime); \
- (ts).tv_nsec = (long) FILETIME_TO_TIME_NS(filetime, (ts).tv_sec); \
- } while(0)
+static void uv__filetime_to_timespec(uv_timespec_t *ts, int64_t filetime) {
+ filetime -= 116444736 * BILLION;
+ ts->tv_sec = (long) (filetime / (10 * MILLION));
+ ts->tv_nsec = (long) ((filetime - ts->tv_sec * 10 * MILLION) * 100U);
+ if (ts->tv_nsec < 0) {
+ ts->tv_sec -= 1;
+ ts->tv_nsec += 1e9;
+ }
+}
#define TIME_T_TO_FILETIME(time, filetime_ptr) \
do { \
- uint64_t bigtime = ((uint64_t) ((time) * (uint64_t) 10 * MILLIONu)) + \
- (uint64_t) 116444736 * BILLIONu; \
- (filetime_ptr)->dwLowDateTime = bigtime & 0xFFFFFFFF; \
- (filetime_ptr)->dwHighDateTime = bigtime >> 32; \
+ int64_t bigtime = ((time) * 10 * MILLION + 116444736 * BILLION); \
+ (filetime_ptr)->dwLowDateTime = (uint64_t) bigtime & 0xFFFFFFFF; \
+ (filetime_ptr)->dwHighDateTime = (uint64_t) bigtime >> 32; \
} while(0)
#define IS_SLASH(c) ((c) == L'\\' || (c) == L'/')
@@ -1224,7 +1218,8 @@ void fs__mkdir(uv_fs_t* req) {
SET_REQ_RESULT(req, 0);
} else {
SET_REQ_WIN32_ERROR(req, GetLastError());
- if (req->sys_errno_ == ERROR_INVALID_NAME)
+ if (req->sys_errno_ == ERROR_INVALID_NAME ||
+ req->sys_errno_ == ERROR_DIRECTORY)
req->result = UV_EINVAL;
}
}
@@ -1791,10 +1786,14 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
((_S_IREAD | _S_IWRITE) >> 6);
- FILETIME_TO_TIMESPEC(statbuf->st_atim, file_info.BasicInformation.LastAccessTime);
- FILETIME_TO_TIMESPEC(statbuf->st_ctim, file_info.BasicInformation.ChangeTime);
- FILETIME_TO_TIMESPEC(statbuf->st_mtim, file_info.BasicInformation.LastWriteTime);
- FILETIME_TO_TIMESPEC(statbuf->st_birthtim, file_info.BasicInformation.CreationTime);
+ uv__filetime_to_timespec(&statbuf->st_atim,
+ file_info.BasicInformation.LastAccessTime.QuadPart);
+ uv__filetime_to_timespec(&statbuf->st_ctim,
+ file_info.BasicInformation.ChangeTime.QuadPart);
+ uv__filetime_to_timespec(&statbuf->st_mtim,
+ file_info.BasicInformation.LastWriteTime.QuadPart);
+ uv__filetime_to_timespec(&statbuf->st_birthtim,
+ file_info.BasicInformation.CreationTime.QuadPart);
statbuf->st_ino = file_info.InternalInformation.IndexNumber.QuadPart;
diff --git a/deps/uv/src/win/internal.h b/deps/uv/src/win/internal.h
index b096255e4d63e0..b1b25b4c786bb7 100644
--- a/deps/uv/src/win/internal.h
+++ b/deps/uv/src/win/internal.h
@@ -115,8 +115,8 @@ void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
/*
* Pipes
*/
-int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
- char* name, size_t nameSize);
+int uv__create_stdio_pipe_pair(uv_loop_t* loop,
+ uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags);
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);
diff --git a/deps/uv/src/win/pipe.c b/deps/uv/src/win/pipe.c
index f81245ec606fcb..88ba99bbc0a49a 100644
--- a/deps/uv/src/win/pipe.c
+++ b/deps/uv/src/win/pipe.c
@@ -202,17 +202,17 @@ static void close_pipe(uv_pipe_t* pipe) {
}
-int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
- char* name, size_t nameSize) {
+static int uv__pipe_server(
+ HANDLE* pipeHandle_ptr, DWORD access,
+ char* name, size_t nameSize, char* random) {
HANDLE pipeHandle;
int err;
- char* ptr = (char*)handle;
for (;;) {
- uv_unique_pipe_name(ptr, name, nameSize);
+ uv_unique_pipe_name(random, name, nameSize);
pipeHandle = CreateNamedPipeA(name,
- access | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE | WRITE_DAC,
+ access | FILE_FLAG_FIRST_PIPE_INSTANCE,
PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, 1, 65536, 65536, 0,
NULL);
@@ -226,26 +226,225 @@ int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
goto error;
}
- /* Pipe name collision. Increment the pointer and try again. */
- ptr++;
+ /* Pipe name collision. Increment the random number and try again. */
+ random++;
}
- if (CreateIoCompletionPort(pipeHandle,
+ *pipeHandle_ptr = pipeHandle;
+
+ return 0;
+
+ error:
+ if (pipeHandle != INVALID_HANDLE_VALUE)
+ CloseHandle(pipeHandle);
+
+ return err;
+}
+
+
+static int uv__create_pipe_pair(
+ HANDLE* server_pipe_ptr, HANDLE* client_pipe_ptr,
+ unsigned int server_flags, unsigned int client_flags,
+ int inherit_client, char* random) {
+ /* allowed flags are: UV_READABLE_PIPE | UV_WRITABLE_PIPE | UV_NONBLOCK_PIPE */
+ char pipe_name[64];
+ SECURITY_ATTRIBUTES sa;
+ DWORD server_access;
+ DWORD client_access;
+ HANDLE server_pipe;
+ HANDLE client_pipe;
+ int err;
+
+ server_pipe = INVALID_HANDLE_VALUE;
+ client_pipe = INVALID_HANDLE_VALUE;
+
+ server_access = 0;
+ if (server_flags & UV_READABLE_PIPE)
+ server_access |= PIPE_ACCESS_INBOUND;
+ if (server_flags & UV_WRITABLE_PIPE)
+ server_access |= PIPE_ACCESS_OUTBOUND;
+ if (server_flags & UV_NONBLOCK_PIPE)
+ server_access |= FILE_FLAG_OVERLAPPED;
+ server_access |= WRITE_DAC;
+
+ client_access = 0;
+ if (client_flags & UV_READABLE_PIPE)
+ client_access |= GENERIC_READ;
+ else
+ client_access |= FILE_READ_ATTRIBUTES;
+ if (client_flags & UV_WRITABLE_PIPE)
+ client_access |= GENERIC_WRITE;
+ else
+ client_access |= FILE_WRITE_ATTRIBUTES;
+ client_access |= WRITE_DAC;
+
+ /* Create server pipe handle. */
+ err = uv__pipe_server(&server_pipe,
+ server_access,
+ pipe_name,
+ sizeof(pipe_name),
+ random);
+ if (err)
+ goto error;
+
+ /* Create client pipe handle. */
+ sa.nLength = sizeof sa;
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = inherit_client;
+
+ client_pipe = CreateFileA(pipe_name,
+ client_access,
+ 0,
+ &sa,
+ OPEN_EXISTING,
+ (client_flags & UV_NONBLOCK_PIPE) ? FILE_FLAG_OVERLAPPED : 0,
+ NULL);
+ if (client_pipe == INVALID_HANDLE_VALUE) {
+ err = GetLastError();
+ goto error;
+ }
+
+#ifndef NDEBUG
+ /* Validate that the pipe was opened in the right mode. */
+ {
+ DWORD mode;
+ BOOL r;
+ r = GetNamedPipeHandleState(client_pipe, &mode, NULL, NULL, NULL, NULL, 0);
+ if (r == TRUE) {
+ assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
+ } else {
+ fprintf(stderr, "libuv assertion failure: GetNamedPipeHandleState failed\n");
+ }
+ }
+#endif
+
+ /* Do a blocking ConnectNamedPipe. This should not block because we have
+ * both ends of the pipe created. */
+ if (!ConnectNamedPipe(server_pipe, NULL)) {
+ if (GetLastError() != ERROR_PIPE_CONNECTED) {
+ err = GetLastError();
+ goto error;
+ }
+ }
+
+ *client_pipe_ptr = client_pipe;
+ *server_pipe_ptr = server_pipe;
+ return 0;
+
+ error:
+ if (server_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(server_pipe);
+
+ if (client_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(client_pipe);
+
+ return err;
+}
+
+
+int uv_pipe(uv_file fds[2], int read_flags, int write_flags) {
+ uv_file temp[2];
+ int err;
+ HANDLE readh;
+ HANDLE writeh;
+
+ /* Make the server side the inbound (read) end, */
+ /* so that both ends will have FILE_READ_ATTRIBUTES permission. */
+ /* TODO: better source of local randomness than &fds? */
+ read_flags |= UV_READABLE_PIPE;
+ write_flags |= UV_WRITABLE_PIPE;
+ err = uv__create_pipe_pair(&readh, &writeh, read_flags, write_flags, 0, (char*) &fds[0]);
+ if (err != 0)
+ return err;
+ temp[0] = _open_osfhandle((intptr_t) readh, 0);
+ if (temp[0] == -1) {
+ if (errno == UV_EMFILE)
+ err = UV_EMFILE;
+ else
+ err = UV_UNKNOWN;
+ CloseHandle(readh);
+ CloseHandle(writeh);
+ return err;
+ }
+ temp[1] = _open_osfhandle((intptr_t) writeh, 0);
+ if (temp[1] == -1) {
+ if (errno == UV_EMFILE)
+ err = UV_EMFILE;
+ else
+ err = UV_UNKNOWN;
+ _close(temp[0]);
+ CloseHandle(writeh);
+ return err;
+ }
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+}
+
+
+int uv__create_stdio_pipe_pair(uv_loop_t* loop,
+ uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
+ /* The parent_pipe is always the server_pipe and kept by libuv.
+ * The child_pipe is always the client_pipe and is passed to the child.
+ * The flags are specified with respect to their usage in the child. */
+ HANDLE server_pipe;
+ HANDLE client_pipe;
+ unsigned int server_flags;
+ unsigned int client_flags;
+ int err;
+
+ server_pipe = INVALID_HANDLE_VALUE;
+ client_pipe = INVALID_HANDLE_VALUE;
+
+ server_flags = 0;
+ client_flags = 0;
+ if (flags & UV_READABLE_PIPE) {
+ /* The server needs inbound (read) access too, otherwise CreateNamedPipe()
+ * won't give us the FILE_READ_ATTRIBUTES permission. We need that to probe
+ * the state of the write buffer when we're trying to shutdown the pipe. */
+ server_flags |= UV_READABLE_PIPE | UV_WRITABLE_PIPE;
+ client_flags |= UV_READABLE_PIPE;
+ }
+ if (flags & UV_WRITABLE_PIPE) {
+ server_flags |= UV_READABLE_PIPE;
+ client_flags |= UV_WRITABLE_PIPE;
+ }
+ server_flags |= UV_NONBLOCK_PIPE;
+ if (flags & UV_NONBLOCK_PIPE || parent_pipe->ipc) {
+ client_flags |= UV_NONBLOCK_PIPE;
+ }
+
+ err = uv__create_pipe_pair(&server_pipe, &client_pipe,
+ server_flags, client_flags, 1, (char*) server_pipe);
+ if (err)
+ goto error;
+
+ if (CreateIoCompletionPort(server_pipe,
loop->iocp,
- (ULONG_PTR)handle,
+ (ULONG_PTR) parent_pipe,
0) == NULL) {
err = GetLastError();
goto error;
}
- uv_pipe_connection_init(handle);
- handle->handle = pipeHandle;
+ uv_pipe_connection_init(parent_pipe);
+ parent_pipe->handle = server_pipe;
+ *child_pipe_ptr = client_pipe;
+
+ /* The server end is now readable and/or writable. */
+ if (flags & UV_READABLE_PIPE)
+ parent_pipe->flags |= UV_HANDLE_WRITABLE;
+ if (flags & UV_WRITABLE_PIPE)
+ parent_pipe->flags |= UV_HANDLE_READABLE;
return 0;
error:
- if (pipeHandle != INVALID_HANDLE_VALUE)
- CloseHandle(pipeHandle);
+ if (server_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(server_pipe);
+
+ if (client_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(client_pipe);
return err;
}
@@ -712,9 +911,8 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
handle->name = NULL;
}
- if (pipeHandle != INVALID_HANDLE_VALUE) {
+ if (pipeHandle != INVALID_HANDLE_VALUE)
CloseHandle(pipeHandle);
- }
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, err);
diff --git a/deps/uv/src/win/process-stdio.c b/deps/uv/src/win/process-stdio.c
index 355d6188088b4a..0db35723731505 100644
--- a/deps/uv/src/win/process-stdio.c
+++ b/deps/uv/src/win/process-stdio.c
@@ -95,102 +95,6 @@ void uv_disable_stdio_inheritance(void) {
}
-static int uv__create_stdio_pipe_pair(uv_loop_t* loop,
- uv_pipe_t* server_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
- char pipe_name[64];
- SECURITY_ATTRIBUTES sa;
- DWORD server_access = 0;
- DWORD client_access = 0;
- HANDLE child_pipe = INVALID_HANDLE_VALUE;
- int err;
- int overlap;
-
- if (flags & UV_READABLE_PIPE) {
- /* The server needs inbound access too, otherwise CreateNamedPipe() won't
- * give us the FILE_READ_ATTRIBUTES permission. We need that to probe the
- * state of the write buffer when we're trying to shutdown the pipe. */
- server_access |= PIPE_ACCESS_OUTBOUND | PIPE_ACCESS_INBOUND;
- client_access |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;
- }
- if (flags & UV_WRITABLE_PIPE) {
- server_access |= PIPE_ACCESS_INBOUND;
- client_access |= GENERIC_WRITE | FILE_READ_ATTRIBUTES;
- }
-
- /* Create server pipe handle. */
- err = uv_stdio_pipe_server(loop,
- server_pipe,
- server_access,
- pipe_name,
- sizeof(pipe_name));
- if (err)
- goto error;
-
- /* Create child pipe handle. */
- sa.nLength = sizeof sa;
- sa.lpSecurityDescriptor = NULL;
- sa.bInheritHandle = TRUE;
-
- overlap = server_pipe->ipc || (flags & UV_OVERLAPPED_PIPE);
- child_pipe = CreateFileA(pipe_name,
- client_access,
- 0,
- &sa,
- OPEN_EXISTING,
- overlap ? FILE_FLAG_OVERLAPPED : 0,
- NULL);
- if (child_pipe == INVALID_HANDLE_VALUE) {
- err = GetLastError();
- goto error;
- }
-
-#ifndef NDEBUG
- /* Validate that the pipe was opened in the right mode. */
- {
- DWORD mode;
- BOOL r = GetNamedPipeHandleState(child_pipe,
- &mode,
- NULL,
- NULL,
- NULL,
- NULL,
- 0);
- assert(r == TRUE);
- assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
- }
-#endif
-
- /* Do a blocking ConnectNamedPipe. This should not block because we have both
- * ends of the pipe created. */
- if (!ConnectNamedPipe(server_pipe->handle, NULL)) {
- if (GetLastError() != ERROR_PIPE_CONNECTED) {
- err = GetLastError();
- goto error;
- }
- }
-
- /* The server end is now readable and/or writable. */
- if (flags & UV_READABLE_PIPE)
- server_pipe->flags |= UV_HANDLE_WRITABLE;
- if (flags & UV_WRITABLE_PIPE)
- server_pipe->flags |= UV_HANDLE_READABLE;
-
- *child_pipe_ptr = child_pipe;
- return 0;
-
- error:
- if (server_pipe->handle != INVALID_HANDLE_VALUE) {
- uv_pipe_cleanup(loop, server_pipe);
- }
-
- if (child_pipe != INVALID_HANDLE_VALUE) {
- CloseHandle(child_pipe);
- }
-
- return err;
-}
-
-
static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) {
HANDLE current_process;
diff --git a/deps/uv/src/win/stream.c b/deps/uv/src/win/stream.c
index 46a0709a38e3bd..ebb5fe5cb79d04 100644
--- a/deps/uv/src/win/stream.c
+++ b/deps/uv/src/win/stream.c
@@ -65,18 +65,11 @@ int uv_accept(uv_stream_t* server, uv_stream_t* client) {
}
-int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
- uv_read_cb read_cb) {
+int uv__read_start(uv_stream_t* handle,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
int err;
- if (handle->flags & UV_HANDLE_READING) {
- return UV_EALREADY;
- }
-
- if (!(handle->flags & UV_HANDLE_READABLE)) {
- return UV_ENOTCONN;
- }
-
err = ERROR_INVALID_PARAMETER;
switch (handle->type) {
case UV_TCP:
diff --git a/deps/uv/src/win/tcp.c b/deps/uv/src/win/tcp.c
index 0dcaa97df70821..517700e66d81b9 100644
--- a/deps/uv/src/win/tcp.c
+++ b/deps/uv/src/win/tcp.c
@@ -800,9 +800,8 @@ static int uv_tcp_try_connect(uv_connect_t* req,
if (err)
return err;
- if (handle->delayed_error) {
- return handle->delayed_error;
- }
+ if (handle->delayed_error != 0)
+ goto out;
if (!(handle->flags & UV_HANDLE_BOUND)) {
if (addrlen == sizeof(uv_addr_ip4_any_)) {
@@ -815,8 +814,8 @@ static int uv_tcp_try_connect(uv_connect_t* req,
err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
if (err)
return err;
- if (handle->delayed_error)
- return handle->delayed_error;
+ if (handle->delayed_error != 0)
+ goto out;
}
if (!handle->tcp.conn.func_connectex) {
@@ -844,11 +843,21 @@ static int uv_tcp_try_connect(uv_connect_t* req,
NULL);
}
+out:
+
UV_REQ_INIT(req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+ if (handle->delayed_error != 0) {
+ /* Process the req without IOCP. */
+ handle->reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ return 0;
+ }
+
success = handle->tcp.conn.func_connectex(handle->socket,
(const struct sockaddr*) &converted,
addrlen,
@@ -1215,7 +1224,14 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
UNREGISTER_HANDLE_REQ(loop, handle, req);
err = 0;
- if (REQ_SUCCESS(req)) {
+ if (handle->delayed_error) {
+ /* To smooth over the differences between unixes errors that
+ * were reported synchronously on the first connect can be delayed
+ * until the next tick--which is now.
+ */
+ err = handle->delayed_error;
+ handle->delayed_error = 0;
+ } else if (REQ_SUCCESS(req)) {
if (handle->flags & UV_HANDLE_CLOSING) {
/* use UV_ECANCELED for consistency with Unix */
err = ERROR_OPERATION_ABORTED;
@@ -1571,3 +1587,118 @@ int uv__tcp_connect(uv_connect_t* req,
return 0;
}
+
+#ifndef WSA_FLAG_NO_HANDLE_INHERIT
+/* Added in Windows 7 SP1. Specify this to avoid race conditions, */
+/* but also manually clear the inherit flag in case this failed. */
+#define WSA_FLAG_NO_HANDLE_INHERIT 0x80
+#endif
+
+int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
+ SOCKET server = INVALID_SOCKET;
+ SOCKET client0 = INVALID_SOCKET;
+ SOCKET client1 = INVALID_SOCKET;
+ SOCKADDR_IN name;
+ LPFN_ACCEPTEX func_acceptex;
+ WSAOVERLAPPED overlap;
+ char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
+ int namelen;
+ int err;
+ DWORD bytes;
+ DWORD flags;
+ DWORD client0_flags = WSA_FLAG_NO_HANDLE_INHERIT;
+ DWORD client1_flags = WSA_FLAG_NO_HANDLE_INHERIT;
+
+ if (flags0 & UV_NONBLOCK_PIPE)
+ client0_flags |= WSA_FLAG_OVERLAPPED;
+ if (flags1 & UV_NONBLOCK_PIPE)
+ client1_flags |= WSA_FLAG_OVERLAPPED;
+
+ server = WSASocketW(AF_INET, type, protocol, NULL, 0,
+ WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
+ if (server == INVALID_SOCKET)
+ goto wsaerror;
+ if (!SetHandleInformation((HANDLE) server, HANDLE_FLAG_INHERIT, 0))
+ goto error;
+ name.sin_family = AF_INET;
+ name.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ name.sin_port = 0;
+ if (bind(server, (SOCKADDR*) &name, sizeof(name)) != 0)
+ goto wsaerror;
+ if (listen(server, 1) != 0)
+ goto wsaerror;
+ namelen = sizeof(name);
+ if (getsockname(server, (SOCKADDR*) &name, &namelen) != 0)
+ goto wsaerror;
+ client0 = WSASocketW(AF_INET, type, protocol, NULL, 0, client0_flags);
+ if (client0 == INVALID_SOCKET)
+ goto wsaerror;
+ if (!SetHandleInformation((HANDLE) client0, HANDLE_FLAG_INHERIT, 0))
+ goto error;
+ if (connect(client0, (SOCKADDR*) &name, sizeof(name)) != 0)
+ goto wsaerror;
+ client1 = WSASocketW(AF_INET, type, protocol, NULL, 0, client1_flags);
+ if (client1 == INVALID_SOCKET)
+ goto wsaerror;
+ if (!SetHandleInformation((HANDLE) client1, HANDLE_FLAG_INHERIT, 0))
+ goto error;
+ if (!uv_get_acceptex_function(server, &func_acceptex)) {
+ err = WSAEAFNOSUPPORT;
+ goto cleanup;
+ }
+ memset(&overlap, 0, sizeof(overlap));
+ if (!func_acceptex(server,
+ client1,
+ accept_buffer,
+ 0,
+ sizeof(struct sockaddr_storage),
+ sizeof(struct sockaddr_storage),
+ &bytes,
+ &overlap)) {
+ err = WSAGetLastError();
+ if (err == ERROR_IO_PENDING) {
+ /* Result should complete immediately, since we already called connect,
+ * but emperically, we sometimes have to poll the kernel a couple times
+ * until it notices that. */
+ while (!WSAGetOverlappedResult(client1, &overlap, &bytes, FALSE, &flags)) {
+ err = WSAGetLastError();
+ if (err != WSA_IO_INCOMPLETE)
+ goto cleanup;
+ SwitchToThread();
+ }
+ }
+ else {
+ goto cleanup;
+ }
+ }
+ if (setsockopt(client1, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char*) &server, sizeof(server)) != 0) {
+ goto wsaerror;
+ }
+
+ closesocket(server);
+
+ fds[0] = client0;
+ fds[1] = client1;
+
+ return 0;
+
+ wsaerror:
+ err = WSAGetLastError();
+ goto cleanup;
+
+ error:
+ err = GetLastError();
+ goto cleanup;
+
+ cleanup:
+ if (server != INVALID_SOCKET)
+ closesocket(server);
+ if (client0 != INVALID_SOCKET)
+ closesocket(client0);
+ if (client1 != INVALID_SOCKET)
+ closesocket(client1);
+
+ assert(err);
+ return uv_translate_sys_error(err);
+}
diff --git a/deps/uv/test/benchmark-pump.c b/deps/uv/test/benchmark-pump.c
index 8685258e052793..7d3977dfc32d0d 100644
--- a/deps/uv/test/benchmark-pump.c
+++ b/deps/uv/test/benchmark-pump.c
@@ -390,6 +390,7 @@ HELPER_IMPL(tcp_pump_server) {
r = uv_listen((uv_stream_t*)&tcpServer, MAX_WRITE_HANDLES, connection_cb);
ASSERT(r == 0);
+ notify_parent_process();
uv_run(loop, UV_RUN_DEFAULT);
return 0;
@@ -411,6 +412,7 @@ HELPER_IMPL(pipe_pump_server) {
r = uv_listen((uv_stream_t*)&pipeServer, MAX_WRITE_HANDLES, connection_cb);
ASSERT(r == 0);
+ notify_parent_process();
uv_run(loop, UV_RUN_DEFAULT);
MAKE_VALGRIND_HAPPY();
diff --git a/deps/uv/test/blackhole-server.c b/deps/uv/test/blackhole-server.c
index ad878b35c61a3e..9b21d1e0e07279 100644
--- a/deps/uv/test/blackhole-server.c
+++ b/deps/uv/test/blackhole-server.c
@@ -114,6 +114,7 @@ HELPER_IMPL(tcp4_blackhole_server) {
r = uv_listen((uv_stream_t*)&tcp_server, 128, connection_cb);
ASSERT(r == 0);
+ notify_parent_process();
r = uv_run(loop, UV_RUN_DEFAULT);
ASSERT(0 && "Blackhole server dropped out of event loop.");
diff --git a/deps/uv/test/echo-server.c b/deps/uv/test/echo-server.c
index c65142ff901794..69d0abc980bc32 100644
--- a/deps/uv/test/echo-server.c
+++ b/deps/uv/test/echo-server.c
@@ -209,6 +209,7 @@ static void on_recv(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned flags) {
uv_buf_t sndbuf;
+ uv_udp_send_t* req;
if (nread == 0) {
/* Everything OK, but nothing read. */
@@ -218,7 +219,7 @@ static void on_recv(uv_udp_t* handle,
ASSERT(nread > 0);
ASSERT(addr->sa_family == AF_INET);
- uv_udp_send_t* req = send_alloc();
+ req = send_alloc();
ASSERT(req != NULL);
sndbuf = uv_buf_init(rcvbuf->base, nread);
ASSERT(0 <= uv_udp_send(req, handle, &sndbuf, 1, addr, on_send));
@@ -228,7 +229,7 @@ static int tcp4_echo_start(int port) {
struct sockaddr_in addr;
int r;
- ASSERT(0 == uv_ip4_addr("0.0.0.0", port, &addr));
+ ASSERT(0 == uv_ip4_addr("127.0.0.1", port, &addr));
server = (uv_handle_t*)&tcpServer;
serverType = TCP;
diff --git a/deps/uv/test/run-tests.c b/deps/uv/test/run-tests.c
index e5e75e17c87e22..fc8a79a9eb9019 100644
--- a/deps/uv/test/run-tests.c
+++ b/deps/uv/test/run-tests.c
@@ -50,6 +50,10 @@ int spawn_tcp_server_helper(void);
static int maybe_run_test(int argc, char **argv);
+#ifdef _WIN32
+typedef BOOL (WINAPI *sCompareObjectHandles)(_In_ HANDLE, _In_ HANDLE);
+#endif
+
int main(int argc, char **argv) {
#ifndef _WIN32
@@ -202,22 +206,36 @@ static int maybe_run_test(int argc, char **argv) {
return 1;
}
-#ifndef _WIN32
if (strcmp(argv[1], "spawn_helper8") == 0) {
- int fd;
-
+ uv_os_fd_t closed_fd;
+ uv_os_fd_t open_fd;
+#ifdef _WIN32
+ DWORD flags;
+ HMODULE kernelbase_module;
+ sCompareObjectHandles pCompareObjectHandles; /* function introduced in Windows 10 */
+#endif
notify_parent_process();
- ASSERT(sizeof(fd) == read(0, &fd, sizeof(fd)));
- ASSERT(fd > 2);
+ ASSERT(sizeof(closed_fd) == read(0, &closed_fd, sizeof(closed_fd)));
+ ASSERT(sizeof(open_fd) == read(0, &open_fd, sizeof(open_fd)));
+#ifdef _WIN32
+ ASSERT((intptr_t) closed_fd > 0);
+ ASSERT((intptr_t) open_fd > 0);
+ ASSERT(0 != GetHandleInformation(open_fd, &flags));
+ kernelbase_module = GetModuleHandleA("kernelbase.dll");
+ pCompareObjectHandles = (sCompareObjectHandles)
+ GetProcAddress(kernelbase_module, "CompareObjectHandles");
+ ASSERT(pCompareObjectHandles == NULL || !pCompareObjectHandles(open_fd, closed_fd));
+#else
+ ASSERT(open_fd > 2);
+ ASSERT(closed_fd > 2);
# if defined(__PASE__) /* On IBMi PASE, write() returns 1 */
- ASSERT(1 == write(fd, "x", 1));
+ ASSERT(1 == write(closed_fd, "x", 1));
# else
- ASSERT(-1 == write(fd, "x", 1));
+ ASSERT(-1 == write(closed_fd, "x", 1));
# endif /* !__PASE__ */
-
+#endif
return 1;
}
-#endif /* !_WIN32 */
if (strcmp(argv[1], "spawn_helper9") == 0) {
notify_parent_process();
diff --git a/deps/uv/test/task.h b/deps/uv/test/task.h
index 8250f949b2bfbc..4e7e2f07f570d3 100644
--- a/deps/uv/test/task.h
+++ b/deps/uv/test/task.h
@@ -113,8 +113,8 @@ typedef enum {
#define ASSERT_BASE(a, operator, b, type, conv) \
do { \
- type eval_a = (type) (a); \
- type eval_b = (type) (b); \
+ volatile type eval_a = (type) (a); \
+ volatile type eval_b = (type) (b); \
if (!(eval_a operator eval_b)) { \
fprintf(stderr, \
"Assertion failed in %s on line %d: `%s %s %s` " \
@@ -196,22 +196,26 @@ typedef enum {
} \
} while (0)
-#define ASSERT_INT_BASE(a, operator, b, type, conv) \
- ASSERT_BASE(a, operator, b, type, conv)
-
-#define ASSERT_EQ(a, b) ASSERT_INT_BASE(a, ==, b, int64_t, PRId64)
-#define ASSERT_GE(a, b) ASSERT_INT_BASE(a, >=, b, int64_t, PRId64)
-#define ASSERT_GT(a, b) ASSERT_INT_BASE(a, >, b, int64_t, PRId64)
-#define ASSERT_LE(a, b) ASSERT_INT_BASE(a, <=, b, int64_t, PRId64)
-#define ASSERT_LT(a, b) ASSERT_INT_BASE(a, <, b, int64_t, PRId64)
-#define ASSERT_NE(a, b) ASSERT_INT_BASE(a, !=, b, int64_t, PRId64)
-
-#define ASSERT_UINT64_EQ(a, b) ASSERT_INT_BASE(a, ==, b, uint64_t, PRIu64)
-#define ASSERT_UINT64_GE(a, b) ASSERT_INT_BASE(a, >=, b, uint64_t, PRIu64)
-#define ASSERT_UINT64_GT(a, b) ASSERT_INT_BASE(a, >, b, uint64_t, PRIu64)
-#define ASSERT_UINT64_LE(a, b) ASSERT_INT_BASE(a, <=, b, uint64_t, PRIu64)
-#define ASSERT_UINT64_LT(a, b) ASSERT_INT_BASE(a, <, b, uint64_t, PRIu64)
-#define ASSERT_UINT64_NE(a, b) ASSERT_INT_BASE(a, !=, b, uint64_t, PRIu64)
+#define ASSERT_EQ(a, b) ASSERT_BASE(a, ==, b, int64_t, PRId64)
+#define ASSERT_GE(a, b) ASSERT_BASE(a, >=, b, int64_t, PRId64)
+#define ASSERT_GT(a, b) ASSERT_BASE(a, >, b, int64_t, PRId64)
+#define ASSERT_LE(a, b) ASSERT_BASE(a, <=, b, int64_t, PRId64)
+#define ASSERT_LT(a, b) ASSERT_BASE(a, <, b, int64_t, PRId64)
+#define ASSERT_NE(a, b) ASSERT_BASE(a, !=, b, int64_t, PRId64)
+
+#define ASSERT_UINT64_EQ(a, b) ASSERT_BASE(a, ==, b, uint64_t, PRIu64)
+#define ASSERT_UINT64_GE(a, b) ASSERT_BASE(a, >=, b, uint64_t, PRIu64)
+#define ASSERT_UINT64_GT(a, b) ASSERT_BASE(a, >, b, uint64_t, PRIu64)
+#define ASSERT_UINT64_LE(a, b) ASSERT_BASE(a, <=, b, uint64_t, PRIu64)
+#define ASSERT_UINT64_LT(a, b) ASSERT_BASE(a, <, b, uint64_t, PRIu64)
+#define ASSERT_UINT64_NE(a, b) ASSERT_BASE(a, !=, b, uint64_t, PRIu64)
+
+#define ASSERT_DOUBLE_EQ(a, b) ASSERT_BASE(a, ==, b, double, "f")
+#define ASSERT_DOUBLE_GE(a, b) ASSERT_BASE(a, >=, b, double, "f")
+#define ASSERT_DOUBLE_GT(a, b) ASSERT_BASE(a, >, b, double, "f")
+#define ASSERT_DOUBLE_LE(a, b) ASSERT_BASE(a, <=, b, double, "f")
+#define ASSERT_DOUBLE_LT(a, b) ASSERT_BASE(a, <, b, double, "f")
+#define ASSERT_DOUBLE_NE(a, b) ASSERT_BASE(a, !=, b, double, "f")
#define ASSERT_STR_EQ(a, b) \
ASSERT_BASE_STR(strcmp(a, b) == 0, a, == , b, char*, "s")
diff --git a/deps/uv/test/test-close-fd.c b/deps/uv/test/test-close-fd.c
index cea4a1b0b80bb4..0d3927f652ede0 100644
--- a/deps/uv/test/test-close-fd.c
+++ b/deps/uv/test/test-close-fd.c
@@ -19,12 +19,11 @@
* IN THE SOFTWARE.
*/
-#if !defined(_WIN32)
-
#include "uv.h"
#include "task.h"
-#include
+#ifndef _WIN32
#include
+#endif
static unsigned int read_cb_called;
@@ -51,14 +50,25 @@ static void read_cb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
TEST_IMPL(close_fd) {
uv_pipe_t pipe_handle;
- int fd[2];
+ uv_fs_t req;
+ uv_buf_t bufs[1];
+ uv_file fd[2];
+ bufs[0] = uv_buf_init("", 1);
- ASSERT(0 == pipe(fd));
+ ASSERT(0 == uv_pipe(fd, 0, 0));
ASSERT(0 == uv_pipe_init(uv_default_loop(), &pipe_handle, 0));
ASSERT(0 == uv_pipe_open(&pipe_handle, fd[0]));
- fd[0] = -1; /* uv_pipe_open() takes ownership of the file descriptor. */
- ASSERT(1 == write(fd[1], "", 1));
+ /* uv_pipe_open() takes ownership of the file descriptor. */
+ fd[0] = -1;
+
+ ASSERT(1 == uv_fs_write(NULL, &req, fd[1], bufs, 1, -1, NULL));
+ ASSERT(1 == req.result);
+ uv_fs_req_cleanup(&req);
+#ifdef _WIN32
+ ASSERT(0 == _close(fd[1]));
+#else
ASSERT(0 == close(fd[1]));
+#endif
fd[1] = -1;
ASSERT(0 == uv_read_start((uv_stream_t *) &pipe_handle, alloc_cb, read_cb));
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
@@ -72,9 +82,3 @@ TEST_IMPL(close_fd) {
MAKE_VALGRIND_HAPPY();
return 0;
}
-
-#else
-
-typedef int file_has_no_tests; /* ISO C forbids an empty translation unit. */
-
-#endif /* !_WIN32 */
diff --git a/deps/uv/test/test-error.c b/deps/uv/test/test-error.c
index 7f44f4a1bc606d..35d108a4a1f768 100644
--- a/deps/uv/test/test-error.c
+++ b/deps/uv/test/test-error.c
@@ -37,6 +37,9 @@
* See https://github.com/joyent/libuv/issues/210
*/
TEST_IMPL(error_message) {
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
+#endif
char buf[32];
/* Cop out. Can't do proper checks on systems with
diff --git a/deps/uv/test/test-fs-copyfile.c b/deps/uv/test/test-fs-copyfile.c
index c785a4b51fbb10..fa00fe4ee89c87 100644
--- a/deps/uv/test/test-fs-copyfile.c
+++ b/deps/uv/test/test-fs-copyfile.c
@@ -96,6 +96,9 @@ static void touch_file(const char* name, unsigned int size) {
TEST_IMPL(fs_copyfile) {
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
+#endif
const char src[] = "test_file_src";
uv_loop_t* loop;
uv_fs_t req;
diff --git a/deps/uv/test/test-fs-event.c b/deps/uv/test/test-fs-event.c
index 28a6a1ebb3f096..0992d5989e421b 100644
--- a/deps/uv/test/test-fs-event.c
+++ b/deps/uv/test/test-fs-event.c
@@ -673,6 +673,9 @@ TEST_IMPL(fs_event_watch_file_exact_path) {
TEST_IMPL(fs_event_watch_file_twice) {
#if defined(NO_FS_EVENTS)
RETURN_SKIP(NO_FS_EVENTS);
+#endif
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
#endif
const char path[] = "test/fixtures/empty_file";
uv_fs_event_t watchers[2];
diff --git a/deps/uv/test/test-fs-readdir.c b/deps/uv/test/test-fs-readdir.c
index 5efc853cc67f0f..cccaa7438baabc 100644
--- a/deps/uv/test/test-fs-readdir.c
+++ b/deps/uv/test/test-fs-readdir.c
@@ -230,6 +230,9 @@ static void file_opendir_cb(uv_fs_t* req) {
}
TEST_IMPL(fs_readdir_file) {
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
+#endif
const char* path;
int r;
diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c
index 63189d01d5adc1..0292a96b4802ca 100644
--- a/deps/uv/test/test-fs.c
+++ b/deps/uv/test/test-fs.c
@@ -673,7 +673,7 @@ static void stat_cb(uv_fs_t* req) {
static void sendfile_cb(uv_fs_t* req) {
ASSERT(req == &sendfile_req);
ASSERT(req->fs_type == UV_FS_SENDFILE);
- ASSERT(req->result == 65546);
+ ASSERT(req->result == 65545);
sendfile_cb_count++;
uv_fs_req_cleanup(req);
}
@@ -816,13 +816,44 @@ static void check_utime(const char* path,
else
r = uv_fs_stat(loop, &req, path, NULL);
- ASSERT(r == 0);
+ ASSERT_EQ(r, 0);
- ASSERT(req.result == 0);
+ ASSERT_EQ(req.result, 0);
s = &req.statbuf;
- ASSERT(s->st_atim.tv_sec + (s->st_atim.tv_nsec / 1000000000.0) == atime);
- ASSERT(s->st_mtim.tv_sec + (s->st_mtim.tv_nsec / 1000000000.0) == mtime);
+ if (s->st_atim.tv_nsec == 0 && s->st_mtim.tv_nsec == 0) {
+ /*
+ * Test sub-second timestamps only when supported (such as Windows with
+ * NTFS). Some other platforms support sub-second timestamps, but that
+ * support is filesystem-dependent. Notably OS X (HFS Plus) does NOT
+ * support sub-second timestamps. But kernels may round or truncate in
+ * either direction, so we may accept either possible answer.
+ */
+#ifdef _WIN32
+ ASSERT_DOUBLE_EQ(atime, (long) atime);
+ ASSERT_DOUBLE_EQ(mtime, (long) atime);
+#endif
+ if (atime > 0 || (long) atime == atime)
+ ASSERT_EQ(s->st_atim.tv_sec, (long) atime);
+ if (mtime > 0 || (long) mtime == mtime)
+ ASSERT_EQ(s->st_mtim.tv_sec, (long) mtime);
+ ASSERT_GE(s->st_atim.tv_sec, (long) atime - 1);
+ ASSERT_GE(s->st_mtim.tv_sec, (long) mtime - 1);
+ ASSERT_LE(s->st_atim.tv_sec, (long) atime);
+ ASSERT_LE(s->st_mtim.tv_sec, (long) mtime);
+ } else {
+ double st_atim;
+ double st_mtim;
+#ifndef __APPLE__
+ /* TODO(vtjnash): would it be better to normalize this? */
+ ASSERT_DOUBLE_GE(s->st_atim.tv_nsec, 0);
+ ASSERT_DOUBLE_GE(s->st_mtim.tv_nsec, 0);
+#endif
+ st_atim = s->st_atim.tv_sec + s->st_atim.tv_nsec / 1e9;
+ st_mtim = s->st_mtim.tv_sec + s->st_mtim.tv_nsec / 1e9;
+ ASSERT_DOUBLE_EQ(st_atim, atime);
+ ASSERT_DOUBLE_EQ(st_mtim, mtime);
+ }
uv_fs_req_cleanup(&req);
}
@@ -1159,6 +1190,8 @@ TEST_IMPL(fs_async_dir) {
static int test_sendfile(void (*setup)(int), uv_fs_cb cb, off_t expected_size) {
int f, r;
struct stat s1, s2;
+ uv_fs_t req;
+ char buf1[1];
loop = uv_default_loop();
@@ -1188,7 +1221,7 @@ static int test_sendfile(void (*setup)(int), uv_fs_cb cb, off_t expected_size) {
uv_fs_req_cleanup(&open_req2);
r = uv_fs_sendfile(loop, &sendfile_req, open_req2.result, open_req1.result,
- 0, 131072, cb);
+ 1, 131072, cb);
ASSERT(r == 0);
uv_run(loop, UV_RUN_DEFAULT);
@@ -1203,9 +1236,26 @@ static int test_sendfile(void (*setup)(int), uv_fs_cb cb, off_t expected_size) {
ASSERT(0 == stat("test_file", &s1));
ASSERT(0 == stat("test_file2", &s2));
- ASSERT(s1.st_size == s2.st_size);
ASSERT(s2.st_size == expected_size);
+ if (expected_size > 0) {
+ ASSERT_UINT64_EQ(s1.st_size, s2.st_size + 1);
+ r = uv_fs_open(NULL, &open_req1, "test_file2", O_RDWR, 0, NULL);
+ ASSERT(r >= 0);
+ ASSERT(open_req1.result >= 0);
+ uv_fs_req_cleanup(&open_req1);
+
+ memset(buf1, 0, sizeof(buf1));
+ iov = uv_buf_init(buf1, sizeof(buf1));
+ r = uv_fs_read(NULL, &req, open_req1.result, &iov, 1, -1, NULL);
+ ASSERT(r >= 0);
+ ASSERT(req.result >= 0);
+ ASSERT_EQ(buf1[0], 'e'); /* 'e' from begin */
+ uv_fs_req_cleanup(&req);
+ } else {
+ ASSERT_UINT64_EQ(s1.st_size, s2.st_size);
+ }
+
/* Cleanup. */
unlink("test_file");
unlink("test_file2");
@@ -1223,7 +1273,7 @@ static void sendfile_setup(int f) {
TEST_IMPL(fs_async_sendfile) {
- return test_sendfile(sendfile_setup, sendfile_cb, 65546);
+ return test_sendfile(sendfile_setup, sendfile_cb, 65545);
}
@@ -2523,29 +2573,16 @@ TEST_IMPL(fs_utime) {
uv_fs_req_cleanup(&req);
uv_fs_close(loop, &req, r, NULL);
- atime = mtime = 400497753; /* 1982-09-10 11:22:33 */
-
- /*
- * Test sub-second timestamps only on Windows (assuming NTFS). Some other
- * platforms support sub-second timestamps, but that support is filesystem-
- * dependent. Notably OS X (HFS Plus) does NOT support sub-second timestamps.
- */
-#ifdef _WIN32
- mtime += 0.444; /* 1982-09-10 11:22:33.444 */
-#endif
+ atime = mtime = 400497753.25; /* 1982-09-10 11:22:33.25 */
r = uv_fs_utime(NULL, &req, path, atime, mtime, NULL);
ASSERT(r == 0);
ASSERT(req.result == 0);
uv_fs_req_cleanup(&req);
- r = uv_fs_stat(NULL, &req, path, NULL);
- ASSERT(r == 0);
- ASSERT(req.result == 0);
check_utime(path, atime, mtime, /* test_lutime */ 0);
- uv_fs_req_cleanup(&req);
- atime = mtime = 1291404900; /* 2010-12-03 20:35:00 - mees <3 */
+ atime = mtime = 1291404900.25; /* 2010-12-03 20:35:00.25 - mees <3 */
checkme.path = path;
checkme.atime = atime;
checkme.mtime = mtime;
@@ -2565,6 +2602,45 @@ TEST_IMPL(fs_utime) {
}
+TEST_IMPL(fs_utime_round) {
+ const char path[] = "test_file";
+ double atime;
+ double mtime;
+ uv_fs_t req;
+ int r;
+
+ loop = uv_default_loop();
+ unlink(path);
+ r = uv_fs_open(NULL, &req, path, O_RDWR | O_CREAT, S_IWUSR | S_IRUSR, NULL);
+ ASSERT_GE(r, 0);
+ ASSERT_GE(req.result, 0);
+ uv_fs_req_cleanup(&req);
+ ASSERT_EQ(0, uv_fs_close(loop, &req, r, NULL));
+
+ atime = mtime = -14245440.25; /* 1969-07-20T02:56:00.25Z */
+
+ r = uv_fs_utime(NULL, &req, path, atime, mtime, NULL);
+#if !defined(__linux__) && \
+ !defined(_WIN32) && \
+ !defined(__APPLE__) && \
+ !defined(__FreeBSD__) && \
+ !defined(__sun)
+ if (r != 0) {
+ ASSERT_EQ(r, UV_EINVAL);
+ RETURN_SKIP("utime on some OS (z/OS, IBM i PASE, AIX) or filesystems may reject pre-epoch timestamps");
+ }
+#endif
+ ASSERT_EQ(0, r);
+ ASSERT_EQ(0, req.result);
+ uv_fs_req_cleanup(&req);
+ check_utime(path, atime, mtime, /* test_lutime */ 0);
+ unlink(path);
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
+
+
#ifdef _WIN32
TEST_IMPL(fs_stat_root) {
int r;
@@ -2618,16 +2694,7 @@ TEST_IMPL(fs_futime) {
uv_fs_req_cleanup(&req);
uv_fs_close(loop, &req, r, NULL);
- atime = mtime = 400497753; /* 1982-09-10 11:22:33 */
-
- /*
- * Test sub-second timestamps only on Windows (assuming NTFS). Some other
- * platforms support sub-second timestamps, but that support is filesystem-
- * dependent. Notably OS X (HFS Plus) does NOT support sub-second timestamps.
- */
-#ifdef _WIN32
- mtime += 0.444; /* 1982-09-10 11:22:33.444 */
-#endif
+ atime = mtime = 400497753.25; /* 1982-09-10 11:22:33.25 */
r = uv_fs_open(NULL, &req, path, O_RDWR, 0, NULL);
ASSERT(r >= 0);
@@ -2645,11 +2712,7 @@ TEST_IMPL(fs_futime) {
#endif
uv_fs_req_cleanup(&req);
- r = uv_fs_stat(NULL, &req, path, NULL);
- ASSERT(r == 0);
- ASSERT(req.result == 0);
check_utime(path, atime, mtime, /* test_lutime */ 0);
- uv_fs_req_cleanup(&req);
atime = mtime = 1291404900; /* 2010-12-03 20:35:00 - mees <3 */
@@ -2708,11 +2771,7 @@ TEST_IMPL(fs_lutime) {
uv_fs_req_cleanup(&req);
/* Test the synchronous version. */
- atime = mtime = 400497753; /* 1982-09-10 11:22:33 */
-
-#ifdef _WIN32
- mtime += 0.444; /* 1982-09-10 11:22:33.444 */
-#endif
+ atime = mtime = 400497753.25; /* 1982-09-10 11:22:33.25 */
checkme.atime = atime;
checkme.mtime = mtime;
@@ -2837,6 +2896,9 @@ TEST_IMPL(fs_scandir_non_existent_dir) {
}
TEST_IMPL(fs_scandir_file) {
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
+#endif
const char* path;
int r;
@@ -3083,6 +3145,9 @@ static void fs_read_bufs(int add_flags) {
uv_fs_req_cleanup(&close_req);
}
TEST_IMPL(fs_read_bufs) {
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
+#endif
fs_read_bufs(0);
fs_read_bufs(UV_FS_O_FILEMAP);
@@ -4372,6 +4437,7 @@ TEST_IMPL(fs_invalid_mkdir_name) {
loop = uv_default_loop();
r = uv_fs_mkdir(loop, &req, "invalid>", 0, NULL);
ASSERT(r == UV_EINVAL);
+ ASSERT_EQ(UV_EINVAL, uv_fs_mkdir(loop, &req, "test:lol", 0, NULL));
return 0;
}
diff --git a/deps/uv/test/test-getaddrinfo.c b/deps/uv/test/test-getaddrinfo.c
index 628e4d13cc60db..b1fc312349f960 100644
--- a/deps/uv/test/test-getaddrinfo.c
+++ b/deps/uv/test/test-getaddrinfo.c
@@ -100,7 +100,7 @@ TEST_IMPL(getaddrinfo_fail) {
ASSERT(0 == uv_getaddrinfo(uv_default_loop(),
&req,
getaddrinfo_fail_cb,
- "xyzzy.xyzzy.xyzzy.",
+ "example.invalid.",
NULL,
NULL));
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
@@ -122,7 +122,7 @@ TEST_IMPL(getaddrinfo_fail_sync) {
ASSERT(0 > uv_getaddrinfo(uv_default_loop(),
&req,
NULL,
- "xyzzy.xyzzy.xyzzy.",
+ "example.invalid.",
NULL,
NULL));
uv_freeaddrinfo(req.addrinfo);
diff --git a/deps/uv/test/test-ipc.c b/deps/uv/test/test-ipc.c
index 39ef4f1175b0d4..ba3ba737481532 100644
--- a/deps/uv/test/test-ipc.c
+++ b/deps/uv/test/test-ipc.c
@@ -693,6 +693,11 @@ static void ipc_on_connection(uv_stream_t* server, int status) {
}
+static void close_and_free_cb(uv_handle_t* handle) {
+ close_cb_called++;
+ free(handle);
+}
+
static void ipc_on_connection_tcp_conn(uv_stream_t* server, int status) {
int r;
uv_buf_t buf;
@@ -721,7 +726,7 @@ static void ipc_on_connection_tcp_conn(uv_stream_t* server, int status) {
on_tcp_child_process_read);
ASSERT_EQ(r, 0);
- uv_close((uv_handle_t*)conn, close_cb);
+ uv_close((uv_handle_t*)conn, close_and_free_cb);
}
diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h
index 52b17a69147aa0..d7c7b086f03a18 100644
--- a/deps/uv/test/test-list.h
+++ b/deps/uv/test/test-list.h
@@ -116,7 +116,8 @@ TEST_DECLARE (tcp_open_bound)
TEST_DECLARE (tcp_open_connected)
TEST_DECLARE (tcp_connect_error_after_write)
TEST_DECLARE (tcp_shutdown_after_write)
-TEST_DECLARE (tcp_bind_error_addrinuse)
+TEST_DECLARE (tcp_bind_error_addrinuse_connect)
+TEST_DECLARE (tcp_bind_error_addrinuse_listen)
TEST_DECLARE (tcp_bind_error_addrnotavail_1)
TEST_DECLARE (tcp_bind_error_addrnotavail_2)
TEST_DECLARE (tcp_bind_error_fault)
@@ -359,6 +360,7 @@ TEST_DECLARE (fs_open_flags)
TEST_DECLARE (fs_fd_hash)
#endif
TEST_DECLARE (fs_utime)
+TEST_DECLARE (fs_utime_round)
TEST_DECLARE (fs_futime)
TEST_DECLARE (fs_lutime)
TEST_DECLARE (fs_file_open_append)
@@ -451,12 +453,16 @@ TEST_DECLARE (poll_nested_epoll)
#ifdef UV_HAVE_KQUEUE
TEST_DECLARE (poll_nested_kqueue)
#endif
+TEST_DECLARE (poll_multiple_handles)
TEST_DECLARE (ip4_addr)
TEST_DECLARE (ip6_addr_link_local)
TEST_DECLARE (poll_close_doesnt_corrupt_stack)
TEST_DECLARE (poll_closesocket)
+TEST_DECLARE (close_fd)
+TEST_DECLARE (closed_fd_events)
+TEST_DECLARE (spawn_fs_open)
#ifdef _WIN32
TEST_DECLARE (spawn_detect_pipe_name_collisions_on_windows)
#if !defined(USING_UV_SHARED)
@@ -471,8 +477,6 @@ TEST_DECLARE (ipc_listen_after_bind_twice)
TEST_DECLARE (win32_signum_number)
#else
TEST_DECLARE (emfile)
-TEST_DECLARE (close_fd)
-TEST_DECLARE (spawn_fs_open)
TEST_DECLARE (spawn_setuid_setgid)
TEST_DECLARE (we_get_signal)
TEST_DECLARE (we_get_signals)
@@ -481,7 +485,6 @@ TEST_DECLARE (we_get_signals_mixed)
TEST_DECLARE (signal_multiple_loops)
TEST_DECLARE (signal_pending_on_close)
TEST_DECLARE (signal_close_loop_alive)
-TEST_DECLARE (closed_fd_events)
#endif
#ifdef __APPLE__
TEST_DECLARE (osx_select)
@@ -567,7 +570,8 @@ TASK_LIST_START
#ifndef _WIN32
TEST_ENTRY (pipe_close_stdout_read_stdin)
#endif
- TEST_ENTRY (pipe_set_non_blocking)
+ /* Seems to be either about 0.5s or 5s, depending on the OS. */
+ TEST_ENTRY_CUSTOM (pipe_set_non_blocking, 0, 0, 20000)
TEST_ENTRY (pipe_set_chmod)
TEST_ENTRY (tty)
#ifdef _WIN32
@@ -671,7 +675,13 @@ TASK_LIST_START
TEST_HELPER (tcp_shutdown_after_write, tcp4_echo_server)
TEST_ENTRY (tcp_connect_error_after_write)
- TEST_ENTRY (tcp_bind_error_addrinuse)
+ TEST_ENTRY (tcp_bind_error_addrinuse_connect)
+ /* tcp4_echo_server steals the port. It needs to be a separate process
+ * because libuv sets setsockopt(SO_REUSEADDR) that lets you steal an
+ * existing bind if it originates from the same process.
+ */
+ TEST_HELPER (tcp_bind_error_addrinuse_connect, tcp4_echo_server)
+ TEST_ENTRY (tcp_bind_error_addrinuse_listen)
TEST_ENTRY (tcp_bind_error_addrnotavail_1)
TEST_ENTRY (tcp_bind_error_addrnotavail_2)
TEST_ENTRY (tcp_bind_error_fault)
@@ -894,6 +904,7 @@ TASK_LIST_START
#ifdef UV_HAVE_KQUEUE
TEST_ENTRY (poll_nested_kqueue)
#endif
+ TEST_ENTRY (poll_multiple_handles)
TEST_ENTRY (socket_buffer_size)
@@ -935,6 +946,9 @@ TASK_LIST_START
TEST_ENTRY (poll_close_doesnt_corrupt_stack)
TEST_ENTRY (poll_closesocket)
+ TEST_ENTRY (close_fd)
+ TEST_ENTRY (closed_fd_events)
+ TEST_ENTRY (spawn_fs_open)
#ifdef _WIN32
TEST_ENTRY (spawn_detect_pipe_name_collisions_on_windows)
#if !defined(USING_UV_SHARED)
@@ -949,8 +963,6 @@ TASK_LIST_START
TEST_ENTRY (win32_signum_number)
#else
TEST_ENTRY (emfile)
- TEST_ENTRY (close_fd)
- TEST_ENTRY (spawn_fs_open)
TEST_ENTRY (spawn_setuid_setgid)
TEST_ENTRY (we_get_signal)
TEST_ENTRY (we_get_signals)
@@ -959,7 +971,6 @@ TASK_LIST_START
TEST_ENTRY (signal_multiple_loops)
TEST_ENTRY (signal_pending_on_close)
TEST_ENTRY (signal_close_loop_alive)
- TEST_ENTRY (closed_fd_events)
#endif
#ifdef __APPLE__
@@ -988,6 +999,7 @@ TASK_LIST_START
#endif
TEST_ENTRY (fs_chown)
TEST_ENTRY (fs_utime)
+ TEST_ENTRY (fs_utime_round)
TEST_ENTRY (fs_futime)
TEST_ENTRY (fs_lutime)
TEST_ENTRY (fs_readlink)
diff --git a/deps/uv/test/test-ping-pong.c b/deps/uv/test/test-ping-pong.c
index 7f7758b3b2ebea..4a26e4dee1b0f5 100644
--- a/deps/uv/test/test-ping-pong.c
+++ b/deps/uv/test/test-ping-pong.c
@@ -24,6 +24,7 @@
#include
#include
+#include /* strlen */
static int completed_pingers = 0;
@@ -33,23 +34,21 @@ static int completed_pingers = 0;
#define NUM_PINGS 1000
#endif
-/* 64 bytes is enough for a pinger */
-#define BUFSIZE 10240
-
static char PING[] = "PING\n";
+static char PONG[] = "PONG\n";
static int pinger_on_connect_count;
typedef struct {
int vectored_writes;
- int pongs;
- int state;
+ unsigned pongs;
+ unsigned state;
union {
uv_tcp_t tcp;
uv_pipe_t pipe;
} stream;
uv_connect_t connect_req;
- char read_buffer[BUFSIZE];
+ char* pong;
} pinger_t;
@@ -59,28 +58,44 @@ static void alloc_cb(uv_handle_t* handle, size_t size, uv_buf_t* buf) {
}
+static void ponger_on_close(uv_handle_t* handle) {
+ if (handle->data)
+ free(handle->data);
+ else
+ free(handle);
+}
+
+
static void pinger_on_close(uv_handle_t* handle) {
- pinger_t* pinger = (pinger_t*)handle->data;
+ pinger_t* pinger = (pinger_t*) handle->data;
- ASSERT(NUM_PINGS == pinger->pongs);
+ ASSERT_EQ(NUM_PINGS, pinger->pongs);
- free(pinger);
+ if (handle == (uv_handle_t*) &pinger->stream.tcp) {
+ free(pinger); /* also frees handle */
+ } else {
+ uv_close((uv_handle_t*) &pinger->stream.tcp, ponger_on_close);
+ free(handle);
+ }
completed_pingers++;
}
static void pinger_after_write(uv_write_t* req, int status) {
- ASSERT(status == 0);
+ ASSERT_EQ(status, 0);
free(req);
}
static void pinger_write_ping(pinger_t* pinger) {
+ uv_stream_t* stream;
uv_write_t* req;
uv_buf_t bufs[sizeof PING - 1];
int i, nbufs;
+ stream = (uv_stream_t*) &pinger->stream.tcp;
+
if (!pinger->vectored_writes) {
/* Write a single buffer. */
nbufs = 1;
@@ -94,13 +109,8 @@ static void pinger_write_ping(pinger_t* pinger) {
}
req = malloc(sizeof(*req));
- if (uv_write(req,
- (uv_stream_t*) &pinger->stream.tcp,
- bufs,
- nbufs,
- pinger_after_write)) {
- FATAL("uv_write failed");
- }
+ ASSERT_NOT_NULL(req);
+ ASSERT_EQ(0, uv_write(req, stream, bufs, nbufs, pinger_after_write));
puts("PING");
}
@@ -115,20 +125,20 @@ static void pinger_read_cb(uv_stream_t* stream,
pinger = (pinger_t*) stream->data;
if (nread < 0) {
- ASSERT(nread == UV_EOF);
+ ASSERT_EQ(nread, UV_EOF);
puts("got EOF");
free(buf->base);
- uv_close((uv_handle_t*)(&pinger->stream.tcp), pinger_on_close);
+ uv_close((uv_handle_t*) stream, pinger_on_close);
return;
}
- /* Now we count the pings */
+ /* Now we count the pongs */
for (i = 0; i < nread; i++) {
- ASSERT(buf->base[i] == PING[pinger->state]);
- pinger->state = (pinger->state + 1) % (sizeof(PING) - 1);
+ ASSERT_EQ(buf->base[i], pinger->pong[pinger->state]);
+ pinger->state = (pinger->state + 1) % strlen(pinger->pong);
if (pinger->state != 0)
continue;
@@ -139,7 +149,7 @@ static void pinger_read_cb(uv_stream_t* stream,
if (pinger->pongs < NUM_PINGS) {
pinger_write_ping(pinger);
} else {
- uv_close((uv_handle_t*)(&pinger->stream.tcp), pinger_on_close);
+ uv_close((uv_handle_t*) stream, pinger_on_close);
break;
}
}
@@ -148,20 +158,53 @@ static void pinger_read_cb(uv_stream_t* stream,
}
+static void ponger_read_cb(uv_stream_t* stream,
+ ssize_t nread,
+ const uv_buf_t* buf) {
+ uv_buf_t writebuf;
+ uv_write_t* req;
+ int i;
+
+ if (nread < 0) {
+ ASSERT_EQ(nread, UV_EOF);
+
+ puts("got EOF");
+ free(buf->base);
+
+ uv_close((uv_handle_t*) stream, ponger_on_close);
+
+ return;
+ }
+
+ /* Echo back */
+ for (i = 0; i < nread; i++) {
+ if (buf->base[i] == 'I')
+ buf->base[i] = 'O';
+ }
+
+ writebuf = uv_buf_init(buf->base, nread);
+ req = malloc(sizeof(*req));
+ ASSERT_NOT_NULL(req);
+ ASSERT_EQ(0, uv_write(req, stream, &writebuf, 1, pinger_after_write));
+}
+
+
static void pinger_on_connect(uv_connect_t* req, int status) {
- pinger_t* pinger = (pinger_t*)req->handle->data;
+ pinger_t* pinger = (pinger_t*) req->handle->data;
pinger_on_connect_count++;
- ASSERT(status == 0);
+ ASSERT_EQ(status, 0);
- ASSERT(1 == uv_is_readable(req->handle));
- ASSERT(1 == uv_is_writable(req->handle));
- ASSERT(0 == uv_is_closing((uv_handle_t *) req->handle));
+ ASSERT_EQ(1, uv_is_readable(req->handle));
+ ASSERT_EQ(1, uv_is_writable(req->handle));
+ ASSERT_EQ(0, uv_is_closing((uv_handle_t *) req->handle));
pinger_write_ping(pinger);
- uv_read_start((uv_stream_t*)(req->handle), alloc_cb, pinger_read_cb);
+ ASSERT_EQ(0, uv_read_start((uv_stream_t*) req->handle,
+ alloc_cb,
+ pinger_read_cb));
}
@@ -172,17 +215,18 @@ static void tcp_pinger_v6_new(int vectored_writes) {
pinger_t* pinger;
- ASSERT(0 == uv_ip6_addr("::1", TEST_PORT, &server_addr));
+ ASSERT_EQ(0, uv_ip6_addr("::1", TEST_PORT, &server_addr));
pinger = malloc(sizeof(*pinger));
- ASSERT(pinger != NULL);
+ ASSERT_NOT_NULL(pinger);
pinger->vectored_writes = vectored_writes;
pinger->state = 0;
pinger->pongs = 0;
+ pinger->pong = PING;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(uv_default_loop(), &pinger->stream.tcp);
pinger->stream.tcp.data = pinger;
- ASSERT(!r);
+ ASSERT_EQ(0, r);
/* We are never doing multiple reads/connects at a time anyway, so these
* handles can be pre-initialized. */
@@ -190,10 +234,10 @@ static void tcp_pinger_v6_new(int vectored_writes) {
&pinger->stream.tcp,
(const struct sockaddr*) &server_addr,
pinger_on_connect);
- ASSERT(!r);
+ ASSERT_EQ(0, r);
/* Synchronous connect callbacks are not allowed. */
- ASSERT(pinger_on_connect_count == 0);
+ ASSERT_EQ(pinger_on_connect_count, 0);
}
@@ -202,17 +246,18 @@ static void tcp_pinger_new(int vectored_writes) {
struct sockaddr_in server_addr;
pinger_t* pinger;
- ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &server_addr));
+ ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", TEST_PORT, &server_addr));
pinger = malloc(sizeof(*pinger));
- ASSERT(pinger != NULL);
+ ASSERT_NOT_NULL(pinger);
pinger->vectored_writes = vectored_writes;
pinger->state = 0;
pinger->pongs = 0;
+ pinger->pong = PING;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(uv_default_loop(), &pinger->stream.tcp);
pinger->stream.tcp.data = pinger;
- ASSERT(!r);
+ ASSERT_EQ(0, r);
/* We are never doing multiple reads/connects at a time anyway, so these
* handles can be pre-initialized. */
@@ -220,10 +265,10 @@ static void tcp_pinger_new(int vectored_writes) {
&pinger->stream.tcp,
(const struct sockaddr*) &server_addr,
pinger_on_connect);
- ASSERT(!r);
+ ASSERT_EQ(0, r);
/* Synchronous connect callbacks are not allowed. */
- ASSERT(pinger_on_connect_count == 0);
+ ASSERT_EQ(pinger_on_connect_count, 0);
}
@@ -232,15 +277,16 @@ static void pipe_pinger_new(int vectored_writes) {
pinger_t* pinger;
pinger = malloc(sizeof(*pinger));
- ASSERT(pinger != NULL);
+ ASSERT_NOT_NULL(pinger);
pinger->vectored_writes = vectored_writes;
pinger->state = 0;
pinger->pongs = 0;
+ pinger->pong = PING;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = uv_pipe_init(uv_default_loop(), &pinger->stream.pipe, 0);
pinger->stream.pipe.data = pinger;
- ASSERT(!r);
+ ASSERT_EQ(0, r);
/* We are never doing multiple reads/connects at a time anyway, so these
* handles can be pre-initialized. */
@@ -248,13 +294,86 @@ static void pipe_pinger_new(int vectored_writes) {
pinger_on_connect);
/* Synchronous connect callbacks are not allowed. */
- ASSERT(pinger_on_connect_count == 0);
+ ASSERT_EQ(pinger_on_connect_count, 0);
}
+static void socketpair_pinger_new(int vectored_writes) {
+ pinger_t* pinger;
+ uv_os_sock_t fds[2];
+ uv_tcp_t* ponger;
+
+ pinger = malloc(sizeof(*pinger));
+ ASSERT_NOT_NULL(pinger);
+ pinger->vectored_writes = vectored_writes;
+ pinger->state = 0;
+ pinger->pongs = 0;
+ pinger->pong = PONG;
+
+ /* Try to make a socketpair and do NUM_PINGS ping-pongs. */
+ (void)uv_default_loop(); /* ensure WSAStartup has been performed */
+ ASSERT_EQ(0, uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE));
+#ifndef _WIN32
+ /* On Windows, this is actually a UV_TCP, but libuv doesn't detect that. */
+ ASSERT_EQ(uv_guess_handle((uv_file) fds[0]), UV_NAMED_PIPE);
+ ASSERT_EQ(uv_guess_handle((uv_file) fds[1]), UV_NAMED_PIPE);
+#endif
+
+ ASSERT_EQ(0, uv_tcp_init(uv_default_loop(), &pinger->stream.tcp));
+ pinger->stream.pipe.data = pinger;
+ ASSERT_EQ(0, uv_tcp_open(&pinger->stream.tcp, fds[1]));
+
+ ponger = malloc(sizeof(*ponger));
+ ASSERT_NOT_NULL(ponger);
+ ponger->data = NULL;
+ ASSERT_EQ(0, uv_tcp_init(uv_default_loop(), ponger));
+ ASSERT_EQ(0, uv_tcp_open(ponger, fds[0]));
+
+ pinger_write_ping(pinger);
+
+ ASSERT_EQ(0, uv_read_start((uv_stream_t*) &pinger->stream.tcp,
+ alloc_cb,
+ pinger_read_cb));
+ ASSERT_EQ(0, uv_read_start((uv_stream_t*) ponger,
+ alloc_cb,
+ ponger_read_cb));
+}
+
+
+static void pipe2_pinger_new(int vectored_writes) {
+ uv_file fds[2];
+ pinger_t* pinger;
+ uv_pipe_t* ponger;
+
+ /* Try to make a pipe and do NUM_PINGS pings. */
+ ASSERT_EQ(0, uv_pipe(fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE));
+ ASSERT_EQ(uv_guess_handle(fds[0]), UV_NAMED_PIPE);
+ ASSERT_EQ(uv_guess_handle(fds[1]), UV_NAMED_PIPE);
+
+ ponger = malloc(sizeof(*ponger));
+ ASSERT_NOT_NULL(ponger);
+ ASSERT_EQ(0, uv_pipe_init(uv_default_loop(), ponger, 0));
+ ASSERT_EQ(0, uv_pipe_open(ponger, fds[0]));
+
+ pinger = malloc(sizeof(*pinger));
+ ASSERT_NOT_NULL(pinger);
+ pinger->vectored_writes = vectored_writes;
+ pinger->state = 0;
+ pinger->pongs = 0;
+ pinger->pong = PING;
+ ASSERT_EQ(0, uv_pipe_init(uv_default_loop(), &pinger->stream.pipe, 0));
+ ASSERT_EQ(0, uv_pipe_open(&pinger->stream.pipe, fds[1]));
+ pinger->stream.pipe.data = pinger; /* record for close_cb */
+ ponger->data = pinger; /* record for read_cb */
+
+ pinger_write_ping(pinger);
+
+ ASSERT_EQ(0, uv_read_start((uv_stream_t*) ponger, alloc_cb, pinger_read_cb));
+}
+
static int run_ping_pong_test(void) {
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
- ASSERT(completed_pingers == 1);
+ ASSERT_EQ(completed_pingers, 1);
MAKE_VALGRIND_HAPPY();
return 0;
@@ -263,12 +382,20 @@ static int run_ping_pong_test(void) {
TEST_IMPL(tcp_ping_pong) {
tcp_pinger_new(0);
+ run_ping_pong_test();
+
+ completed_pingers = 0;
+ socketpair_pinger_new(0);
return run_ping_pong_test();
}
TEST_IMPL(tcp_ping_pong_vec) {
tcp_pinger_new(1);
+ run_ping_pong_test();
+
+ completed_pingers = 0;
+ socketpair_pinger_new(1);
return run_ping_pong_test();
}
@@ -291,11 +418,19 @@ TEST_IMPL(tcp6_ping_pong_vec) {
TEST_IMPL(pipe_ping_pong) {
pipe_pinger_new(0);
+ run_ping_pong_test();
+
+ completed_pingers = 0;
+ pipe2_pinger_new(0);
return run_ping_pong_test();
}
TEST_IMPL(pipe_ping_pong_vec) {
pipe_pinger_new(1);
+ run_ping_pong_test();
+
+ completed_pingers = 0;
+ pipe2_pinger_new(1);
return run_ping_pong_test();
}
diff --git a/deps/uv/test/test-pipe-connect-error.c b/deps/uv/test/test-pipe-connect-error.c
index ebb2a6ca826ce4..8bba328a344f17 100644
--- a/deps/uv/test/test-pipe-connect-error.c
+++ b/deps/uv/test/test-pipe-connect-error.c
@@ -76,6 +76,9 @@ TEST_IMPL(pipe_connect_bad_name) {
TEST_IMPL(pipe_connect_to_file) {
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
+#endif
const char* path = "test/fixtures/empty_file";
uv_pipe_t client;
uv_connect_t req;
diff --git a/deps/uv/test/test-pipe-getsockname.c b/deps/uv/test/test-pipe-getsockname.c
index 48ee400e74cf97..5f377dcbb60696 100644
--- a/deps/uv/test/test-pipe-getsockname.c
+++ b/deps/uv/test/test-pipe-getsockname.c
@@ -225,7 +225,9 @@ TEST_IMPL(pipe_getsockname_blocking) {
ASSERT(r != -1);
r = uv_pipe_open(&pipe_client, readfd);
ASSERT(r == 0);
- r = uv_read_start((uv_stream_t*)&pipe_client, NULL, NULL);
+ r = uv_read_start((uv_stream_t*) &pipe_client,
+ (uv_alloc_cb) abort,
+ (uv_read_cb) abort);
ASSERT(r == 0);
Sleep(100);
r = uv_read_stop((uv_stream_t*)&pipe_client);
@@ -236,7 +238,9 @@ TEST_IMPL(pipe_getsockname_blocking) {
ASSERT(r == 0);
ASSERT(len1 == 0); /* It's an annonymous pipe. */
- r = uv_read_start((uv_stream_t*)&pipe_client, NULL, NULL);
+ r = uv_read_start((uv_stream_t*)&pipe_client,
+ (uv_alloc_cb) abort,
+ (uv_read_cb) abort);
ASSERT(r == 0);
Sleep(100);
diff --git a/deps/uv/test/test-pipe-set-non-blocking.c b/deps/uv/test/test-pipe-set-non-blocking.c
index 626b53f09a2776..c45148f2bd028d 100644
--- a/deps/uv/test/test-pipe-set-non-blocking.c
+++ b/deps/uv/test/test-pipe-set-non-blocking.c
@@ -16,18 +16,10 @@
#include "uv.h"
#include "task.h"
-#ifdef _WIN32
-
-TEST_IMPL(pipe_set_non_blocking) {
- RETURN_SKIP("Test not implemented on Windows.");
-}
-
-#else /* !_WIN32 */
-
#include /* memset */
+#ifndef _WIN32
#include /* close */
-#include
-#include
+#endif
struct thread_ctx {
uv_barrier_t barrier;
@@ -54,8 +46,27 @@ static void thread_main(void* arg) {
uv_fs_req_cleanup(&req);
} while (n > 0 || (n == -1 && uv_errno == UV_EINTR));
+#ifdef _WIN32
+ ASSERT(n == UV_EOF);
+#else
ASSERT(n == 0);
+#endif
+}
+
+
+#ifdef _WIN32
+static void write_cb(uv_write_t* req, int status) {
+ ASSERT(status == 0);
+ req->handle = NULL; /* signal completion of write_cb */
}
+#endif
+
+#ifdef _WIN32
+#define NWRITES (10 << 16)
+#else
+#define NWRITES (10 << 20)
+#endif
+
TEST_IMPL(pipe_set_non_blocking) {
struct thread_ctx ctx;
@@ -66,9 +77,12 @@ TEST_IMPL(pipe_set_non_blocking) {
uv_buf_t buf;
uv_file fd[2];
int n;
+#ifdef _WIN32
+ uv_write_t write_req;
+#endif
ASSERT(0 == uv_pipe_init(uv_default_loop(), &pipe_handle, 0));
- ASSERT(0 == socketpair(AF_UNIX, SOCK_STREAM, 0, fd));
+ ASSERT(0 == uv_pipe(fd, 0, 0));
ASSERT(0 == uv_pipe_open(&pipe_handle, fd[1]));
ASSERT(0 == uv_stream_set_blocking((uv_stream_t*) &pipe_handle, 1));
fd[1] = -1; /* fd[1] is owned by pipe_handle now. */
@@ -83,11 +97,20 @@ TEST_IMPL(pipe_set_non_blocking) {
memset(data, '.', sizeof(data));
nwritten = 0;
- while (nwritten < 10 << 20) {
+ while (nwritten < NWRITES) {
/* The stream is in blocking mode so uv_try_write() should always succeed
* with the exact number of bytes that we wanted written.
*/
n = uv_try_write((uv_stream_t*) &pipe_handle, &buf, 1);
+#ifdef _WIN32
+ ASSERT(n == UV_EAGAIN); /* E_NOTIMPL */
+ ASSERT(0 == uv_write(&write_req, (uv_stream_t*) &pipe_handle, &buf, 1, write_cb));
+ ASSERT(write_req.handle != NULL);
+ ASSERT(1 == uv_run(uv_default_loop(), UV_RUN_ONCE)); /* queue write_cb */
+ ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE)); /* process write_cb */
+ ASSERT(write_req.handle == NULL); /* check for signaled completion of write_cb */
+ n = buf.len;
+#endif
ASSERT(n == sizeof(data));
nwritten += n;
}
@@ -96,12 +119,14 @@ TEST_IMPL(pipe_set_non_blocking) {
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
ASSERT(0 == uv_thread_join(&thread));
+#ifdef _WIN32
+ ASSERT(0 == _close(fd[0])); /* fd[1] is closed by uv_close(). */
+#else
ASSERT(0 == close(fd[0])); /* fd[1] is closed by uv_close(). */
+#endif
fd[0] = -1;
uv_barrier_destroy(&ctx.barrier);
MAKE_VALGRIND_HAPPY();
return 0;
}
-
-#endif /* !_WIN32 */
diff --git a/deps/uv/test/test-platform-output.c b/deps/uv/test/test-platform-output.c
index f547ddfd7696ff..341c7ae54ed28b 100644
--- a/deps/uv/test/test-platform-output.c
+++ b/deps/uv/test/test-platform-output.c
@@ -155,6 +155,7 @@ TEST_IMPL(platform_output) {
printf(" username: %s\n", pwd.username);
printf(" shell: %s\n", pwd.shell);
printf(" home directory: %s\n", pwd.homedir);
+ uv_os_free_passwd(&pwd);
pid = uv_os_getpid();
ASSERT(pid > 0);
diff --git a/deps/uv/test/test-poll-multiple-handles.c b/deps/uv/test/test-poll-multiple-handles.c
new file mode 100644
index 00000000000000..fc2205ddec74d5
--- /dev/null
+++ b/deps/uv/test/test-poll-multiple-handles.c
@@ -0,0 +1,99 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include
+
+#ifndef _WIN32
+# include
+# include
+# include
+#endif
+
+#include "uv.h"
+#include "task.h"
+
+
+static int close_cb_called = 0;
+
+
+static void close_cb(uv_handle_t* handle) {
+ close_cb_called++;
+}
+
+static void poll_cb(uv_poll_t* handle, int status, int events) {
+ /* Not a bound socket, linux immediately reports UV_READABLE, other OS do not */
+ ASSERT(events == UV_READABLE);
+}
+
+TEST_IMPL(poll_multiple_handles) {
+ uv_os_sock_t sock;
+ uv_poll_t first_poll_handle, second_poll_handle;
+
+#ifdef _WIN32
+ {
+ struct WSAData wsa_data;
+ int r = WSAStartup(MAKEWORD(2, 2), &wsa_data);
+ ASSERT(r == 0);
+ }
+#endif
+
+ sock = socket(AF_INET, SOCK_STREAM, 0);
+#ifdef _WIN32
+ ASSERT(sock != INVALID_SOCKET);
+#else
+ ASSERT(sock != -1);
+#endif
+ ASSERT(0 == uv_poll_init_socket(uv_default_loop(), &first_poll_handle, sock));
+ ASSERT(0 == uv_poll_init_socket(uv_default_loop(), &second_poll_handle, sock));
+
+ ASSERT(0 == uv_poll_start(&first_poll_handle, UV_READABLE, poll_cb));
+
+ /* We may not start polling while another polling handle is active
+ * on that fd.
+ */
+#ifndef _WIN32
+ /* We do not track handles in an O(1) lookupable way on Windows,
+ * so not checking that here.
+ */
+ ASSERT(uv_poll_start(&second_poll_handle, UV_READABLE, poll_cb) == UV_EEXIST);
+#endif
+
+ /* After stopping the other polling handle, we now should be able to poll */
+ ASSERT(0 == uv_poll_stop(&first_poll_handle));
+ ASSERT(0 == uv_poll_start(&second_poll_handle, UV_READABLE, poll_cb));
+
+ /* Closing an already stopped polling handle is safe in any case */
+ uv_close((uv_handle_t*) &first_poll_handle, close_cb);
+
+ uv_unref((uv_handle_t*) &second_poll_handle);
+ ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+ ASSERT(close_cb_called == 1);
+ uv_ref((uv_handle_t*) &second_poll_handle);
+
+ ASSERT(uv_is_active((uv_handle_t*) &second_poll_handle));
+ uv_close((uv_handle_t*) &second_poll_handle, close_cb);
+
+ ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+ ASSERT(close_cb_called == 2);
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
diff --git a/deps/uv/test/test-shutdown-eof.c b/deps/uv/test/test-shutdown-eof.c
index 9f95e7561f26c7..0abab9175e9d26 100644
--- a/deps/uv/test/test-shutdown-eof.c
+++ b/deps/uv/test/test-shutdown-eof.c
@@ -89,7 +89,13 @@ static void connect_cb(uv_connect_t *req, int status) {
ASSERT(req == &connect_req);
/* Start reading from our connection so we can receive the EOF. */
- uv_read_start((uv_stream_t*)&tcp, alloc_cb, read_cb);
+ ASSERT_EQ(0, uv_read_start((uv_stream_t*)&tcp, alloc_cb, read_cb));
+
+ /* Check error handling. */
+ ASSERT_EQ(UV_EALREADY, uv_read_start((uv_stream_t*)&tcp, alloc_cb, read_cb));
+ ASSERT_EQ(UV_EINVAL, uv_read_start(NULL, alloc_cb, read_cb));
+ ASSERT_EQ(UV_EINVAL, uv_read_start((uv_stream_t*)&tcp, NULL, read_cb));
+ ASSERT_EQ(UV_EINVAL, uv_read_start((uv_stream_t*)&tcp, alloc_cb, NULL));
/*
* Write the letter 'Q' to gracefully kill the echo-server. This will not
diff --git a/deps/uv/test/test-spawn.c b/deps/uv/test/test-spawn.c
index d1757337a6d468..886ddaf63b509d 100644
--- a/deps/uv/test/test-spawn.c
+++ b/deps/uv/test/test-spawn.c
@@ -29,11 +29,9 @@
#include
#ifdef _WIN32
-# if defined(__MINGW32__)
-# include
-# endif
# include
# include
+ typedef BOOL (WINAPI *sCompareObjectHandles)(_In_ HANDLE, _In_ HANDLE);
#else
# include
# include
@@ -49,9 +47,7 @@ static char exepath[1024];
static size_t exepath_size = 1024;
static char* args[5];
static int no_term_signal;
-#ifndef _WIN32
static int timer_counter;
-#endif
static uv_tcp_t tcp_server;
#define OUTPUT_SIZE 1024
@@ -140,12 +136,10 @@ static void on_read(uv_stream_t* tcp, ssize_t nread, const uv_buf_t* buf) {
}
-#ifndef _WIN32
static void on_read_once(uv_stream_t* tcp, ssize_t nread, const uv_buf_t* buf) {
uv_read_stop(tcp);
on_read(tcp, nread, buf);
}
-#endif
static void write_cb(uv_write_t* req, int status) {
@@ -154,6 +148,11 @@ static void write_cb(uv_write_t* req, int status) {
}
+static void write_null_cb(uv_write_t* req, int status) {
+ ASSERT(status == 0);
+}
+
+
static void init_process_options(char* test, uv_exit_cb exit_cb) {
/* Note spawn_helper1 defined in test/run-tests.c */
int r = uv_exepath(exepath, &exepath_size);
@@ -177,11 +176,9 @@ static void timer_cb(uv_timer_t* handle) {
}
-#ifndef _WIN32
static void timer_counter_cb(uv_timer_t* handle) {
++timer_counter;
}
-#endif
TEST_IMPL(spawn_fails) {
@@ -1328,9 +1325,8 @@ TEST_IMPL(environment_creation) {
found = 1;
}
}
- if (prev) { /* verify sort order -- requires Vista */
-#if _WIN32_WINNT >= 0x0600 && \
- (!defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR))
+ if (prev) { /* verify sort order */
+#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
ASSERT(CompareStringOrdinal(prev, -1, str, -1, TRUE) == 1);
#endif
}
@@ -1579,17 +1575,27 @@ TEST_IMPL(spawn_auto_unref) {
}
-#ifndef _WIN32
TEST_IMPL(spawn_fs_open) {
- int fd;
+ int r;
+ uv_os_fd_t fd;
+ uv_os_fd_t dup_fd;
uv_fs_t fs_req;
uv_pipe_t in;
uv_write_t write_req;
+ uv_write_t write_req2;
uv_buf_t buf;
uv_stdio_container_t stdio[1];
+#ifdef _WIN32
+ const char dev_null[] = "NUL";
+ HMODULE kernelbase_module;
+ sCompareObjectHandles pCompareObjectHandles; /* function introduced in Windows 10 */
+#else
+ const char dev_null[] = "/dev/null";
+#endif
- fd = uv_fs_open(NULL, &fs_req, "/dev/null", O_RDWR, 0, NULL);
- ASSERT(fd >= 0);
+ r = uv_fs_open(NULL, &fs_req, dev_null, O_RDWR, 0, NULL);
+ ASSERT(r != -1);
+ fd = uv_get_osfhandle((uv_file) fs_req.result);
uv_fs_req_cleanup(&fs_req);
init_process_options("spawn_helper8", exit_cb);
@@ -1601,13 +1607,28 @@ TEST_IMPL(spawn_fs_open) {
options.stdio[0].data.stream = (uv_stream_t*) ∈
options.stdio_count = 1;
+ /* make an inheritable copy */
+#ifdef _WIN32
+ ASSERT(0 != DuplicateHandle(GetCurrentProcess(), fd, GetCurrentProcess(), &dup_fd,
+ 0, /* inherit */ TRUE, DUPLICATE_SAME_ACCESS));
+ kernelbase_module = GetModuleHandleA("kernelbase.dll");
+ pCompareObjectHandles = (sCompareObjectHandles)
+ GetProcAddress(kernelbase_module, "CompareObjectHandles");
+ ASSERT(pCompareObjectHandles == NULL || pCompareObjectHandles(fd, dup_fd));
+#else
+ dup_fd = dup(fd);
+#endif
+
ASSERT(0 == uv_spawn(uv_default_loop(), &process, &options));
buf = uv_buf_init((char*) &fd, sizeof(fd));
- ASSERT(0 == uv_write(&write_req, (uv_stream_t*) &in, &buf, 1, write_cb));
+ ASSERT(0 == uv_write(&write_req, (uv_stream_t*) &in, &buf, 1, write_null_cb));
+
+ buf = uv_buf_init((char*) &dup_fd, sizeof(fd));
+ ASSERT(0 == uv_write(&write_req2, (uv_stream_t*) &in, &buf, 1, write_cb));
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
- ASSERT(0 == uv_fs_close(NULL, &fs_req, fd, NULL));
+ ASSERT(0 == uv_fs_close(NULL, &fs_req, r, NULL));
ASSERT(exit_cb_called == 1);
ASSERT(close_cb_called == 2); /* One for `in`, one for process */
@@ -1615,17 +1636,20 @@ TEST_IMPL(spawn_fs_open) {
MAKE_VALGRIND_HAPPY();
return 0;
}
-#endif /* !_WIN32 */
-#ifndef _WIN32
TEST_IMPL(closed_fd_events) {
uv_stdio_container_t stdio[3];
uv_pipe_t pipe_handle;
- int fd[2];
+ uv_fs_t req;
+ uv_buf_t bufs[1];
+ uv_file fd[2];
+ bufs[0] = uv_buf_init("", 1);
/* create a pipe and share it with a child process */
- ASSERT(0 == pipe(fd));
+ ASSERT(0 == uv_pipe(fd, 0, 0));
+ ASSERT(fd[0] > 2);
+ ASSERT(fd[1] > 2);
/* spawn_helper4 blocks indefinitely. */
init_process_options("spawn_helper4", exit_cb);
@@ -1642,12 +1666,18 @@ TEST_IMPL(closed_fd_events) {
/* read from the pipe with uv */
ASSERT(0 == uv_pipe_init(uv_default_loop(), &pipe_handle, 0));
ASSERT(0 == uv_pipe_open(&pipe_handle, fd[0]));
+ /* uv_pipe_open() takes ownership of the file descriptor. */
fd[0] = -1;
ASSERT(0 == uv_read_start((uv_stream_t*) &pipe_handle, on_alloc, on_read_once));
- ASSERT(1 == write(fd[1], "", 1));
+ ASSERT(1 == uv_fs_write(NULL, &req, fd[1], bufs, 1, -1, NULL));
+ ASSERT(req.result == 1);
+ uv_fs_req_cleanup(&req);
+#ifdef _WIN32
+ ASSERT(1 == uv_run(uv_default_loop(), UV_RUN_ONCE));
+#endif
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE));
/* should have received just one byte */
@@ -1656,7 +1686,9 @@ TEST_IMPL(closed_fd_events) {
/* close the pipe and see if we still get events */
uv_close((uv_handle_t*) &pipe_handle, close_cb);
- ASSERT(1 == write(fd[1], "", 1));
+ ASSERT(1 == uv_fs_write(NULL, &req, fd[1], bufs, 1, -1, NULL));
+ ASSERT(req.result == 1);
+ uv_fs_req_cleanup(&req);
ASSERT(0 == uv_timer_init(uv_default_loop(), &timer));
ASSERT(0 == uv_timer_start(&timer, timer_counter_cb, 10, 0));
@@ -1669,13 +1701,17 @@ TEST_IMPL(closed_fd_events) {
ASSERT(timer_counter == 1);
/* cleanup */
- ASSERT(0 == uv_process_kill(&process, /* SIGTERM */ 15));
+ ASSERT(0 == uv_process_kill(&process, SIGTERM));
+#ifdef _WIN32
+ ASSERT(0 == _close(fd[1]));
+#else
ASSERT(0 == close(fd[1]));
+#endif
MAKE_VALGRIND_HAPPY();
return 0;
}
-#endif /* !_WIN32 */
+
TEST_IMPL(spawn_reads_child_path) {
int r;
@@ -1746,38 +1782,6 @@ TEST_IMPL(spawn_reads_child_path) {
return 0;
}
-#ifndef _WIN32
-static int mpipe(int *fds) {
- if (pipe(fds) == -1)
- return -1;
- if (fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
- fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
- close(fds[0]);
- close(fds[1]);
- return -1;
- }
- return 0;
-}
-#else
-static int mpipe(int *fds) {
- SECURITY_ATTRIBUTES attr;
- HANDLE readh, writeh;
- attr.nLength = sizeof(attr);
- attr.lpSecurityDescriptor = NULL;
- attr.bInheritHandle = FALSE;
- if (!CreatePipe(&readh, &writeh, &attr, 0))
- return -1;
- fds[0] = _open_osfhandle((intptr_t)readh, 0);
- fds[1] = _open_osfhandle((intptr_t)writeh, 0);
- if (fds[0] == -1 || fds[1] == -1) {
- CloseHandle(readh);
- CloseHandle(writeh);
- return -1;
- }
- return 0;
-}
-#endif /* !_WIN32 */
-
TEST_IMPL(spawn_inherit_streams) {
uv_process_t child_req;
uv_stdio_container_t child_stdio[2];
@@ -1803,8 +1807,8 @@ TEST_IMPL(spawn_inherit_streams) {
ASSERT(uv_pipe_init(loop, &pipe_stdin_parent, 0) == 0);
ASSERT(uv_pipe_init(loop, &pipe_stdout_parent, 0) == 0);
- ASSERT(mpipe(fds_stdin) != -1);
- ASSERT(mpipe(fds_stdout) != -1);
+ ASSERT(uv_pipe(fds_stdin, 0, 0) == 0);
+ ASSERT(uv_pipe(fds_stdout, 0, 0) == 0);
ASSERT(uv_pipe_open(&pipe_stdin_child, fds_stdin[0]) == 0);
ASSERT(uv_pipe_open(&pipe_stdout_child, fds_stdout[1]) == 0);
diff --git a/deps/uv/test/test-tcp-bind-error.c b/deps/uv/test/test-tcp-bind-error.c
index f95efd9f0c8900..7732267f44f54e 100644
--- a/deps/uv/test/test-tcp-bind-error.c
+++ b/deps/uv/test/test-tcp-bind-error.c
@@ -25,6 +25,7 @@
#include
+static int connect_cb_called = 0;
static int close_cb_called = 0;
@@ -34,7 +35,49 @@ static void close_cb(uv_handle_t* handle) {
}
-TEST_IMPL(tcp_bind_error_addrinuse) {
+static void connect_cb(uv_connect_t* req, int status) {
+ ASSERT(status == UV_EADDRINUSE);
+ uv_close((uv_handle_t*) req->handle, close_cb);
+ connect_cb_called++;
+}
+
+
+TEST_IMPL(tcp_bind_error_addrinuse_connect) {
+ struct sockaddr_in addr;
+ int addrlen;
+ uv_connect_t req;
+ uv_tcp_t conn;
+
+ /* 127.0.0.1:<TEST_PORT> is already taken by tcp4_echo_server running in
+ * another process. uv_tcp_bind() and uv_tcp_connect() should still succeed
+ * (greatest common denominator across platforms) but the connect callback
+ * should receive an UV_EADDRINUSE error.
+ */
+ ASSERT(0 == uv_tcp_init(uv_default_loop(), &conn));
+ ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
+ ASSERT(0 == uv_tcp_bind(&conn, (const struct sockaddr*) &addr, 0));
+
+ ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT + 1, &addr));
+ ASSERT(0 == uv_tcp_connect(&req,
+ &conn,
+ (const struct sockaddr*) &addr,
+ connect_cb));
+
+ addrlen = sizeof(addr);
+ ASSERT(UV_EADDRINUSE == uv_tcp_getsockname(&conn,
+ (struct sockaddr*) &addr,
+ &addrlen));
+
+ ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+ ASSERT(connect_cb_called == 1);
+ ASSERT(close_cb_called == 1);
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
+
+
+TEST_IMPL(tcp_bind_error_addrinuse_listen) {
struct sockaddr_in addr;
uv_tcp_t server1, server2;
int r;
@@ -240,7 +283,9 @@ TEST_IMPL(tcp_bind_writable_flags) {
ASSERT(r == UV_EPIPE);
r = uv_shutdown(&shutdown_req, (uv_stream_t*) &server, NULL);
ASSERT(r == UV_ENOTCONN);
- r = uv_read_start((uv_stream_t*) &server, NULL, NULL);
+ r = uv_read_start((uv_stream_t*) &server,
+ (uv_alloc_cb) abort,
+ (uv_read_cb) abort);
ASSERT(r == UV_ENOTCONN);
uv_close((uv_handle_t*)&server, close_cb);
diff --git a/deps/uv/test/test-tcp-connect-timeout.c b/deps/uv/test/test-tcp-connect-timeout.c
index a67d325284da11..3c34e54ae7f85e 100644
--- a/deps/uv/test/test-tcp-connect-timeout.c
+++ b/deps/uv/test/test-tcp-connect-timeout.c
@@ -111,7 +111,7 @@ static int is_supported_system(void) {
if (cnt != 3) {
return 0;
}
- // relase >= 10.0.16299
+ /* release >= 10.0.16299 */
for (cnt = 0; cnt < 3; ++cnt) {
if (semver[cnt] > min_semver[cnt])
return 1;
diff --git a/deps/uv/test/test-tty.c b/deps/uv/test/test-tty.c
index a9d38f22117afd..ff7d388d7c00f3 100644
--- a/deps/uv/test/test-tty.c
+++ b/deps/uv/test/test-tty.c
@@ -426,6 +426,9 @@ TEST_IMPL(tty_pty) {
#if defined(__QEMU__)
RETURN_SKIP("Test does not currently work in QEMU");
#endif
+#if defined(__ASAN__)
+ RETURN_SKIP("Test does not currently work in ASAN");
+#endif
#if defined(__APPLE__) || \
defined(__DragonFly__) || \
diff --git a/deps/uv/test/test-udp-connect.c b/deps/uv/test/test-udp-connect.c
index 41ace117a1bba2..52856f700d210e 100644
--- a/deps/uv/test/test-udp-connect.c
+++ b/deps/uv/test/test-udp-connect.c
@@ -124,7 +124,7 @@ TEST_IMPL(udp_connect) {
buf = uv_buf_init("EXIT", 4);
- // connect() to INADDR_ANY fails on Windows wih WSAEADDRNOTAVAIL
+ /* connect() to INADDR_ANY fails on Windows with WSAEADDRNOTAVAIL */
ASSERT_EQ(0, uv_ip4_addr("0.0.0.0", TEST_PORT, &tmp_addr));
r = uv_udp_connect(&client, (const struct sockaddr*) &tmp_addr);
#ifdef _WIN32
diff --git a/deps/uv/tools/make_dist_html.py b/deps/uv/tools/make_dist_html.py
index 7a19d3e1151fb8..4833b1b8e38180 100644
--- a/deps/uv/tools/make_dist_html.py
+++ b/deps/uv/tools/make_dist_html.py
@@ -1,6 +1,4 @@
-#!/usr/bin/python
-
-from __future__ import print_function
+#!/usr/bin/python3
import itertools
import os
@@ -84,7 +82,7 @@
'''
def version(tag):
- return map(int, re.match('^v(\d+)\.(\d+)\.(\d+)', tag).groups())
+ return list(map(int, re.match('^v(\d+)\.(\d+)\.(\d+)', tag).groups()))
def major_minor(tag):
return version(tag)[:2]
@@ -114,7 +112,7 @@ def groups_for(groups, n=4):
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__))
- tags = subprocess.check_output(['git', 'tag'])
+ tags = subprocess.check_output(['git', 'tag'], text=True)
tags = [tag for tag in tags.split('\n') if tag.startswith('v')]
tags.sort(key=version, reverse=True)
groups = [group_for(list(g)) for _, g in itertools.groupby(tags, major_minor)]
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 4b49968baa053f..987b36b676712a 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -414,6 +414,16 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
int prot = GetProtectionFromMemoryPermission(access);
int ret = mprotect(address, size, prot);
+
+ // MacOS 11.2 on Apple Silicon refuses to switch permissions from
+ // rwx to none. Just use madvise instead.
+#if defined(V8_OS_MACOSX)
+ if (ret != 0 && access == OS::MemoryPermission::kNoAccess) {
+ ret = madvise(address, size, MADV_FREE_REUSABLE);
+ return ret == 0;
+ }
+#endif
+
if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
// This is advisory; ignore errors and continue execution.
USE(DiscardSystemPages(address, size));
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index c4edee49212489..dba4865250c8aa 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -573,6 +573,8 @@ namespace internal {
T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \
T(DataCloneErrorDetachedArrayBuffer, \
"An ArrayBuffer is detached and could not be cloned.") \
+ T(DataCloneErrorNonDetachableArrayBuffer, \
+ "ArrayBuffer is not detachable and could not be cloned.") \
T(DataCloneErrorSharedArrayBufferTransferred, \
"A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \
"transferred.") \
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 58b1db67492857..9e79f9ba434193 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -860,6 +860,11 @@ Maybe ValueSerializer::WriteJSArrayBuffer(
WriteVarint(index.FromJust());
return ThrowIfOutOfMemory();
}
+ if (!array_buffer->is_detachable()) {
+ ThrowDataCloneError(
+ MessageTemplate::kDataCloneErrorNonDetachableArrayBuffer);
+ return Nothing();
+ }
uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
if (transfer_entry) {
diff --git a/deps/v8/test/mjsunit/wasm/worker-memory.js b/deps/v8/test/mjsunit/wasm/worker-memory.js
index c5b99ede7e2836..bf5430f7139815 100644
--- a/deps/v8/test/mjsunit/wasm/worker-memory.js
+++ b/deps/v8/test/mjsunit/wasm/worker-memory.js
@@ -11,6 +11,13 @@
assertThrows(() => worker.postMessage(memory), Error);
})();
+(function TestPostMessageUnsharedMemoryBuffer() {
+ let worker = new Worker('', {type: 'string'});
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 2});
+
+ assertThrows(() => worker.postMessage(memory.buffer), Error);
+})();
+
// Can't use assert in a worker.
let workerHelpers =
`function assertTrue(value, msg) {
diff --git a/doc/api/addons.md b/doc/api/addons.md
index 19799233cf7584..43f24860338a68 100644
--- a/doc/api/addons.md
+++ b/doc/api/addons.md
@@ -7,12 +7,13 @@ _Addons_ are dynamically-linked shared objects written in C++. The
[`require()`][require] function can load addons as ordinary Node.js modules.
Addons provide an interface between JavaScript and C/C++ libraries.
-There are three options for implementing addons: N-API, nan, or direct
+There are three options for implementing addons: Node-API, nan, or direct
use of internal V8, libuv and Node.js libraries. Unless there is a need for
-direct access to functionality which is not exposed by N-API, use N-API.
-Refer to [C/C++ addons with N-API](n-api.md) for more information on N-API.
+direct access to functionality which is not exposed by Node-API, use Node-API.
+Refer to [C/C++ addons with Node-API](n-api.md) for more information on
+Node-API.
-When not using N-API, implementing addons is complicated,
+When not using Node-API, implementing addons is complicated,
involving knowledge of several components and APIs:
* V8: the C++ library Node.js uses to provide the
@@ -245,7 +246,7 @@ changes:
In order to be loaded from multiple Node.js environments,
such as a main thread and a Worker thread, an add-on needs to either:
-* Be an N-API addon, or
+* Be a Node-API addon, or
* Be declared as context-aware using `NODE_MODULE_INIT()` as described above
In order to support [`Worker`][] threads, addons need to clean up any resources
@@ -437,11 +438,11 @@ addon developers are recommended to use to keep compatibility between past and
future releases of V8 and Node.js. See the `nan` [examples][] for an
illustration of how it can be used.
-## N-API
+## Node-API
> Stability: 2 - Stable
-N-API is an API for building native addons. It is independent from
+Node-API is an API for building native addons. It is independent from
the underlying JavaScript runtime (e.g. V8) and is maintained as part of
Node.js itself. This API will be Application Binary Interface (ABI) stable
across versions of Node.js. It is intended to insulate addons from
@@ -451,17 +452,17 @@ recompilation. Addons are built/packaged with the same approach/tools
outlined in this document (node-gyp, etc.). The only difference is the
set of APIs that are used by the native code. Instead of using the V8
or [Native Abstractions for Node.js][] APIs, the functions available
-in the N-API are used.
+in the Node-API are used.
Creating and maintaining an addon that benefits from the ABI stability
-provided by N-API carries with it certain
+provided by Node-API carries with it certain
[implementation considerations](n-api.md#n_api_implications_of_abi_stability).
-To use N-API in the above "Hello world" example, replace the content of
+To use Node-API in the above "Hello world" example, replace the content of
`hello.cc` with the following. All other instructions remain the same.
```cpp
-// hello.cc using N-API
+// hello.cc using Node-API
#include <node_api.h>
namespace demo {
@@ -493,7 +494,7 @@ NAPI_MODULE(NODE_GYP_MODULE_NAME, init)
```
The functions available and how to use them are documented in
-[C/C++ addons with N-API](n-api.md).
+[C/C++ addons with Node-API](n-api.md).
## Addon examples
diff --git a/doc/api/buffer.md b/doc/api/buffer.md
index 5c02be9f3613a4..d3c1e43e5e29ce 100644
--- a/doc/api/buffer.md
+++ b/doc/api/buffer.md
@@ -370,7 +370,7 @@ The content-type of the `Blob`.
### `Blob` objects and `MessageChannel`
Once a {Blob} object is created, it can be sent via `MessagePort` to multiple
-destinations without transfering or immediately copying the data. The data
+destinations without transferring or immediately copying the data. The data
contained by the `Blob` is copied only when the `arrayBuffer()` or `text()`
methods are called.
diff --git a/doc/api/cli.md b/doc/api/cli.md
index 5373f97fca07a9..70cb69cd54a451 100644
--- a/doc/api/cli.md
+++ b/doc/api/cli.md
@@ -355,9 +355,9 @@ Node.js instance runs out of memory when `max_count` is greater than `0`.
Generating V8 snapshots takes time and memory (both memory managed by the
V8 heap and native memory outside the V8 heap). The bigger the heap is,
-the more resources it needs. Node.js will adjust the V8 heap to accommondate
+the more resources it needs. Node.js will adjust the V8 heap to accommodate
the additional V8 heap memory overhead, and try its best to avoid using up
-all the memory avialable to the process. When the process uses
+all the memory available to the process. When the process uses
more memory than the system deems appropriate, the process may be terminated
abruptly by the system, depending on the system configuration.
diff --git a/doc/api/console.md b/doc/api/console.md
index 8ede89bcfc376c..9f7581716340c3 100644
--- a/doc/api/console.md
+++ b/doc/api/console.md
@@ -435,7 +435,7 @@ added: v0.1.104
changes:
- version: v13.0.0
pr-url: https://github.com/nodejs/node/pull/29251
- description: The elapsed time is diplayed with a suitable time unit.
+ description: The elapsed time is displayed with a suitable time unit.
- version: v6.0.0
pr-url: https://github.com/nodejs/node/pull/5901
description: This method no longer supports multiple calls that don’t map
diff --git a/doc/api/crypto.md b/doc/api/crypto.md
index 76326cc75bfd53..fa2694d0fe3872 100644
--- a/doc/api/crypto.md
+++ b/doc/api/crypto.md
@@ -53,7 +53,7 @@ The `crypto` module provides the `Certificate` class for working with SPKAC
data. The most common usage is handling output generated by the HTML5
`` element. Node.js uses [OpenSSL's SPKAC implementation][] internally.
-### `Certificate.exportChallenge(spkac[, encoding])`
+### Static method: `Certificate.exportChallenge(spkac[, encoding])`
+
+* `key` {CryptoKey}
+* Returns: {KeyObject}
+
+Example: Converting a `CryptoKey` instance to a `KeyObject`:
+
+```js
+const { webcrypto: { subtle }, KeyObject } = require('crypto');
+
+(async function() {
+ const key = await subtle.generateKey({
+ name: 'HMAC',
+ hash: 'SHA-256',
+ length: 256
+ }, true, ['sign', 'verify']);
+
+ const keyObject = KeyObject.from(key);
+ console.log(keyObject.symmetricKeySize);
+ // Prints: 32 (symmetric key size in bytes)
+})();
+```
+
### `keyObject.asymmetricKeyDetails`
* `options`: {Object}
-* Returns: {string | Buffer}
+* Returns: {string | Buffer | Object}
-For symmetric keys, this function allocates a `Buffer` containing the key
-material and ignores any options.
+For symmetric keys, the following encoding options can be used:
-For asymmetric keys, the `options` parameter is used to determine the export
-format.
+* `format`: {string} Must be `'buffer'` (default) or `'jwk'`.
For public keys, the following encoding options can be used:
* `type`: {string} Must be one of `'pkcs1'` (RSA only) or `'spki'`.
-* `format`: {string} Must be `'pem'` or `'der'`.
+* `format`: {string} Must be `'pem'`, `'der'`, or `'jwk'`.
For private keys, the following encoding options can be used:
* `type`: {string} Must be one of `'pkcs1'` (RSA only), `'pkcs8'` or
`'sec1'` (EC only).
-* `format`: {string} Must be `'pem'` or `'der'`.
+* `format`: {string} Must be `'pem'`, `'der'`, or `'jwk'`.
* `cipher`: {string} If specified, the private key will be encrypted with
the given `cipher` and `passphrase` using PKCS#5 v2.0 password based
encryption.
* `passphrase`: {string | Buffer} The passphrase to use for encryption, see
`cipher`.
-When PEM encoding was selected, the result will be a string, otherwise it will
-be a buffer containing the data encoded as DER.
+The result type depends on the selected encoding format, when PEM the
+result is a string, when DER it will be a buffer containing the data
+encoded as DER, when [JWK][] it will be an object.
+
+When [JWK][] encoding format was selected, all other encoding options are
+ignored.
PKCS#1, SEC1, and PKCS#8 type keys can be encrypted by using a combination of
the `cipher` and `format` options. The PKCS#8 `type` can be used with any
@@ -1804,6 +1842,16 @@ added: v15.6.0
The issuer identification included in this certificate.
+### `x509.issuerCertificate`
+
+
+* Type: {X509Certificate}
+
+The issuer certificate or `undefined` if the issuer certificate is not
+available.
+
### `x509.keyUsage`
-
-* `type`: {string} The intended use of the generated secret key. Currently
- accepted values are `'hmac'` and `'aes'`.
-* `options`: {Object}
- * `length`: {number} The bit length of the key to generate.
- * If `type` is `'hmac'`, the minimum is 1, and the maximum length is
- 231-1. If the value is not a multiple of 8, the generated
- key will be truncated to `Math.floor(length / 8)`.
- * If `type` is `'aes'`, the length must be one of `128`, `192`, or `256`.
-* Returns: {KeyObject}
-
-Synchronously generates a new random secret key of the given `length`. The
-`type` will determine which validations will be performed on the `length`.
-
-```js
-const { generateKeySync } = require('crypto');
-
-const key = generateKeySync('hmac', 64);
-console.log(key.export().toString('hex')); // e89..........41e
-```
-
### `crypto.generateKeyPair(type, options, callback)`
+
+* `type`: {string} The intended use of the generated secret key. Currently
+ accepted values are `'hmac'` and `'aes'`.
+* `options`: {Object}
+ * `length`: {number} The bit length of the key to generate.
+ * If `type` is `'hmac'`, the minimum is 1, and the maximum length is
+ 231-1. If the value is not a multiple of 8, the generated
+ key will be truncated to `Math.floor(length / 8)`.
+ * If `type` is `'aes'`, the length must be one of `128`, `192`, or `256`.
+* Returns: {KeyObject}
+
+Synchronously generates a new random secret key of the given `length`. The
+`type` will determine which validations will be performed on the `length`.
+
+```js
+const { generateKeySync } = require('crypto');
+
+const key = generateKeySync('hmac', 64);
+console.log(key.export().toString('hex')); // e89..........41e
+```
+
### `crypto.generatePrime(size[, options[, callback]])`
-
-* Returns: {string[]} An array with the names of the supported cipher
- algorithms.
-
-```js
-const ciphers = crypto.getCiphers();
-console.log(ciphers); // ['aes-128-cbc', 'aes-128-ccm', ...]
-```
-
### `crypto.getCipherInfo(nameOrNid[, options])`
+
+* Returns: {string[]} An array with the names of the supported cipher
+ algorithms.
+
+```js
+const ciphers = crypto.getCiphers();
+console.log(ciphers); // ['aes-128-cbc', 'aes-128-ccm', ...]
+```
+
### `crypto.getCurves()`
@@ -3250,7 +3310,7 @@ changes:
`crypto.constants`, which may be: `crypto.constants.RSA_NO_PADDING` or
`crypto.constants.RSA_PKCS1_PADDING`.
* `encoding` {string} The string encoding to use when `buffer`, `key`,
- or 'passphrase` are strings.
+ or `passphrase` are strings.
* `buffer` {string|ArrayBuffer|Buffer|TypedArray|DataView}
* Returns: {Buffer} A new `Buffer` with the decrypted content.
@@ -3302,7 +3362,7 @@ changes:
`crypto.constants.RSA_PKCS1_PADDING`, or
`crypto.constants.RSA_PKCS1_OAEP_PADDING`.
* `encoding` {string} The string encoding to use when `buffer`, `key`,
- `oaepLabel`, or 'passphrase` are strings.
+ `oaepLabel`, or `passphrase` are strings.
* `buffer` {string|ArrayBuffer|Buffer|TypedArray|DataView}
* Returns: {Buffer} A new `Buffer` with the encrypted content.
@@ -3614,6 +3674,9 @@ memory-wise in order to make brute-force attacks unrewarding.
The `salt` should be as unique as possible. It is recommended that a salt is
random and at least 16 bytes long. See [NIST SP 800-132][] for details.
+When passing strings for `password` or `salt`, please consider
+[caveats when using strings as inputs to cryptographic APIs][].
+
The `callback` function is called with two arguments: `err` and `derivedKey`.
`err` is an exception object when key derivation fails, otherwise `err` is
`null`. `derivedKey` is passed to the callback as a [`Buffer`][].
@@ -3672,6 +3735,9 @@ memory-wise in order to make brute-force attacks unrewarding.
The `salt` should be as unique as possible. It is recommended that a salt is
random and at least 16 bytes long. See [NIST SP 800-132][] for details.
+When passing strings for `password` or `salt`, please consider
+[caveats when using strings as inputs to cryptographic APIs][].
+
An exception is thrown when key derivation fails, otherwise the derived key is
returned as a [`Buffer`][].
@@ -3881,6 +3947,47 @@ See the [Web Crypto API documentation][] for details.
## Notes
+### Using strings as inputs to cryptographic APIs
+
+For historical reasons, many cryptographic APIs provided by Node.js accept
+strings as inputs where the underlying cryptographic algorithm works on byte
+sequences. These instances include plaintexts, ciphertexts, symmetric keys,
+initialization vectors, passphrases, salts, authentication tags,
+and additional authenticated data.
+
+When passing strings to cryptographic APIs, consider the following factors.
+
+* Not all byte sequences are valid UTF-8 strings. Therefore, when a byte
+ sequence of length `n` is derived from a string, its entropy is generally
+ lower than the entropy of a random or pseudo-random `n` byte sequence.
+ For example, no UTF-8 string will result in the byte sequence `c0 af`. Secret
+ keys should almost exclusively be random or pseudo-random byte sequences.
+* Similarly, when converting random or pseudo-random byte sequences to UTF-8
+ strings, subsequences that do not represent valid code points may be replaced
+ by the Unicode replacement character (`U+FFFD`). The byte representation of
+ the resulting Unicode string may, therefore, not be equal to the byte sequence
+ that the string was created from.
+
+ ```js
+ const original = [0xc0, 0xaf];
+ const bytesAsString = Buffer.from(original).toString('utf8');
+ const stringAsBytes = Buffer.from(bytesAsString, 'utf8');
+ console.log(stringAsBytes);
+ // Prints ''.
+ ```
+
+ The outputs of ciphers, hash functions, signature algorithms, and key
+ derivation functions are pseudo-random byte sequences and should not be
+ used as Unicode strings.
+* When strings are obtained from user input, some Unicode characters can be
+ represented in multiple equivalent ways that result in different byte
+ sequences. For example, when passing a user passphrase to a key derivation
+ function, such as PBKDF2 or scrypt, the result of the key derivation function
+ depends on whether the string uses composed or decomposed characters. Node.js
+ does not normalize character representations. Developers should consider using
+ [`String.prototype.normalize()`][] on user inputs before passing them to
+ cryptographic APIs.
+
### Legacy streams API (prior to Node.js 0.10)
The Crypto module was added to Node.js before there was the concept of a
@@ -4342,9 +4449,11 @@ See the [list of SSL OP Flags][] for details.
[AEAD algorithms]: https://en.wikipedia.org/wiki/Authenticated_encryption
[CCM mode]: #crypto_ccm_mode
[Caveats]: #crypto_support_for_weak_or_compromised_algorithms
+[caveats when using strings as inputs to cryptographic APIs]: #crypto_using_strings_as_inputs_to_cryptographic_apis
[Crypto constants]: #crypto_crypto_constants_1
[HTML 5.2]: https://www.w3.org/TR/html52/changes.html#features-removed
[HTML5's `keygen` element]: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/keygen
+[JWK]: https://tools.ietf.org/html/rfc7517
[NIST SP 800-131A]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-131Ar1.pdf
[NIST SP 800-132]: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-132.pdf
[NIST SP 800-38D]: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
@@ -4363,6 +4472,7 @@ See the [list of SSL OP Flags][] for details.
[`EVP_BytesToKey`]: https://www.openssl.org/docs/man1.1.0/crypto/EVP_BytesToKey.html
[`KeyObject`]: #crypto_class_keyobject
[`Sign`]: #crypto_class_sign
+[`String.prototype.normalize()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/normalize
[`UV_THREADPOOL_SIZE`]: cli.md#cli_uv_threadpool_size_size
[`Verify`]: #crypto_class_verify
[`cipher.final()`]: #crypto_cipher_final_outputencoding
diff --git a/doc/api/deprecations.md b/doc/api/deprecations.md
index c66e27895bcd61..ac0f99bf4fcf95 100644
--- a/doc/api/deprecations.md
+++ b/doc/api/deprecations.md
@@ -173,7 +173,7 @@ changes:
description: A deprecation code has been assigned.
- version: v0.11.14
description: Runtime deprecation.
- - version: v0.5.11
+ - version: v0.5.10
description: Documentation-only deprecation.
-->
@@ -2480,7 +2480,7 @@ called, not whether `'finish'` has been emitted and the underlying data
is flushed.
Use [`response.writableFinished`][] or [`response.writableEnded`][]
-accordingly instead to avoid the ambigiuty.
+accordingly instead to avoid the ambiguity.
To maintain existing behaviour `response.finished` should be replaced with
`response.writableEnded`.
@@ -2681,7 +2681,7 @@ Use `fs.rm(path, { recursive: true, force: true })` instead.
-Used by the `N-API` when `Constructor.prototype` is not an object.
+Used by the `Node-API` when `Constructor.prototype` is not an object.
### `ERR_NO_LONGER_SUPPORTED`
@@ -2716,6 +2728,8 @@ The native call from `process.cpuUsage` could not be processed.
[ES Module]: esm.md
[ICU]: intl.md#intl_internationalization_support
+[JSON Web Key Elliptic Curve Registry]: https://www.iana.org/assignments/jose/jose.xhtml#web-key-elliptic-curve
+[JSON Web Key Types Registry]: https://www.iana.org/assignments/jose/jose.xhtml#web-key-types
[Node.js error codes]: #nodejs-error-codes
[RFC 7230 Section 3]: https://tools.ietf.org/html/rfc7230#section-3
[Subresource Integrity specification]: https://www.w3.org/TR/SRI/#the-integrity-attribute
diff --git a/doc/api/events.md b/doc/api/events.md
index 571f2893a741de..cd2d7fc6137945 100644
--- a/doc/api/events.md
+++ b/doc/api/events.md
@@ -1247,12 +1247,21 @@ target.addEventListener('foo', handler4, { once: true });
### `EventTarget` error handling
When a registered event listener throws (or returns a Promise that rejects),
-by default the error is forwarded to the `process.on('error')` event
-on `process.nextTick()`. Throwing within an event listener will *not* stop
-the other registered handlers from being invoked.
+by default the error is treated as an uncaught exception on
+`process.nextTick()`. This means uncaught exceptions in `EventTarget`s will
+terminate the Node.js process by default.
-The `EventTarget` does not implement any special default handling for
-`'error'` type events.
+Throwing within an event listener will *not* stop the other registered handlers
+from being invoked.
+
+The `EventTarget` does not implement any special default handling for `'error'`
+type events like `EventEmitter`.
+
+Currently errors are first forwarded to the `process.on('error')` event
+before reaching `process.on('uncaughtException')`. This behavior is
+deprecated and will change in a future release to align `EventTarget` with
+other Node.js APIs. Any code relying on the `process.on('error')` event should
+be aligned with the new behavior.
### Class: `Event`
-For most `fs` module functions, the `path` or `filename` argument may be passed
-as a WHATWG [`URL`][] object. Only [`URL`][] objects using the `file:` protocol
-are supported.
-```js
-const fs = require('fs');
-const fileUrl = new URL('file:///tmp/hello');
+The `fs/promises` API provides asynchronous file system methods that return
+promises.
-fs.readFileSync(fileUrl);
-```
+The promise APIs use the underlying Node.js threadpool to perform file
+system operations off the event loop thread. These operations are not
+synchronized or threadsafe. Care must be taken when performing multiple
+concurrent modifications on the same file or data corruption may occur.
-`file:` URLs are always absolute paths.
+### Class: `FileHandle`
+
-Using WHATWG [`URL`][] objects might introduce platform-specific behaviors.
+A {FileHandle} object is an object wrapper for a numeric file descriptor.
-On Windows, `file:` URLs with a host name convert to UNC paths, while `file:`
-URLs with drive letters convert to local absolute paths. `file:` URLs without a
-host name nor a drive letter will result in a throw:
+Instances of the {FileHandle} object are created by the `fsPromises.open()`
+method.
-```js
-// On Windows :
+All {FileHandle} objects are {EventEmitter}s.
-// - WHATWG file URLs with hostname convert to UNC path
-// file://hostname/p/a/t/h/file => \\hostname\p\a\t\h\file
-fs.readFileSync(new URL('file://hostname/p/a/t/h/file'));
+If a {FileHandle} is not closed using the `filehandle.close()` method, it will
+try to automatically close the file descriptor and emit a process warning,
+helping to prevent memory leaks. Please do not rely on this behavior because
+it can be unreliable and the file may not be closed. Instead, always explicitly
+close {FileHandle}s. Node.js may change this behavior in the future.
-// - WHATWG file URLs with drive letters convert to absolute path
-// file:///C:/tmp/hello => C:\tmp\hello
-fs.readFileSync(new URL('file:///C:/tmp/hello'));
+#### Event: `'close'`
+
-// - WHATWG file URLs without hostname must have a drive letters
-fs.readFileSync(new URL('file:///notdriveletter/p/a/t/h/file'));
-fs.readFileSync(new URL('file:///c/p/a/t/h/file'));
-// TypeError [ERR_INVALID_FILE_URL_PATH]: File URL path must be absolute
-```
+The `'close'` event is emitted when the {FileHandle} has been closed and can no
+longer be used.
-`file:` URLs with drive letters must use `:` as a separator just after
-the drive letter. Using another separator will result in a throw.
+#### `filehandle.appendFile(data[, options])`
+
-On all other platforms, `file:` URLs with a host name are unsupported and will
-result in a throw:
+* `data` {string|Buffer|TypedArray|DataView}
+* `options` {Object|string}
+ * `encoding` {string|null} **Default:** `'utf8'`
+* Returns: {Promise} Fulfills with `undefined` upon success.
-```js
-// On other platforms:
+Alias of [`filehandle.writeFile()`][].
-// - WHATWG file URLs with hostname are unsupported
-// file://hostname/p/a/t/h/file => throw!
-fs.readFileSync(new URL('file://hostname/p/a/t/h/file'));
-// TypeError [ERR_INVALID_FILE_URL_PATH]: must be absolute
+When operating on file handles, the mode cannot be changed from what it was set
+to with [`fsPromises.open()`][]. Therefore, this is equivalent to
+[`filehandle.writeFile()`][].
-// - WHATWG file URLs convert to absolute path
-// file:///tmp/hello => /tmp/hello
-fs.readFileSync(new URL('file:///tmp/hello'));
-```
+#### `filehandle.chmod(mode)`
+
-A `file:` URL having encoded slash characters will result in a throw on all
-platforms:
+* `mode` {integer} the file mode bit mask.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-```js
-// On Windows
-fs.readFileSync(new URL('file:///C:/p/a/t/h/%2F'));
-fs.readFileSync(new URL('file:///C:/p/a/t/h/%2f'));
-/* TypeError [ERR_INVALID_FILE_URL_PATH]: File URL path must not include encoded
-\ or / characters */
+Modifies the permissions on the file. See chmod(2).
-// On POSIX
-fs.readFileSync(new URL('file:///p/a/t/h/%2F'));
-fs.readFileSync(new URL('file:///p/a/t/h/%2f'));
-/* TypeError [ERR_INVALID_FILE_URL_PATH]: File URL path must not include encoded
-/ characters */
-```
+#### `filehandle.chown(uid, gid)`
+
-On Windows, `file:` URLs having encoded backslash will result in a throw:
+* `uid` {integer} The file's new owner's user id.
+* `gid` {integer} The file's new group's group id.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-```js
-// On Windows
-fs.readFileSync(new URL('file:///C:/path/%5C'));
-fs.readFileSync(new URL('file:///C:/path/%5c'));
-/* TypeError [ERR_INVALID_FILE_URL_PATH]: File URL path must not include encoded
-\ or / characters */
-```
+Changes the ownership of the file. A wrapper for chown(2).
-## File descriptors
+#### `filehandle.close()`
+
-On POSIX systems, for every process, the kernel maintains a table of currently
-open files and resources. Each open file is assigned a simple numeric
-identifier called a *file descriptor*. At the system-level, all file system
-operations use these file descriptors to identify and track each specific
-file. Windows systems use a different but conceptually similar mechanism for
-tracking resources. To simplify things for users, Node.js abstracts away the
-specific differences between operating systems and assigns all open files a
-numeric file descriptor.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-The `fs.open()` method is used to allocate a new file descriptor. Once
-allocated, the file descriptor may be used to read data from, write data to,
-or request information about the file.
+Closes the file handle after waiting for any pending operation on the handle to
+complete.
-```js
-fs.open('/open/some/file.txt', 'r', (err, fd) => {
- if (err) throw err;
- fs.fstat(fd, (err, stat) => {
- if (err) throw err;
- // use stat
+```js esm
+import { open } from 'fs/promises';
- // always close the file descriptor!
- fs.close(fd, (err) => {
- if (err) throw err;
- });
- });
-});
+let filehandle;
+try {
+ filehandle = await open('thefile.txt', 'r');
+} finally {
+ await filehandle?.close();
+}
```
-Most operating systems limit the number of file descriptors that may be open
-at any given time so it is critical to close the descriptor when operations
-are completed. Failure to do so will result in a memory leak that will
-eventually cause an application to crash.
-
-## Threadpool usage
-
-All file system APIs except `fs.FSWatcher()` and those that are explicitly
-synchronous use libuv's threadpool, which can have surprising and negative
-performance implications for some applications. See the
-[`UV_THREADPOOL_SIZE`][] documentation for more information.
-
-## Class: `fs.Dir`
+#### `filehandle.datasync()`
-A class representing a directory stream.
-
-Created by [`fs.opendir()`][], [`fs.opendirSync()`][], or
-[`fsPromises.opendir()`][].
+* Returns: {Promise} Fulfills with `undefined` upon success.
-```js
-const fs = require('fs');
+Forces all currently queued I/O operations associated with the file to the
+operating system's synchronized I/O completion state. Refer to the POSIX
+fdatasync(2) documentation for details.
-async function print(path) {
- const dir = await fs.promises.opendir(path);
- for await (const dirent of dir) {
- console.log(dirent.name);
- }
-}
-print('./').catch(console.error);
-```
+Unlike `filehandle.sync` this method does not flush modified metadata.
-### `dir.close()`
+#### `filehandle.fd`
-* Returns: {Promise}
-
-Asynchronously close the directory's underlying resource handle.
-Subsequent reads will result in errors.
-
-A `Promise` is returned that will be resolved after the resource has been
-closed.
+* {number} The numeric file descriptor managed by the {FileHandle} object.
-### `dir.close(callback)`
+#### `filehandle.read(buffer, offset, length, position)`
-* `callback` {Function}
- * `err` {Error}
+* `buffer` {Buffer|Uint8Array} A buffer that will be filled with the file
+ data read.
+* `offset` {integer} The location in the buffer at which to start filling.
+ **Default:** `0`
+* `length` {integer} The number of bytes to read. **Default:** `buffer.length`
+* `position` {integer} The location where to begin reading data from the
+ file. If `null`, data will be read from the current file position, and
+ the position will be updated. If `position` is an integer, the current
+ file position will remain unchanged.
+* Returns: {Promise} Fulfills upon success with an object with two properties:
+ * `bytesRead` {integer} The number of bytes read
+ * `buffer` {Buffer|Uint8Array} A reference to the passed in `buffer` argument.
-Asynchronously close the directory's underlying resource handle.
-Subsequent reads will result in errors.
+Reads data from the file and stores that in the given buffer.
-The `callback` will be called after the resource handle has been closed.
+If the file is not modified concurrently, the end-of-file is reached when the
+number of bytes read is zero.
-### `dir.closeSync()`
+#### `filehandle.read(options)`
+* `options` {Object}
+ * `buffer` {Buffer|Uint8Array} A buffer that will be filled with the file
+ data read. **Default:** `Buffer.alloc(16384)`
+ * `offset` {integer} The location in the buffer at which to start filling.
+ **Default:** `0`
+ * `length` {integer} The number of bytes to read. **Default:** `buffer.length`
+ * `position` {integer} The location where to begin reading data from the
+ file. If `null`, data will be read from the current file position, and
+ the position will be updated. If `position` is an integer, the current
+   file position will remain unchanged. **Default:** `null`
+* Returns: {Promise} Fulfills upon success with an object with two properties:
+ * `bytesRead` {integer} The number of bytes read
+ * `buffer` {Buffer|Uint8Array} A reference to the passed in `buffer`
+ argument.
+
+Reads data from the file and stores that in the given buffer.
-Synchronously close the directory's underlying resource handle.
-Subsequent reads will result in errors.
+If the file is not modified concurrently, the end-of-file is reached when the
+number of bytes read is zero.
-### `dir.path`
+#### `filehandle.readFile(options)`
-* {string}
-
-The read-only path of this directory as was provided to [`fs.opendir()`][],
-[`fs.opendirSync()`][], or [`fsPromises.opendir()`][].
-
-### `dir.read()`
-
+* `options` {Object|string}
+ * `encoding` {string|null} **Default:** `null`
+ * `signal` {AbortSignal} allows aborting an in-progress readFile
+* Returns: {Promise} Fulfills upon a successful read with the contents of the
+ file. If no encoding is specified (using `options.encoding`), the data is
+ returned as a {Buffer} object. Otherwise, the data will be a string.
-* Returns: {Promise} containing {fs.Dirent|null}
+Asynchronously reads the entire contents of a file.
-Asynchronously read the next directory entry via readdir(3) as an
-[`fs.Dirent`][].
+If `options` is a string, then it specifies the `encoding`.
-After the read is completed, a `Promise` is returned that will be resolved with
-an [`fs.Dirent`][], or `null` if there are no more directory entries to read.
+The {FileHandle} has to support reading.
-Directory entries returned by this function are in no particular order as
-provided by the operating system's underlying directory mechanisms.
-Entries added or removed while iterating over the directory might not be
-included in the iteration results.
+If one or more `filehandle.read()` calls are made on a file handle and then a
+`filehandle.readFile()` call is made, the data will be read from the current
+position till the end of the file. It doesn't always read from the beginning
+of the file.
-### `dir.read(callback)`
+#### `filehandle.readv(buffers[, position])`
-* `callback` {Function}
- * `err` {Error}
- * `dirent` {fs.Dirent|null}
-
-Asynchronously read the next directory entry via readdir(3) as an
-[`fs.Dirent`][].
-
-After the read is completed, the `callback` will be called with an
-[`fs.Dirent`][], or `null` if there are no more directory entries to read.
+* `buffers` {Buffer[]|TypedArray[]|DataView[]}
+* `position` {integer} The offset from the beginning of the file where the data
+ should be read from. If `position` is not a `number`, the data will be read
+ from the current position.
+* Returns: {Promise} Fulfills upon success with an object containing two
+  properties:
+ * `bytesRead` {integer} the number of bytes read
+ * `buffers` {Buffer[]|TypedArray[]|DataView[]} property containing
+ a reference to the `buffers` input.
-Directory entries returned by this function are in no particular order as
-provided by the operating system's underlying directory mechanisms.
-Entries added or removed while iterating over the directory might not be
-included in the iteration results.
+Read from a file and write to an array of {ArrayBufferView}s.
-### `dir.readSync()`
+#### `filehandle.stat([options])`
-* Returns: {fs.Dirent|null}
+* `options` {Object}
+ * `bigint` {boolean} Whether the numeric values in the returned
+ {fs.Stats} object should be `bigint`. **Default:** `false`.
+* Returns: {Promise} Fulfills with an {fs.Stats} for the file.
-Synchronously read the next directory entry via readdir(3) as an
-[`fs.Dirent`][].
+#### `filehandle.sync()`
+
-If there are no more directory entries to read, `null` will be returned.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-Directory entries returned by this function are in no particular order as
-provided by the operating system's underlying directory mechanisms.
-Entries added or removed while iterating over the directory might not be
-included in the iteration results.
+Request that all data for the open file descriptor is flushed to the storage
+device. The specific implementation is operating system and device specific.
+Refer to the POSIX fsync(2) documentation for more detail.
-### `dir[Symbol.asyncIterator]()`
+#### `filehandle.truncate(len)`
-* Returns: {AsyncIterator} of {fs.Dirent}
-
-Asynchronously iterates over the directory via readdir(3) until all entries have
-been read.
+* `len` {integer} **Default:** `0`
+* Returns: {Promise} Fulfills with `undefined` upon success.
-Entries returned by the async iterator are always an [`fs.Dirent`][].
-The `null` case from `dir.read()` is handled internally.
+Truncates the file.
-See [`fs.Dir`][] for an example.
+If the file was larger than `len` bytes, only the first `len` bytes will be
+retained in the file.
-Directory entries returned by this iterator are in no particular order as
-provided by the operating system's underlying directory mechanisms.
-Entries added or removed while iterating over the directory might not be
-included in the iteration results.
+The following example retains only the first four bytes of the file:
-## Class: `fs.Dirent`
-
+```js esm
+import { open } from 'fs/promises';
-A representation of a directory entry, which can be a file or a subdirectory
-within the directory, as returned by reading from an [`fs.Dir`][]. The
-directory entry is a combination of the file name and file type pairs.
+let filehandle = null;
+try {
+ filehandle = await open('temp.txt', 'r+');
+ await filehandle.truncate(4);
+} finally {
+ filehandle?.close();
+}
+```
-Additionally, when [`fs.readdir()`][] or [`fs.readdirSync()`][] is called with
-the `withFileTypes` option set to `true`, the resulting array is filled with
-`fs.Dirent` objects, rather than strings or `Buffers`.
+If the file previously was shorter than `len` bytes, it is extended, and the
+extended part is filled with null bytes (`'\0'`):
-### `dirent.isBlockDevice()`
+#### `filehandle.utimes(atime, mtime)`
-* Returns: {boolean}
+* `atime` {number|string|Date}
+* `mtime` {number|string|Date}
+* Returns: {Promise}
+
+Change the file system timestamps of the object referenced by the {FileHandle}
+then resolves the promise with no arguments upon success.
-Returns `true` if the `fs.Dirent` object describes a block device.
+This function does not work on AIX versions before 7.1; it will reject the
+promise with an error using code `UV_ENOSYS`.
-### `dirent.isCharacterDevice()`
+#### `filehandle.write(buffer[, offset[, length[, position]]])`
-* Returns: {boolean}
+* `buffer` {Buffer|Uint8Array|string|Object}
+* `offset` {integer} The start position from within `buffer` where the data
+ to write begins.
+* `length` {integer} The number of bytes from `buffer` to write.
+* `position` {integer} The offset from the beginning of the file where the
+ data from `buffer` should be written. If `position` is not a `number`,
+ the data will be written at the current position. See the POSIX pwrite(2)
+ documentation for more detail.
+* Returns: {Promise}
-Returns `true` if the `fs.Dirent` object describes a character device.
+Write `buffer` to the file.
-### `dirent.isDirectory()`
-
+The promise is resolved with an object containing two properties:
-* Returns: {boolean}
+* `bytesWritten` {integer} the number of bytes written
+* `buffer` {Buffer|Uint8Array|string|Object} a reference to the `buffer`
+ written.
-Returns `true` if the `fs.Dirent` object describes a file system
-directory.
+It is unsafe to use `filehandle.write()` multiple times on the same file
+without waiting for the promise to be resolved (or rejected). For this
+scenario, use [`fs.createWriteStream()`][].
+
+On Linux, positional writes do not work when the file is opened in append mode.
+The kernel ignores the position argument and always appends the data to
+the end of the file.
-### `dirent.isFIFO()`
+#### `filehandle.write(string[, position[, encoding]])`
-* Returns: {boolean}
+* `string` {string|Object}
+* `position` {integer} The offset from the beginning of the file where the
+ data from `string` should be written. If `position` is not a `number` the
+ data will be written at the current position. See the POSIX pwrite(2)
+ documentation for more detail.
+* `encoding` {string} The expected string encoding. **Default:** `'utf8'`
+* Returns: {Promise}
-Returns `true` if the `fs.Dirent` object describes a first-in-first-out
-(FIFO) pipe.
+Write `string` to the file. If `string` is not a string, or an object with an
+own `toString` function property, the promise is rejected with an error.
-### `dirent.isFile()`
-
+The promise is resolved with an object containing two properties:
-* Returns: {boolean}
+* `bytesWritten` {integer} the number of bytes written
+* `buffer` {string|Object} a reference to the `string` written.
+
+It is unsafe to use `filehandle.write()` multiple times on the same file
+without waiting for the promise to be resolved (or rejected). For this
+scenario, use [`fs.createWriteStream()`][].
-Returns `true` if the `fs.Dirent` object describes a regular file.
+On Linux, positional writes do not work when the file is opened in append mode.
+The kernel ignores the position argument and always appends the data to
+the end of the file.
-### `dirent.isSocket()`
+#### `filehandle.writeFile(data, options)`
-* Returns: {boolean}
+* `data` {string|Buffer|Uint8Array|Object}
+* `options` {Object|string}
+ * `encoding` {string|null} The expected character encoding when `data` is a
+ string. **Default:** `'utf8'`
+* Returns: {Promise}
-Returns `true` if the `fs.Dirent` object describes a socket.
+Asynchronously writes data to a file, replacing the file if it already exists.
+`data` can be a string, a buffer, or an object with an own `toString` function
+property. The promise is resolved with no arguments upon success.
-### `dirent.isSymbolicLink()`
-
+If `options` is a string, then it specifies the `encoding`.
-* Returns: {boolean}
+The {FileHandle} has to support writing.
+
+It is unsafe to use `filehandle.writeFile()` multiple times on the same file
+without waiting for the promise to be resolved (or rejected).
-Returns `true` if the `fs.Dirent` object describes a symbolic link.
+If one or more `filehandle.write()` calls are made on a file handle and then a
+`filehandle.writeFile()` call is made, the data will be written from the
+current position till the end of the file. It doesn't always write from the
+beginning of the file.
-### `dirent.name`
+#### `filehandle.writev(buffers[, position])`
-* {string|Buffer}
+* `buffers` {Buffer[]|TypedArray[]|DataView[]}
+* `position` {integer} The offset from the beginning of the file where the
+ data from `buffers` should be written. If `position` is not a `number`,
+ the data will be written at the current position.
+* Returns: {Promise}
-The file name that this `fs.Dirent` object refers to. The type of this
-value is determined by the `options.encoding` passed to [`fs.readdir()`][] or
-[`fs.readdirSync()`][].
+Write an array of {ArrayBufferView}s to the file.
-## Class: `fs.FSWatcher`
-
+The promise is resolved with an object containing two properties:
-* Extends {EventEmitter}
+* `bytesWritten` {integer} the number of bytes written
+* `buffers` {Buffer[]|TypedArray[]|DataView[]} a reference to the `buffers`
+ input.
-A successful call to [`fs.watch()`][] method will return a new `fs.FSWatcher`
-object.
+It is unsafe to call `writev()` multiple times on the same file without waiting
+for the promise to be resolved (or rejected).
-All `fs.FSWatcher` objects emit a `'change'` event whenever a specific watched
-file is modified.
+On Linux, positional writes don't work when the file is opened in append mode.
+The kernel ignores the position argument and always appends the data to
+the end of the file.
-### Event: `'change'`
+### `fsPromises.access(path[, mode])`
-* `eventType` {string} The type of change event that has occurred
-* `filename` {string|Buffer} The filename that changed (if relevant/available)
+* `path` {string|Buffer|URL}
+* `mode` {integer} **Default:** `fs.constants.F_OK`
+* Returns: {Promise} Fulfills with `undefined` upon success.
-Emitted when something changes in a watched directory or file.
-See more details in [`fs.watch()`][].
+Tests a user's permissions for the file or directory specified by `path`.
+The `mode` argument is an optional integer that specifies the accessibility
+checks to be performed. Check [File access constants][] for possible values
+of `mode`. It is possible to create a mask consisting of the bitwise OR of
+two or more values (e.g. `fs.constants.W_OK | fs.constants.R_OK`).
-The `filename` argument may not be provided depending on operating system
-support. If `filename` is provided, it will be provided as a `Buffer` if
-`fs.watch()` is called with its `encoding` option set to `'buffer'`, otherwise
-`filename` will be a UTF-8 string.
+If the accessibility check is successful, the promise is resolved with no
+value. If any of the accessibility checks fail, the promise is rejected
+with an {Error} object. The following example checks if the file
+`/etc/passwd` can be read and written by the current process.
-```js
-// Example when handled through fs.watch() listener
-fs.watch('./tmp', { encoding: 'buffer' }, (eventType, filename) => {
- if (filename) {
- console.log(filename);
- // Prints:
- }
-});
+```js esm
+import { access } from 'fs/promises';
+import { constants } from 'fs';
+
+try {
+ await access('/etc/passwd', constants.R_OK | constants.W_OK);
+ console.log('can access');
+} catch {
+ console.error('cannot access');
+}
```
-### Event: `'close'`
+Using `fsPromises.access()` to check for the accessibility of a file before
+calling `fsPromises.open()` is not recommended. Doing so introduces a race
+condition, since other processes may change the file's state between the two
+calls. Instead, user code should open/read/write the file directly and handle
+the error raised if the file is not accessible.
+
+### `fsPromises.appendFile(path, data[, options])`
-Emitted when the watcher stops watching for changes. The closed
-`fs.FSWatcher` object is no longer usable in the event handler.
+* `path` {string|Buffer|URL|FileHandle} filename or {FileHandle}
+* `data` {string|Buffer}
+* `options` {Object|string}
+ * `encoding` {string|null} **Default:** `'utf8'`
+ * `mode` {integer} **Default:** `0o666`
+ * `flag` {string} See [support of file system `flags`][]. **Default:** `'a'`.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-### Event: `'error'`
-
+Asynchronously append data to a file, creating the file if it does not yet
+exist. `data` can be a string or a {Buffer}.
-* `error` {Error}
+If `options` is a string, then it specifies the `encoding`.
-Emitted when an error occurs while watching the file. The errored
-`fs.FSWatcher` object is no longer usable in the event handler.
+The `path` may be specified as a {FileHandle} that has been opened
+for appending (using `fsPromises.open()`).
-### `watcher.close()`
+### `fsPromises.chmod(path, mode)`
-Stop watching for changes on the given `fs.FSWatcher`. Once stopped, the
-`fs.FSWatcher` object is no longer usable.
+* `path` {string|Buffer|URL}
+* `mode` {string|integer}
+* Returns: {Promise} Fulfills with `undefined` upon success.
+
+Changes the permissions of a file.
-### `watcher.ref()`
+### `fsPromises.chown(path, uid, gid)`
-* Returns: {fs.FSWatcher}
-
-When called, requests that the Node.js event loop *not* exit so long as the
-`FSWatcher` is active. Calling `watcher.ref()` multiple times will have
-no effect.
+* `path` {string|Buffer|URL}
+* `uid` {integer}
+* `gid` {integer}
+* Returns: {Promise} Fulfills with `undefined` upon success.
-By default, all `FSWatcher` objects are "ref'ed", making it normally
-unnecessary to call `watcher.ref()` unless `watcher.unref()` had been
-called previously.
+Changes the ownership of a file.
-### `watcher.unref()`
+### `fsPromises.copyFile(src, dest[, mode])`
-* Returns: {fs.FSWatcher}
+* `src` {string|Buffer|URL} source filename to copy
+* `dest` {string|Buffer|URL} destination filename of the copy operation
+* `mode` {integer} Optional modifiers that specify the behavior of the copy
+ operation. It is possible to create a mask consisting of the bitwise OR of
+ two or more values (e.g.
+ `fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`)
+ **Default:** `0`.
+ * `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest`
+ already exists.
+ * `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create
+ a copy-on-write reflink. If the platform does not support copy-on-write,
+ then a fallback copy mechanism is used.
+ * `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
+ create a copy-on-write reflink. If the platform does not support
+ copy-on-write, then the operation will fail.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-When called, the active `FSWatcher` object will not require the Node.js
-event loop to remain active. If there is no other activity keeping the
-event loop running, the process may exit before the `FSWatcher` object's
-callback is invoked. Calling `watcher.unref()` multiple times will have
-no effect.
+Asynchronously copies `src` to `dest`. By default, `dest` is overwritten if it
+already exists.
-## Class: `fs.StatWatcher`
-
+No guarantees are made about the atomicity of the copy operation. If an
+error occurs after the destination file has been opened for writing, an attempt
+will be made to remove the destination.
-* Extends {EventEmitter}
+```js esm
+import { constants } from 'fs';
+import { copyFile } from 'fs/promises';
-A successful call to `fs.watchFile()` method will return a new `fs.StatWatcher`
-object.
+try {
+ await copyFile('source.txt', 'destination.txt');
+ console.log('source.txt was copied to destination.txt');
+} catch {
+ console.log('The file could not be copied');
+}
+
+// By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
+try {
+ await copyFile('source.txt', 'destination.txt', constants.COPYFILE_EXCL);
+ console.log('source.txt was copied to destination.txt');
+} catch {
+ console.log('The file could not be copied');
+}
+```
-### `watcher.ref()`
+### `fsPromises.lchmod(path, mode)`
-* Returns: {fs.StatWatcher}
+* `path` {string|Buffer|URL}
+* `mode` {integer}
+* Returns: {Promise} Fulfills with `undefined` upon success.
-When called, requests that the Node.js event loop *not* exit so long as the
-`StatWatcher` is active. Calling `watcher.ref()` multiple times will have
-no effect.
+Changes the permissions on a symbolic link.
-By default, all `StatWatcher` objects are "ref'ed", making it normally
-unnecessary to call `watcher.ref()` unless `watcher.unref()` had been
-called previously.
+This method is only implemented on macOS.
-### `watcher.unref()`
+### `fsPromises.lchown(path, uid, gid)`
-* Returns: {fs.StatWatcher}
+* `path` {string|Buffer|URL}
+* `uid` {integer}
+* `gid` {integer}
+* Returns: {Promise} Fulfills with `undefined` upon success.
-When called, the active `StatWatcher` object will not require the Node.js
-event loop to remain active. If there is no other activity keeping the
-event loop running, the process may exit before the `StatWatcher` object's
-callback is invoked. Calling `watcher.unref()` multiple times will have
-no effect.
+Changes the ownership on a symbolic link.
-## Class: `fs.ReadStream`
+### `fsPromises.lutimes(path, atime, mtime)`
-* Extends: {stream.Readable}
+* `path` {string|Buffer|URL}
+* `atime` {number|string|Date}
+* `mtime` {number|string|Date}
+* Returns: {Promise} Fulfills with `undefined` upon success.
-Instances of `fs.ReadStream` are created and returned using the
-[`fs.createReadStream()`][] function.
+Changes the access and modification times of a file in the same way as
+[`fsPromises.utimes()`][], with the difference that if the path refers to a
+symbolic link, then the link is not dereferenced: instead, the timestamps of
+the symbolic link itself are changed.
-### Event: `'close'`
+### `fsPromises.link(existingPath, newPath)`
-Emitted when the `fs.ReadStream`'s underlying file descriptor has been closed.
+* `existingPath` {string|Buffer|URL}
+* `newPath` {string|Buffer|URL}
+* Returns: {Promise} Fulfills with `undefined` upon success.
+
+Creates a new link from the `existingPath` to the `newPath`. See the POSIX
+link(2) documentation for more detail.
-### Event: `'open'`
+### `fsPromises.lstat(path[, options])`
-* `fd` {integer} Integer file descriptor used by the `ReadStream`.
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `bigint` {boolean} Whether the numeric values in the returned
+ {fs.Stats} object should be `bigint`. **Default:** `false`.
+* Returns: {Promise} Fulfills with the {fs.Stats} object for the given
+ symbolic link `path`.
-Emitted when the `fs.ReadStream`'s file descriptor has been opened.
+Equivalent to `fsPromises.stat()` when `path` refers to a symbolic link.
+Refer to the POSIX lstat(2) document for more detail.
-### Event: `'ready'`
+### `fsPromises.mkdir(path[, options])`
-Emitted when the `fs.ReadStream` is ready to be used.
+* `path` {string|Buffer|URL}
+* `options` {Object|integer}
+ * `recursive` {boolean} **Default:** `false`
+ * `mode` {string|integer} Not supported on Windows. **Default:** `0o777`.
+* Returns: {Promise} Upon success, fulfills with `undefined` if `recursive`
+ is `false`, or the first directory path created if `recursive` is `true`.
-Fires immediately after `'open'`.
+Asynchronously creates a directory.
-### `readStream.bytesRead`
+The optional `options` argument can be an integer specifying `mode` (permission
+and sticky bits), or an object with a `mode` property and a `recursive`
+property indicating whether parent directories should be created. Calling
+`fsPromises.mkdir()` when `path` is a directory that exists results in a
+rejection only when `recursive` is false.
+
+### `fsPromises.mkdtemp(prefix[, options])`
-* {number}
+* `prefix` {string}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+* Returns: {Promise} Fulfills with a string containing the filesystem path
+ of the newly created temporary directory.
-The number of bytes that have been read so far.
+Creates a unique temporary directory. A unique directory name is generated by
+appending six random characters to the end of the provided `prefix`. Due to
+platform inconsistencies, avoid trailing `X` characters in `prefix`. Some
+platforms, notably the BSDs, can return more than six random characters, and
+replace trailing `X` characters in `prefix` with random characters.
-### `readStream.path`
-
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use.
-* {string|Buffer}
+```js esm
+import { mkdtemp } from 'fs/promises';
-The path to the file the stream is reading from as specified in the first
-argument to `fs.createReadStream()`. If `path` is passed as a string, then
-`readStream.path` will be a string. If `path` is passed as a `Buffer`, then
-`readStream.path` will be a `Buffer`.
+try {
+ await mkdtemp(path.join(os.tmpdir(), 'foo-'));
+} catch (err) {
+ console.error(err);
+}
+```
+
+The `fsPromises.mkdtemp()` method will append the six randomly selected
+characters directly to the `prefix` string. For instance, given a directory
+`/tmp`, if the intention is to create a temporary directory *within* `/tmp`, the
+`prefix` must end with a trailing platform-specific path separator
+(`require('path').sep`).
-### `readStream.pending`
+### `fsPromises.open(path, flags[, mode])`
-* {boolean}
+* `path` {string|Buffer|URL}
+* `flags` {string|number} See [support of file system `flags`][].
+ **Default:** `'r'`.
+* `mode` {string|integer} Sets the file mode (permission and sticky bits)
+ if the file is created. **Default:** `0o666` (readable and writable)
+* Returns: {Promise} Fulfills with a {FileHandle} object.
-This property is `true` if the underlying file has not been opened yet,
-i.e. before the `'ready'` event is emitted.
+Opens a {FileHandle}.
+
+Refer to the POSIX open(2) documentation for more detail.
-## Class: `fs.Stats`
+Some characters (`< > : " / \ | ? *`) are reserved under Windows as documented
+by [Naming Files, Paths, and Namespaces][]. Under NTFS, if the filename contains
+a colon, Node.js will open a file system stream, as described by
+[this MSDN page][MSDN-Using-Streams].
+
+### `fsPromises.opendir(path[, options])`
-A `fs.Stats` object provides information about a file.
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `encoding` {string|null} **Default:** `'utf8'`
+ * `bufferSize` {number} Number of directory entries that are buffered
+ internally when reading from the directory. Higher values lead to better
+ performance but higher memory usage. **Default:** `32`
+* Returns: {Promise} Fulfills with an {fs.Dir}.
-Objects returned from [`fs.stat()`][], [`fs.lstat()`][] and [`fs.fstat()`][] and
-their synchronous counterparts are of this type.
-If `bigint` in the `options` passed to those methods is true, the numeric values
-will be `bigint` instead of `number`, and the object will contain additional
-nanosecond-precision properties suffixed with `Ns`.
+Asynchronously open a directory for iterative scanning. See the POSIX
+opendir(3) documentation for more detail.
-```console
-Stats {
- dev: 2114,
- ino: 48064969,
- mode: 33188,
- nlink: 1,
- uid: 85,
- gid: 100,
- rdev: 0,
- size: 527,
- blksize: 4096,
- blocks: 8,
- atimeMs: 1318289051000.1,
- mtimeMs: 1318289051000.1,
- ctimeMs: 1318289051000.1,
- birthtimeMs: 1318289051000.1,
- atime: Mon, 10 Oct 2011 23:24:11 GMT,
- mtime: Mon, 10 Oct 2011 23:24:11 GMT,
- ctime: Mon, 10 Oct 2011 23:24:11 GMT,
- birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
-```
+Creates an {fs.Dir}, which contains all further functions for reading from
+and cleaning up the directory.
-`bigint` version:
+The `encoding` option sets the encoding for the `path` while opening the
+directory and subsequent read operations.
-```console
-BigIntStats {
- dev: 2114n,
- ino: 48064969n,
- mode: 33188n,
- nlink: 1n,
- uid: 85n,
- gid: 100n,
- rdev: 0n,
- size: 527n,
- blksize: 4096n,
- blocks: 8n,
- atimeMs: 1318289051000n,
- mtimeMs: 1318289051000n,
- ctimeMs: 1318289051000n,
- birthtimeMs: 1318289051000n,
- atimeNs: 1318289051000000000n,
- mtimeNs: 1318289051000000000n,
- ctimeNs: 1318289051000000000n,
- birthtimeNs: 1318289051000000000n,
- atime: Mon, 10 Oct 2011 23:24:11 GMT,
- mtime: Mon, 10 Oct 2011 23:24:11 GMT,
- ctime: Mon, 10 Oct 2011 23:24:11 GMT,
- birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
+Example using async iteration:
+
+```js esm
+import { opendir } from 'fs/promises';
+
+try {
+ const dir = await opendir('./');
+ for await (const dirent of dir)
+ console.log(dirent.name);
+} catch (err) {
+ console.error(err);
+}
```
-### `stats.isBlockDevice()`
+### `fsPromises.readdir(path[, options])`
-* Returns: {boolean}
+* `path` {string|Buffer|URL}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+ * `withFileTypes` {boolean} **Default:** `false`
+* Returns: {Promise} Fulfills with an array of the names of the files in
+ the directory excluding `'.'` and `'..'`.
-Returns `true` if the `fs.Stats` object describes a block device.
+Reads the contents of a directory.
-### `stats.isCharacterDevice()`
-
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use for
+the filenames. If the `encoding` is set to `'buffer'`, the filenames returned
+will be passed as {Buffer} objects.
-* Returns: {boolean}
+If `options.withFileTypes` is set to `true`, the resolved array will contain
+{fs.Dirent} objects.
-Returns `true` if the `fs.Stats` object describes a character device.
+```js esm
+import { readdir } from 'fs/promises';
-### `stats.isDirectory()`
-
-
-* Returns: {boolean}
-
-Returns `true` if the `fs.Stats` object describes a file system directory.
-
-If the `fs.Stats` object was obtained from [`fs.lstat()`][], this method will
-always return `false`. This is because [`fs.lstat()`][] returns information
-about a symbolic link itself and not the path it resolves to.
-
-### `stats.isFIFO()`
-
-
-* Returns: {boolean}
-
-Returns `true` if the `fs.Stats` object describes a first-in-first-out (FIFO)
-pipe.
-
-### `stats.isFile()`
-
-
-* Returns: {boolean}
-
-Returns `true` if the `fs.Stats` object describes a regular file.
-
-### `stats.isSocket()`
-
-
-* Returns: {boolean}
-
-Returns `true` if the `fs.Stats` object describes a socket.
+try {
+ const files = await readdir(path);
+ for await (const file of files)
+ console.log(file);
+} catch (err) {
+ console.error(err);
+}
+```
-### `stats.isSymbolicLink()`
+### `fsPromises.readFile(path[, options])`
-* Returns: {boolean}
-
-Returns `true` if the `fs.Stats` object describes a symbolic link.
-
-This method is only valid when using [`fs.lstat()`][].
-
-### `stats.dev`
-
-* {number|bigint}
+* `path` {string|Buffer|URL|FileHandle} filename or `FileHandle`
+* `options` {Object|string}
+ * `encoding` {string|null} **Default:** `null`
+ * `flag` {string} See [support of file system `flags`][]. **Default:** `'r'`.
+ * `signal` {AbortSignal} allows aborting an in-progress readFile
+* Returns: {Promise} Fulfills with the contents of the file.
-The numeric identifier of the device containing the file.
+Asynchronously reads the entire contents of a file.
-### `stats.ino`
+If no encoding is specified (using `options.encoding`), the data is returned
+as a {Buffer} object. Otherwise, the data will be a string.
-* {number|bigint}
+If `options` is a string, then it specifies the encoding.
-The file system specific "Inode" number for the file.
+When the `path` is a directory, the behavior of `fsPromises.readFile()` is
+platform-specific. On macOS, Linux, and Windows, the promise will be rejected
+with an error. On FreeBSD, a representation of the directory's contents will be
+returned.
-### `stats.mode`
+It is possible to abort an ongoing `readFile` using an {AbortSignal}. If a
+request is aborted the promise returned is rejected with an `AbortError`:
-* {number|bigint}
+```js esm
+import { readFile } from 'fs/promises';
-A bit-field describing the file type and mode.
+try {
+ const controller = new AbortController();
+ const signal = controller.signal;
+ readFile(fileName, { signal });
-### `stats.nlink`
+ // Abort the request
+ controller.abort();
+} catch (err) {
+ console.error(err);
+}
+```
-* {number|bigint}
+Aborting an ongoing request does not abort individual operating
+system requests but rather the internal buffering `fs.readFile` performs.
-The number of hard-links that exist for the file.
+Any specified {FileHandle} has to support reading.
-### `stats.uid`
+### `fsPromises.readlink(path[, options])`
+
-* {number|bigint}
+* `path` {string|Buffer|URL}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+* Returns: {Promise} Fulfills with the `linkString` upon success.
-The numeric user identifier of the user that owns the file (POSIX).
+Reads the contents of the symbolic link referred to by `path`. See the POSIX
+readlink(2) documentation for more detail. The promise is resolved with the
+`linkString` upon success.
-### `stats.gid`
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use for
+the link path returned. If the `encoding` is set to `'buffer'`, the link path
+returned will be passed as a {Buffer} object.
-* {number|bigint}
+### `fsPromises.realpath(path[, options])`
+
-The numeric group identifier of the group that owns the file (POSIX).
+* `path` {string|Buffer|URL}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+* Returns: {Promise} Fulfills with the resolved path upon success.
-### `stats.rdev`
+Determines the actual location of `path` using the same semantics as the
+`fs.realpath.native()` function.
-* {number|bigint}
+Only paths that can be converted to UTF8 strings are supported.
-A numeric device identifier if the file represents a device.
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use for
+the path. If the `encoding` is set to `'buffer'`, the path returned will be
+passed as a {Buffer} object.
-### `stats.size`
+On Linux, when Node.js is linked against musl libc, the procfs file system must
+be mounted on `/proc` in order for this function to work. Glibc does not have
+this restriction.
-* {number|bigint}
+### `fsPromises.rename(oldPath, newPath)`
+
-The size of the file in bytes.
+* `oldPath` {string|Buffer|URL}
+* `newPath` {string|Buffer|URL}
+* Returns: {Promise} Fulfills with `undefined` upon success.
-### `stats.blksize`
+Renames `oldPath` to `newPath`.
-* {number|bigint}
+### `fsPromises.rmdir(path[, options])`
+
-The file system block size for i/o operations.
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
+ `EPERM` error is encountered, Node.js retries the operation with a linear
+ backoff wait of `retryDelay` milliseconds longer on each try. This option
+ represents the number of retries. This option is ignored if the `recursive`
+ option is not `true`. **Default:** `0`.
+ * `recursive` {boolean} If `true`, perform a recursive directory removal. In
+ recursive mode, errors are not reported if `path` does not exist, and
+ operations are retried on failure. **Default:** `false`.
+ * `retryDelay` {integer} The amount of time in milliseconds to wait between
+ retries. This option is ignored if the `recursive` option is not `true`.
+ **Default:** `100`.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-### `stats.blocks`
+Removes the directory identified by `path`.
-* {number|bigint}
+Using `fsPromises.rmdir()` on a file (not a directory) results in the
+promise being rejected with an `ENOENT` error on Windows and an `ENOTDIR`
+error on POSIX.
-The number of blocks allocated for this file.
+Setting `recursive` to `true` results in behavior similar to the Unix command
+`rm -rf`: an error will not be raised for paths that do not exist, and paths
+that represent files will be deleted. The permissive behavior of the
+`recursive` option is deprecated, `ENOTDIR` and `ENOENT` will be thrown in
+the future.
-### `stats.atimeMs`
+### `fsPromises.rm(path[, options])`
-* {number|bigint}
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `force` {boolean} When `true`, exceptions will be ignored if `path` does
+ not exist. **Default:** `false`.
+ * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
+ `EPERM` error is encountered, Node.js will retry the operation with a linear
+ backoff wait of `retryDelay` milliseconds longer on each try. This option
+ represents the number of retries. This option is ignored if the `recursive`
+ option is not `true`. **Default:** `0`.
+ * `recursive` {boolean} If `true`, perform a recursive directory removal. In
+ recursive mode operations are retried on failure. **Default:** `false`.
+ * `retryDelay` {integer} The amount of time in milliseconds to wait between
+ retries. This option is ignored if the `recursive` option is not `true`.
+ **Default:** `100`.
+* Returns: {Promise} Fulfills with `undefined` upon success.
-The timestamp indicating the last time this file was accessed expressed in
-milliseconds since the POSIX Epoch.
+Removes files and directories (modeled on the standard POSIX `rm` utility).
-### `stats.mtimeMs`
+### `fsPromises.stat(path[, options])`
-* {number|bigint}
-
-The timestamp indicating the last time this file was modified expressed in
-milliseconds since the POSIX Epoch.
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `bigint` {boolean} Whether the numeric values in the returned
+ {fs.Stats} object should be `bigint`. **Default:** `false`.
+* Returns: {Promise} Fulfills with the {fs.Stats} object for the
+ given `path`.
-### `stats.ctimeMs`
+### `fsPromises.symlink(target, path[, type])`
-* {number|bigint}
+* `target` {string|Buffer|URL}
+* `path` {string|Buffer|URL}
+* `type` {string} **Default:** `'file'`
+* Returns: {Promise} Fulfills with `undefined` upon success.
-The timestamp indicating the last time the file status was changed expressed
-in milliseconds since the POSIX Epoch.
+Creates a symbolic link.
+
+The `type` argument is only used on Windows platforms and can be one of `'dir'`,
+`'file'`, or `'junction'`. Windows junction points require the destination path
+to be absolute. When using `'junction'`, the `target` argument will
+automatically be normalized to an absolute path.
-### `stats.birthtimeMs`
+### `fsPromises.truncate(path[, len])`
-* {number|bigint}
+* `path` {string|Buffer|URL}
+* `len` {integer} **Default:** `0`
+* Returns: {Promise} Fulfills with `undefined` upon success.
-The timestamp indicating the creation time of this file expressed in
-milliseconds since the POSIX Epoch.
+Truncates (shortens or extends the length) of the content at `path` to `len`
+bytes.
-### `stats.atimeNs`
+### `fsPromises.unlink(path)`
-* {bigint}
+* `path` {string|Buffer|URL}
+* Returns: {Promise} Fulfills with `undefined` upon success.
-Only present when `bigint: true` is passed into the method that generates
-the object.
-The timestamp indicating the last time this file was accessed expressed in
-nanoseconds since the POSIX Epoch.
+If `path` refers to a symbolic link, then the link is removed without affecting
+the file or directory to which that link refers. If the `path` refers to a file
+path that is not a symbolic link, the file is deleted. See the POSIX unlink(2)
+documentation for more detail.
-### `stats.mtimeNs`
+### `fsPromises.utimes(path, atime, mtime)`
-* {bigint}
-
-Only present when `bigint: true` is passed into the method that generates
-the object.
-The timestamp indicating the last time this file was modified expressed in
-nanoseconds since the POSIX Epoch.
+* `path` {string|Buffer|URL}
+* `atime` {number|string|Date}
+* `mtime` {number|string|Date}
+* Returns: {Promise} Fulfills with `undefined` upon success.
-### `stats.ctimeNs`
-
+Change the file system timestamps of the object referenced by `path`.
-* {bigint}
+The `atime` and `mtime` arguments follow these rules:
-Only present when `bigint: true` is passed into the method that generates
-the object.
-The timestamp indicating the last time the file status was changed expressed
-in nanoseconds since the POSIX Epoch.
+* Values can be either numbers representing Unix epoch time, `Date`s, or a
+ numeric string like `'123456789.0'`.
+* If the value can not be converted to a number, or is `NaN`, `Infinity` or
+ `-Infinity`, an `Error` will be thrown.
-### `stats.birthtimeNs`
+### `fsPromises.watch(filename[, options])`
-* {bigint}
-
-Only present when `bigint: true` is passed into the method that generates
-the object.
-The timestamp indicating the creation time of this file expressed in
-nanoseconds since the POSIX Epoch.
-
-### `stats.atime`
-
-
-* {Date}
-
-The timestamp indicating the last time this file was accessed.
-
-### `stats.mtime`
-
-
-* {Date}
-
-The timestamp indicating the last time this file was modified.
-
-### `stats.ctime`
-
-
-* {Date}
-
-The timestamp indicating the last time the file status was changed.
-
-### `stats.birthtime`
-
-
-* {Date}
-
-The timestamp indicating the creation time of this file.
-
-### Stat time values
-
-The `atimeMs`, `mtimeMs`, `ctimeMs`, `birthtimeMs` properties are
-numeric values that hold the corresponding times in milliseconds. Their
-precision is platform specific. When `bigint: true` is passed into the
-method that generates the object, the properties will be [bigints][],
-otherwise they will be [numbers][MDN-Number].
-
-The `atimeNs`, `mtimeNs`, `ctimeNs`, `birthtimeNs` properties are
-[bigints][] that hold the corresponding times in nanoseconds. They are
-only present when `bigint: true` is passed into the method that generates
-the object. Their precision is platform specific.
-
-`atime`, `mtime`, `ctime`, and `birthtime` are
-[`Date`][MDN-Date] object alternate representations of the various times. The
-`Date` and number values are not connected. Assigning a new number value, or
-mutating the `Date` value, will not be reflected in the corresponding alternate
-representation.
+* `filename` {string|Buffer|URL}
+* `options` {string|Object}
+ * `persistent` {boolean} Indicates whether the process should continue to run
+ as long as files are being watched. **Default:** `true`.
+ * `recursive` {boolean} Indicates whether all subdirectories should be
+ watched, or only the current directory. This applies when a directory is
+ specified, and only on supported platforms (See [caveats][]). **Default:**
+ `false`.
+ * `encoding` {string} Specifies the character encoding to be used for the
+ filename passed to the listener. **Default:** `'utf8'`.
+ * `signal` {AbortSignal} An {AbortSignal} used to signal when the watcher
+ should stop.
+* Returns: {AsyncIterator} of objects with the properties:
+ * `eventType` {string} The type of change
+ * `filename` {string|Buffer} The name of the file changed.
-The times in the stat object have the following semantics:
+Returns an async iterator that watches for changes on `filename`, where `filename`
+is either a file or a directory.
-* `atime` "Access Time": Time when file data last accessed. Changed
- by the mknod(2), utimes(2), and read(2) system calls.
-* `mtime` "Modified Time": Time when file data last modified.
- Changed by the mknod(2), utimes(2), and write(2) system calls.
-* `ctime` "Change Time": Time when file status was last changed
- (inode data modification). Changed by the chmod(2), chown(2),
- link(2), mknod(2), rename(2), unlink(2), utimes(2),
- read(2), and write(2) system calls.
-* `birthtime` "Birth Time": Time of file creation. Set once when the
- file is created. On filesystems where birthtime is not available,
- this field may instead hold either the `ctime` or
- `1970-01-01T00:00Z` (ie, Unix epoch timestamp `0`). This value may be greater
- than `atime` or `mtime` in this case. On Darwin and other FreeBSD variants,
- also set if the `atime` is explicitly set to an earlier value than the current
- `birthtime` using the utimes(2) system call.
+```js
+const { watch } = require('fs/promises');
-Prior to Node.js 0.12, the `ctime` held the `birthtime` on Windows systems. As
-of 0.12, `ctime` is not "creation time", and on Unix systems, it never was.
+const ac = new AbortController();
+const { signal } = ac;
+setTimeout(() => ac.abort(), 10000);
-## Class: `fs.WriteStream`
-
+(async () => {
+ try {
+ const watcher = watch(__filename, { signal });
+ for await (const event of watcher)
+ console.log(event);
+ } catch (err) {
+ if (err.name === 'AbortError')
+ return;
+ throw err;
+ }
+})();
+```
-* Extends {stream.Writable}
+On most platforms, `'rename'` is emitted whenever a filename appears or
+disappears in the directory.
-Instances of `fs.WriteStream` are created and returned using the
-[`fs.createWriteStream()`][] function.
+All the [caveats][] for `fs.watch()` also apply to `fsPromises.watch()`.
-### Event: `'close'`
+### `fsPromises.writeFile(file, data[, options])`
-Emitted when the `WriteStream`'s underlying file descriptor has been closed.
+* `file` {string|Buffer|URL|FileHandle} filename or `FileHandle`
+* `data` {string|Buffer|Uint8Array|Object}
+* `options` {Object|string}
+ * `encoding` {string|null} **Default:** `'utf8'`
+ * `mode` {integer} **Default:** `0o666`
+ * `flag` {string} See [support of file system `flags`][]. **Default:** `'w'`.
+ * `signal` {AbortSignal} allows aborting an in-progress writeFile
+* Returns: {Promise} Fulfills with `undefined` upon success.
-### Event: `'open'`
-
+Asynchronously writes data to a file, replacing the file if it already exists.
+`data` can be a string, a {Buffer}, or an object with an own `toString` function
+property.
-* `fd` {integer} Integer file descriptor used by the `WriteStream`.
+The `encoding` option is ignored if `data` is a buffer.
-Emitted when the `WriteStream`'s file is opened.
+If `options` is a string, then it specifies the encoding.
-### Event: `'ready'`
-
+Any specified {FileHandle} has to support writing.
-Emitted when the `fs.WriteStream` is ready to be used.
+It is unsafe to use `fsPromises.writeFile()` multiple times on the same file
+without waiting for the promise to be settled.
-Fires immediately after `'open'`.
+Similarly to `fsPromises.readFile` - `fsPromises.writeFile` is a convenience
+method that performs multiple `write` calls internally to write the buffer
+passed to it. For performance sensitive code consider using
+[`fs.createWriteStream()`][].
-### `writeStream.bytesWritten`
-
+It is possible to use an {AbortSignal} to cancel an `fsPromises.writeFile()`.
+Cancellation is "best effort", and some amount of data is likely still
+to be written.
-The number of bytes written so far. Does not include data that is still queued
-for writing.
+```js esm
+import { writeFile } from 'fs/promises';
-### `writeStream.path`
-
+try {
+ const controller = new AbortController();
+ const { signal } = controller;
+ const data = new Uint8Array(Buffer.from('Hello Node.js'));
+ writeFile('message.txt', data, { signal });
+ controller.abort();
+} catch (err) {
+ // When a request is aborted - err is an AbortError
+ console.error(err);
+}
+```
-The path to the file the stream is writing to as specified in the first
-argument to [`fs.createWriteStream()`][]. If `path` is passed as a string, then
-`writeStream.path` will be a string. If `path` is passed as a `Buffer`, then
-`writeStream.path` will be a `Buffer`.
+Aborting an ongoing request does not abort individual operating
+system requests but rather the internal buffering `fs.writeFile` performs.
-### `writeStream.pending`
-
+## Callback API
-* {boolean}
+The callback APIs perform all operations asynchronously, without blocking the
+event loop, then invoke a callback function upon completion or error.
-This property is `true` if the underlying file has not been opened yet,
-i.e. before the `'ready'` event is emitted.
+The callback APIs use the underlying Node.js threadpool to perform file
+system operations off the event loop thread. These operations are not
+synchronized or threadsafe. Care must be taken when performing multiple
+concurrent modifications on the same file or data corruption may occur.
-## `fs.access(path[, mode], callback)`
+### `fs.access(path[, mode], callback)`
-
-* `path` {string|Buffer|URL}
-* `mode` {integer} **Default:** `fs.constants.F_OK`
-
-Synchronously tests a user's permissions for the file or directory specified
-by `path`. The `mode` argument is an optional integer that specifies the
-accessibility checks to be performed. Check [File access constants][] for
-possible values of `mode`. It is possible to create a mask consisting of
-the bitwise OR of two or more values
-(e.g. `fs.constants.W_OK | fs.constants.R_OK`).
-
-If any of the accessibility checks fail, an `Error` will be thrown. Otherwise,
-the method will return `undefined`.
-
-```js
-try {
- fs.accessSync('etc/passwd', fs.constants.R_OK | fs.constants.W_OK);
- console.log('can read/write');
-} catch (err) {
- console.error('no access!');
-}
-```
-
-## `fs.appendFile(path, data[, options], callback)`
+### `fs.appendFile(path, data[, options], callback)`
-
-* `path` {string|Buffer|URL|number} filename or file descriptor
-* `data` {string|Buffer}
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `'utf8'`
- * `mode` {integer} **Default:** `0o666`
- * `flag` {string} See [support of file system `flags`][]. **Default:** `'a'`.
-
-Synchronously append data to a file, creating the file if it does not yet
-exist. `data` can be a string or a [`Buffer`][].
-
-```js
-try {
- fs.appendFileSync('message.txt', 'data to append');
- console.log('The "data to append" was appended to file!');
-} catch (err) {
- /* Handle the error */
-}
-```
-
-If `options` is a string, then it specifies the encoding:
-
-```js
-fs.appendFileSync('message.txt', 'data to append', 'utf8');
-```
-
-The `path` may be specified as a numeric file descriptor that has been opened
-for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will
-not be closed automatically.
-
-```js
-let fd;
-
-try {
- fd = fs.openSync('message.txt', 'a');
- fs.appendFileSync(fd, 'data to append', 'utf8');
-} catch (err) {
- /* Handle the error */
-} finally {
- if (fd !== undefined)
- fs.closeSync(fd);
-}
-```
-
-## `fs.chmod(path, mode, callback)`
-
* `path` {string|Buffer|URL}
@@ -1482,16 +1605,18 @@ changes:
Asynchronously changes the permissions of a file. No arguments other than a
possible exception are given to the completion callback.
-See also: chmod(2).
+See the POSIX chmod(2) documentation for more detail.
-```js
-fs.chmod('my_file.txt', 0o775, (err) => {
+```js esm
+import { chmod } from 'fs';
+
+chmod('my_file.txt', 0o775, (err) => {
if (err) throw err;
console.log('The permissions for file "my_file.txt" have been changed!');
});
```
-### File modes
+#### File modes
The `mode` argument used in both the `fs.chmod()` and `fs.chmodSync()`
methods is a numeric bitmask created using a logical OR of the following
@@ -1541,25 +1666,7 @@ Caveats: on Windows only the write permission can be changed, and the
distinction among the permissions of group, owner or others is not
implemented.
-## `fs.chmodSync(path, mode)`
-
-
-* `path` {string|Buffer|URL}
-* `mode` {string|integer}
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.chmod()`][].
-
-See also: chmod(2).
-
-## `fs.chown(path, uid, gid, callback)`
+### `fs.chown(path, uid, gid, callback)`
-
-* `path` {string|Buffer|URL}
-* `uid` {integer}
-* `gid` {integer}
-
-Synchronously changes owner and group of a file. Returns `undefined`.
-This is the synchronous version of [`fs.chown()`][].
+See the POSIX chown(2) documentation for more detail.
-See also: chown(2).
-
-## `fs.close(fd, callback)`
+### `fs.close(fd[, callback])`
-
-* `fd` {integer}
-
-Synchronous close(2). Returns `undefined`.
-
-Calling `fs.closeSync()` on any file descriptor (`fd`) that is currently in use
-through any other `fs` operation may lead to undefined behavior.
-
-## `fs.constants`
-
-* {Object}
-
-Returns an object containing commonly used constants for file system
-operations. The specific constants currently defined are described in
-[FS constants][].
+See the POSIX close(2) documentation for more detail.
-## `fs.copyFile(src, dest[, mode], callback)`
+### `fs.copyFile(src, dest[, mode], callback)`
-
-* `src` {string|Buffer|URL} source filename to copy
-* `dest` {string|Buffer|URL} destination filename of the copy operation
-* `mode` {integer} modifiers for copy operation. **Default:** `0`.
-
-Synchronously copies `src` to `dest`. By default, `dest` is overwritten if it
-already exists. Returns `undefined`. Node.js makes no guarantees about the
-atomicity of the copy operation. If an error occurs after the destination file
-has been opened for writing, Node.js will attempt to remove the destination.
-
-`mode` is an optional integer that specifies the behavior
-of the copy operation. It is possible to create a mask consisting of the bitwise
-OR of two or more values (e.g.
-`fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
-
-* `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest` already
- exists.
-* `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create a
- copy-on-write reflink. If the platform does not support copy-on-write, then a
- fallback copy mechanism is used.
-* `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
- create a copy-on-write reflink. If the platform does not support
- copy-on-write, then the operation will fail.
-
-```js
-const fs = require('fs');
-const { COPYFILE_EXCL } = fs.constants;
-
-// destination.txt will be created or overwritten by default.
-fs.copyFileSync('source.txt', 'destination.txt');
-console.log('source.txt was copied to destination.txt');
+copyFile('source.txt', 'destination.txt', callback);
// By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
-fs.copyFileSync('source.txt', 'destination.txt', COPYFILE_EXCL);
+copyFile('source.txt', 'destination.txt', constants.COPYFILE_EXCL, callback);
```
-## `fs.createReadStream(path[, options])`
+### `fs.createReadStream(path[, options])`
-
-* `path` {string|Buffer|URL}
-* Returns: {boolean}
-
-Returns `true` if the path exists, `false` otherwise.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.exists()`][].
-
-`fs.exists()` is deprecated, but `fs.existsSync()` is not. The `callback`
-parameter to `fs.exists()` accepts parameters that are inconsistent with other
-Node.js callbacks. `fs.existsSync()` does not use a callback.
-
-```js
-if (fs.existsSync('/etc/passwd')) {
- console.log('The path exists.');
-}
-```
-
-## `fs.fchmod(fd, mode, callback)`
+### `fs.fchmod(fd, mode, callback)`
-
-* `fd` {integer}
-* `mode` {string|integer}
-
-Synchronous fchmod(2). Returns `undefined`.
+See the POSIX fchmod(2) documentation for more detail.
-## `fs.fchown(fd, uid, gid, callback)`
+### `fs.fchown(fd, uid, gid, callback)`
-
-* `fd` {integer}
-* `uid` {integer}
-* `gid` {integer}
+Sets the owner of the file. No arguments other than a possible exception are
+given to the completion callback.
-Synchronous fchown(2). Returns `undefined`.
+See the POSIX fchown(2) documentation for more detail.
-## `fs.fdatasync(fd, callback)`
+### `fs.fdatasync(fd, callback)`
-
-* `fd` {integer}
-
-Synchronous fdatasync(2). Returns `undefined`.
+Forces all currently queued I/O operations associated with the file to the
+operating system's synchronized I/O completion state. Refer to the POSIX
+fdatasync(2) documentation for details. No arguments other than a possible
+exception are given to the completion callback.
-## `fs.fstat(fd[, options], callback)`
+### `fs.fstat(fd[, options], callback)`
+Invokes the callback with the {fs.Stats} for the file descriptor.
-* `fd` {integer}
-* `options` {Object}
- * `bigint` {boolean} Whether the numeric values in the returned
- [`fs.Stats`][] object should be `bigint`. **Default:** `false`.
-* Returns: {fs.Stats}
+See the POSIX fstat(2) documentation for more detail.
-Synchronous fstat(2).
-
-## `fs.fsync(fd, callback)`
+### `fs.fsync(fd, callback)`
-
-* `fd` {integer}
-
-Synchronous fsync(2). Returns `undefined`.
+Request that all data for the open file descriptor is flushed to the storage
+device. The specific implementation is operating system and device specific.
+Refer to the POSIX fsync(2) documentation for more detail. No arguments other
+than a possible exception are given to the completion callback.
-## `fs.ftruncate(fd[, len], callback)`
+### `fs.ftruncate(fd[, len], callback)`
-
-* `fd` {integer}
-* `len` {integer} **Default:** `0`
-
-Returns `undefined`.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.ftruncate()`][].
+If the file previously was shorter than `len` bytes, it is extended, and the
+extended part is filled with null bytes (`'\0'`):
-## `fs.futimes(fd, atime, mtime, callback)`
+### `fs.futimes(fd, atime, mtime, callback)`
-
-* `fd` {integer}
-* `atime` {number|string|Date}
-* `mtime` {number|string|Date}
-
-Synchronous version of [`fs.futimes()`][]. Returns `undefined`.
-
-## `fs.lchmod(path, mode, callback)`
+### `fs.lchmod(path, mode, callback)`
-
-* `path` {string|Buffer|URL}
-* `mode` {integer}
+This method is only implemented on macOS.
-Synchronous lchmod(2). Returns `undefined`.
+See the POSIX lchmod(2) documentation for more detail.
-## `fs.lchown(path, uid, gid, callback)`
+### `fs.lchown(path, uid, gid, callback)`
+Set the owner of the symbolic link. No arguments other than a possible
+exception are given to the completion callback.
-* `path` {string|Buffer|URL}
-* `uid` {integer}
-* `gid` {integer}
-
-Synchronous lchown(2). Returns `undefined`.
+See the POSIX lchown(2) documentation for more detail.
-## `fs.lutimes(path, atime, mtime, callback)`
+### `fs.lutimes(path, atime, mtime, callback)`
-
-* `path` {string|Buffer|URL}
-* `atime` {number|string|Date}
-* `mtime` {number|string|Date}
-
-Change the file system timestamps of the symbolic link referenced by `path`.
-Returns `undefined`, or throws an exception when parameters are incorrect or
-the operation fails. This is the synchronous version of [`fs.lutimes()`][].
-
-## `fs.link(existingPath, newPath, callback)`
+### `fs.link(existingPath, newPath, callback)`
-
-* `existingPath` {string|Buffer|URL}
-* `newPath` {string|Buffer|URL}
-
-Synchronous link(2). Returns `undefined`.
+Creates a new link from the `existingPath` to the `newPath`. See the POSIX
+link(2) documentation for more detail. No arguments other than a possible
+exception are given to the completion callback.
-## `fs.lstat(path[, options], callback)`
+### `fs.lstat(path[, options], callback)`
-
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `bigint` {boolean} Whether the numeric values in the returned
- [`fs.Stats`][] object should be `bigint`. **Default:** `false`.
- * `throwIfNoEntry` {boolean} Whether an exception will be thrown
- if no file system entry exists, rather than returning `undefined`.
- **Default:** `true`.
-* Returns: {fs.Stats}
+Retrieves the {fs.Stats} for the symbolic link referred to by the path.
+The callback gets two arguments `(err, stats)` where `stats` is a {fs.Stats}
+object. `lstat()` is identical to `stat()`, except that if `path` is a symbolic
+link, then the link itself is stat-ed, not the file that it refers to.
-Synchronous lstat(2).
+See the POSIX lstat(2) documentation for more details.
-## `fs.mkdir(path[, options], callback)`
+### `fs.mkdir(path[, options], callback)`
-
-* `path` {string|Buffer|URL}
-* `options` {Object|integer}
- * `recursive` {boolean} **Default:** `false`
- * `mode` {string|integer} Not supported on Windows. **Default:** `0o777`.
-* Returns: {string|undefined}
-
-Synchronously creates a directory. Returns `undefined`, or if `recursive` is
-`true`, the first directory path created.
-This is the synchronous version of [`fs.mkdir()`][].
-
-See also: mkdir(2).
+See the POSIX mkdir(2) documentation for more details.
-## `fs.mkdtemp(prefix[, options], callback)`
+### `fs.mkdtemp(prefix[, options], callback)`
-
-* `prefix` {string}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
-* Returns: {string}
-
-Returns the created directory path.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.mkdtemp()`][].
-
-The optional `options` argument can be a string specifying an encoding, or an
-object with an `encoding` property specifying the character encoding to use.
-
-## `fs.open(path[, flags[, mode]], callback)`
+### `fs.open(path[, flags[, mode]], callback)`
-
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `encoding` {string|null} **Default:** `'utf8'`
- * `bufferSize` {number} Number of directory entries that are buffered
- internally when reading from the directory. Higher values lead to better
- performance but higher memory usage. **Default:** `32`
-* Returns: {fs.Dir}
-
-Synchronously open a directory. See opendir(3).
+Asynchronously open a directory. See the POSIX opendir(3) documentation for
+more details.
-Creates an [`fs.Dir`][], which contains all further functions for reading from
+Creates an {fs.Dir}, which contains all further functions for reading from
and cleaning up the directory.
The `encoding` option sets the encoding for the `path` while opening the
directory and subsequent read operations.
-## `fs.openSync(path[, flags, mode])`
-
-
-* `path` {string|Buffer|URL}
-* `flags` {string|number} **Default:** `'r'`.
- See [support of file system `flags`][].
-* `mode` {string|integer} **Default:** `0o666`
-* Returns: {number}
-
-Returns an integer representing the file descriptor.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.open()`][].
-
-## `fs.read(fd, buffer, offset, length, position, callback)`
+### `fs.read(fd, buffer, offset, length, position, callback)`
* `fd` {integer}
-* `buffer` {Buffer|TypedArray|DataView}
-* `offset` {integer}
-* `length` {integer}
-* `position` {integer|bigint}
+* `buffer` {Buffer|TypedArray|DataView} The buffer that the data will be
+ written to.
+* `offset` {integer} The position in `buffer` to write the data to.
+* `length` {integer} The number of bytes to read.
+* `position` {integer|bigint} Specifies where to begin reading from in the
+  file. If `position` is `null` or `-1`, data will be read from the current
+  file position, and the file position will be updated. If `position` is an
+  integer, the file position will be unchanged.
* `callback` {Function}
* `err` {Error}
* `bytesRead` {integer}
@@ -2935,26 +2719,15 @@ changes:
Read data from the file specified by `fd`.
-`buffer` is the buffer that the data (read from the fd) will be written to.
-
-`offset` is the offset in the buffer to start writing at.
-
-`length` is an integer specifying the number of bytes to read.
-
-`position` is an argument specifying where to begin reading from in the file.
-If `position` is `null`, data will be read from the current file position,
-and the file position will be updated.
-If `position` is an integer, the file position will remain unchanged.
-
The callback is given the three arguments, `(err, bytesRead, buffer)`.
If the file is not modified concurrently, the end-of-file is reached when the
number of bytes read is zero.
If this method is invoked as its [`util.promisify()`][]ed version, it returns
-a `Promise` for an `Object` with `bytesRead` and `buffer` properties.
+a promise for an `Object` with `bytesRead` and `buffer` properties.
-## `fs.read(fd, [options,] callback)`
+### `fs.read(fd, [options,] callback)`
+{fs.Dirent} objects.
-* `path` {string|Buffer|URL}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
- * `withFileTypes` {boolean} **Default:** `false`
-* Returns: {string[]|Buffer[]|fs.Dirent[]}
-
-Synchronous readdir(3).
-
-The optional `options` argument can be a string specifying an encoding, or an
-object with an `encoding` property specifying the character encoding to use for
-the filenames returned. If the `encoding` is set to `'buffer'`,
-the filenames returned will be passed as `Buffer` objects.
-
-If `options.withFileTypes` is set to `true`, the result will contain
-[`fs.Dirent`][] objects.
-
-## `fs.readFile(path[, options], callback)`
+### `fs.readFile(path[, options], callback)`
-
-* `path` {string|Buffer|URL|integer} filename or file descriptor
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `null`
- * `flag` {string} See [support of file system `flags`][]. **Default:** `'r'`.
-* Returns: {string|Buffer}
-
-Returns the contents of the `path`.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.readFile()`][].
-
-If the `encoding` option is specified then this function returns a
-string. Otherwise it returns a buffer.
-
-Similar to [`fs.readFile()`][], when the path is a directory, the behavior of
-`fs.readFileSync()` is platform-specific.
-
-```js
-// macOS, Linux, and Windows
-fs.readFileSync('<directory>');
-// => [Error: EISDIR: illegal operation on a directory, read <directory>]
-
-// FreeBSD
-fs.readFileSync('<directory>'); // => <data>
-```
-
-## `fs.readlink(path[, options], callback)`
+### `fs.readlink(path[, options], callback)`
-
-* `path` {string|Buffer|URL}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
-* Returns: {string|Buffer}
+Reads the contents of the symbolic link referred to by `path`. The callback gets
+two arguments `(err, linkString)`.
-Synchronous readlink(2). Returns the symbolic link's string value.
+See the POSIX readlink(2) documentation for more details.
The optional `options` argument can be a string specifying an encoding, or an
object with an `encoding` property specifying the character encoding to use for
-the link path returned. If the `encoding` is set to `'buffer'`,
-the link path returned will be passed as a `Buffer` object.
-
-## `fs.readSync(fd, buffer, offset, length, position)`
-
-
-* `fd` {integer}
-* `buffer` {Buffer|TypedArray|DataView}
-* `offset` {integer}
-* `length` {integer}
-* `position` {integer|bigint}
-* Returns: {number}
-
-Returns the number of `bytesRead`.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.read()`][].
-
-## `fs.readSync(fd, buffer, [options])`
-
-
-* `fd` {integer}
-* `buffer` {Buffer|TypedArray|DataView}
-* `options` {Object}
- * `offset` {integer} **Default:** `0`
- * `length` {integer} **Default:** `buffer.length`
- * `position` {integer|bigint} **Default:** `null`
-* Returns: {number}
-
-Returns the number of `bytesRead`.
-
-Similar to the above `fs.readSync` function, this version takes an optional `options` object.
-If no `options` object is specified, it will default with the above values.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.read()`][].
+the link path passed to the callback. If the `encoding` is set to `'buffer'`,
+the link path returned will be passed as a {Buffer} object.
-## `fs.readv(fd, buffers[, position], callback)`
+### `fs.readv(fd, buffers[, position], callback)`
-
-* `fd` {integer}
-* `buffers` {ArrayBufferView[]}
-* `position` {integer}
-* Returns: {number} The number of bytes read.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.readv()`][].
+a promise for an `Object` with `bytesRead` and `buffers` properties.
-## `fs.realpath(path[, options], callback)`
+### `fs.realpath(path[, options], callback)`
@@ -3454,66 +3080,13 @@ Only paths that can be converted to UTF8 strings are supported.
The optional `options` argument can be a string specifying an encoding, or an
object with an `encoding` property specifying the character encoding to use for
the path passed to the callback. If the `encoding` is set to `'buffer'`,
-the path returned will be passed as a `Buffer` object.
-
-On Linux, when Node.js is linked against musl libc, the procfs file system must
-be mounted on `/proc` in order for this function to work. Glibc does not have
-this restriction.
-
-## `fs.realpathSync(path[, options])`
-
-
-* `path` {string|Buffer|URL}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
-* Returns: {string|Buffer}
-
-Returns the resolved pathname.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.realpath()`][].
-
-## `fs.realpathSync.native(path[, options])`
-
-
-* `path` {string|Buffer|URL}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
-* Returns: {string|Buffer}
-
-Synchronous realpath(3).
-
-Only paths that can be converted to UTF8 strings are supported.
-
-The optional `options` argument can be a string specifying an encoding, or an
-object with an `encoding` property specifying the character encoding to use for
-the path returned. If the `encoding` is set to `'buffer'`,
-the path returned will be passed as a `Buffer` object.
+the path returned will be passed as a {Buffer} object.
On Linux, when Node.js is linked against musl libc, the procfs file system must
be mounted on `/proc` in order for this function to work. Glibc does not have
this restriction.
-## `fs.rename(oldPath, newPath, callback)`
+### `fs.rename(oldPath, newPath, callback)`
-
-* `oldPath` {string|Buffer|URL}
-* `newPath` {string|Buffer|URL}
-
-Synchronous rename(2). Returns `undefined`.
-
-## `fs.rmdir(path[, options], callback)`
+### `fs.rmdir(path[, options], callback)`
-
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
- `EPERM` error is encountered, Node.js retries the operation with a linear
- backoff wait of `retryDelay` milliseconds longer on each try. This option
- represents the number of retries. This option is ignored if the `recursive`
- option is not `true`. **Default:** `0`.
- * `recursive` {boolean} If `true`, perform a recursive directory removal. In
- recursive mode, errors are not reported if `path` does not exist, and
- operations are retried on failure. **Default:** `false`.
- * `retryDelay` {integer} The amount of time in milliseconds to wait between
- retries. This option is ignored if the `recursive` option is not `true`.
- **Default:** `100`.
-
-Synchronous rmdir(2). Returns `undefined`.
-
-Using `fs.rmdirSync()` on a file (not a directory) results in an `ENOENT` error
-on Windows and an `ENOTDIR` error on POSIX.
-
-Setting `recursive` to `true` results in behavior similar to the Unix command
-`rm -rf`: an error will not be raised for paths that do not exist, and paths
-that represent files will be deleted. The permissive behavior of the
-`recursive` option is deprecated, `ENOTDIR` and `ENOENT` will be thrown in
-the future.
-
-## `fs.rm(path[, options], callback)`
+### `fs.rm(path[, options], callback)`
@@ -3701,30 +3212,7 @@ Asynchronously removes files and directories (modeled on the standard POSIX `rm`
utility). No arguments other than a possible exception are given to the
completion callback.
-## `fs.rmSync(path[, options])`
-
-
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `force` {boolean} When `true`, exceptions will be ignored if `path` does
- not exist. **Default:** `false`.
- * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
- `EPERM` error is encountered, Node.js will retry the operation with a linear
- backoff wait of `retryDelay` milliseconds longer on each try. This option
- represents the number of retries. This option is ignored if the `recursive`
- option is not `true`. **Default:** `0`.
- * `recursive` {boolean} If `true`, perform a recursive directory removal. In
- recursive mode operations are retried on failure. **Default:** `false`.
- * `retryDelay` {integer} The amount of time in milliseconds to wait between
- retries. This option is ignored if the `recursive` option is not `true`.
- **Default:** `100`.
-
-Synchronously removes files and directories (modeled on the standard POSIX `rm`
-utility). Returns `undefined`.
-
-## `fs.stat(path[, options], callback)`
+### `fs.stat(path[, options], callback)`
-
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `bigint` {boolean} Whether the numeric values in the returned
- [`fs.Stats`][] object should be `bigint`. **Default:** `false`.
- * `throwIfNoEntry` {boolean} Whether an exception will be thrown
- if no file system entry exists, rather than returning `undefined`.
- **Default:** `true`.
-* Returns: {fs.Stats}
-
-Synchronous stat(2).
-
-## `fs.symlink(target, path[, type], callback)`
+### `fs.symlink(target, path[, type], callback)`
-
-* `target` {string|Buffer|URL}
-* `path` {string|Buffer|URL}
-* `type` {string}
-
-Returns `undefined`.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.symlink()`][].
-
-## `fs.truncate(path[, len], callback)`
+### `fs.truncate(path[, len], callback)`
-
-* `path` {string|Buffer|URL}
-* `len` {integer} **Default:** `0`
-
-Synchronous truncate(2). Returns `undefined`. A file descriptor can also be
-passed as the first argument. In this case, `fs.ftruncateSync()` is called.
+See the POSIX truncate(2) documentation for more details.
-Passing a file descriptor is deprecated and may result in an error being thrown
-in the future.
-
-## `fs.unlink(path, callback)`
+### `fs.unlink(path, callback)`
-
-* `path` {string|Buffer|URL}
-
-Synchronous unlink(2). Returns `undefined`.
+See the POSIX unlink(2) documentation for more details.
-## `fs.unwatchFile(filename[, listener])`
+### `fs.unwatchFile(filename[, listener])`
@@ -4050,7 +3463,7 @@ Using [`fs.watch()`][] is more efficient than `fs.watchFile()` and
`fs.unwatchFile()`. `fs.watch()` should be used instead of `fs.watchFile()`
and `fs.unwatchFile()` when possible.
-## `fs.utimes(path, atime, mtime, callback)`
+### `fs.utimes(path, atime, mtime, callback)`
-
-* `path` {string|Buffer|URL}
-* `atime` {number|string|Date}
-* `mtime` {number|string|Date}
-
-Returns `undefined`.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.utimes()`][].
-
-## `fs.watch(filename[, options][, listener])`
+### `fs.watch(filename[, options][, listener])`
@@ -4177,7 +3570,7 @@ when the option is used on a platform that does not support it.
On Windows, no events will be emitted if the watched directory is moved or
renamed. An `EPERM` error is reported when the watched directory is deleted.
-#### Availability
+##### Availability
@@ -4202,7 +3595,7 @@ when using virtualization software such as Vagrant or Docker.
It is still possible to use `fs.watchFile()`, which uses stat polling, but
this method is slower and less reliable.
-#### Inodes
+##### Inodes
@@ -4216,7 +3609,7 @@ AIX files retain the same inode for the lifetime of a file. Saving and closing a
watched file on AIX will result in two notifications (one for adding new
content, and one for truncation).
-#### Filename argument
+##### Filename argument
@@ -4225,8 +3618,9 @@ macOS, Windows, and AIX. Even on supported platforms, `filename` is not always
guaranteed to be provided. Therefore, don't assume that `filename` argument is
always provided in the callback, and have some fallback logic if it is `null`.
-```js
-fs.watch('somedir', (eventType, filename) => {
+```js esm
+import { watch } from 'fs';
+watch('somedir', (eventType, filename) => {
console.log(`event type is: ${eventType}`);
if (filename) {
    console.log(`filename provided: ${filename}`);
@@ -4236,7 +3630,7 @@ fs.watch('somedir', (eventType, filename) => {
});
```
-## `fs.watchFile(filename[, options], listener)`
+### `fs.watchFile(filename[, options], listener)`
-
-* `file` {string|Buffer|URL|integer} filename or file descriptor
-* `data` {string|Buffer|TypedArray|DataView|Object}
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `'utf8'`
- * `mode` {integer} **Default:** `0o666`
- * `flag` {string} See [support of file system `flags`][]. **Default:** `'w'`.
-
-Returns `undefined`.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.writeFile()`][].
-
-## `fs.writeSync(fd, buffer[, offset[, length[, position]]])`
-
-
-* `fd` {integer}
-* `buffer` {Buffer|TypedArray|DataView|string|Object}
-* `offset` {integer}
-* `length` {integer}
-* `position` {integer}
-* Returns: {number} The number of bytes written.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.write(fd, buffer...)`][].
-
-## `fs.writeSync(fd, string[, position[, encoding]])`
-
-
-* `fd` {integer}
-* `string` {string|Object}
-* `position` {integer}
-* `encoding` {string}
-* Returns: {number} The number of bytes written.
-
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.write(fd, string...)`][].
-
-## `fs.writev(fd, buffers[, position], callback)`
+### `fs.writev(fd, buffers[, position], callback)`
@@ -4674,7 +3982,7 @@ at the current position.
The callback will be given three arguments: `err`, `bytesWritten`, and
`buffers`. `bytesWritten` is how many bytes were written from `buffers`.
-If this method is [`util.promisify()`][]ed, it returns a `Promise` for an
+If this method is [`util.promisify()`][]ed, it returns a promise for an
`Object` with `bytesWritten` and `buffers` properties.
It is unsafe to use `fs.writev()` multiple times on the same file without
@@ -4684,247 +3992,269 @@ On Linux, positional writes don't work when the file is opened in append mode.
The kernel ignores the position argument and always appends the data to
the end of the file.
-## `fs.writevSync(fd, buffers[, position])`
-
-
-* `fd` {integer}
-* `buffers` {ArrayBufferView[]}
-* `position` {integer}
-* Returns: {number} The number of bytes written.
+## Synchronous API
-For detailed information, see the documentation of the asynchronous version of
-this API: [`fs.writev()`][].
+The synchronous APIs perform all operations synchronously, blocking the
+event loop until the operation completes or fails.
-## `fs` Promises API
+### `fs.accessSync(path[, mode])`
-
-The `fs.promises` API provides an alternative set of asynchronous file system
-methods that return `Promise` objects rather than using callbacks. The
-API is accessible via `require('fs').promises` or `require('fs/promises')`.
-
-### Class: `FileHandle`
-
-A `FileHandle` object is a wrapper for a numeric file descriptor.
-Instances of `FileHandle` are distinct from numeric file descriptors
-in that they provide an object oriented API for working with files.
-
-If a `FileHandle` is not closed using the
-`filehandle.close()` method, it might automatically close the file descriptor
-and will emit a process warning, thereby helping to prevent memory leaks.
-Please do not rely on this behavior because it is unreliable and
-the file may not be closed. Instead, always explicitly close `FileHandle`s.
-Node.js may change this behavior in the future.
+* `path` {string|Buffer|URL}
+* `mode` {integer} **Default:** `fs.constants.F_OK`
-Instances of the `FileHandle` object are created internally by the
-`fsPromises.open()` method.
+Synchronously tests a user's permissions for the file or directory specified
+by `path`. The `mode` argument is an optional integer that specifies the
+accessibility checks to be performed. Check [File access constants][] for
+possible values of `mode`. It is possible to create a mask consisting of
+the bitwise OR of two or more values
+(e.g. `fs.constants.W_OK | fs.constants.R_OK`).
-Unlike the callback-based API (`fs.fstat()`, `fs.fchown()`, `fs.fchmod()`, and
-so on), a numeric file descriptor is not used by the promise-based API. Instead,
-the promise-based API uses the `FileHandle` class in order to help avoid
-accidental leaking of unclosed file descriptors after a `Promise` is resolved or
-rejected.
+If any of the accessibility checks fail, an `Error` will be thrown. Otherwise,
+the method will return `undefined`.
-#### Event: `'close'`
-
+```js esm
+import { accessSync, constants } from 'fs';
-The `'close'` event is emitted when the `FileHandle` and any of its underlying
-resources (a file descriptor, for example) have been closed.
+try {
+ accessSync('etc/passwd', constants.R_OK | constants.W_OK);
+ console.log('can read/write');
+} catch (err) {
+ console.error('no access!');
+}
+```
-#### `filehandle.appendFile(data, options)`
+### `fs.appendFileSync(path, data[, options])`
+* `path` {string|Buffer|URL|number} filename or file descriptor
* `data` {string|Buffer}
* `options` {Object|string}
* `encoding` {string|null} **Default:** `'utf8'`
-* Returns: {Promise}
+ * `mode` {integer} **Default:** `0o666`
+ * `flag` {string} See [support of file system `flags`][]. **Default:** `'a'`.
-Alias of [`filehandle.writeFile()`][].
+Synchronously append data to a file, creating the file if it does not yet
+exist. `data` can be a string or a {Buffer}.
-When operating on file handles, the mode cannot be changed from what it was set
-to with [`fsPromises.open()`][]. Therefore, this is equivalent to
-[`filehandle.writeFile()`][].
+```js esm
+import { appendFileSync } from 'fs';
-#### `filehandle.chmod(mode)`
+try {
+ appendFileSync('message.txt', 'data to append');
+ console.log('The "data to append" was appended to file!');
+} catch (err) {
+ /* Handle the error */
+}
+```
+
+If `options` is a string, then it specifies the encoding:
+
+```js esm
+import { appendFileSync } from 'fs';
+
+appendFileSync('message.txt', 'data to append', 'utf8');
+```
+
+The `path` may be specified as a numeric file descriptor that has been opened
+for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will
+not be closed automatically.
+
+```js esm
+import { openSync, closeSync, appendFileSync } from 'fs';
+
+let fd;
+
+try {
+ fd = openSync('message.txt', 'a');
+ appendFileSync(fd, 'data to append', 'utf8');
+} catch (err) {
+ /* Handle the error */
+} finally {
+ if (fd !== undefined)
+ closeSync(fd);
+}
+```
+
+### `fs.chmodSync(path, mode)`
-* `mode` {integer}
-* Returns: {Promise}
+* `path` {string|Buffer|URL}
+* `mode` {string|integer}
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.chmod()`][].
-Modifies the permissions on the file. The `Promise` is resolved with no
-arguments upon success.
+See the POSIX chmod(2) documentation for more detail.
-#### `filehandle.chown(uid, gid)`
+### `fs.chownSync(path, uid, gid)`
+* `path` {string|Buffer|URL}
* `uid` {integer}
* `gid` {integer}
-* Returns: {Promise}
-Changes the ownership of the file then resolves the `Promise` with no arguments
-upon success.
+Synchronously changes owner and group of a file. Returns `undefined`.
+This is the synchronous version of [`fs.chown()`][].
-#### `filehandle.close()`
+See the POSIX chown(2) documentation for more detail.
+
+### `fs.closeSync(fd)`
-* Returns: {Promise} A `Promise` that will be resolved once the underlying
- file descriptor is closed, or will be rejected if an error occurs while
- closing.
+* `fd` {integer}
-Closes the file handle after waiting for any pending operation on the handle to
-complete.
+Closes the file descriptor. Returns `undefined`.
-```js
-const fsPromises = require('fs').promises;
-async function openAndClose() {
- let filehandle;
- try {
- filehandle = await fsPromises.open('thefile.txt', 'r');
- } finally {
- if (filehandle !== undefined)
- await filehandle.close();
- }
-}
-```
+Calling `fs.closeSync()` on any file descriptor (`fd`) that is currently in use
+through any other `fs` operation may lead to undefined behavior.
-#### `filehandle.datasync()`
+See the POSIX close(2) documentation for more detail.
+
+### `fs.copyFileSync(src, dest[, mode])`
-* Returns: {Promise}
+* `src` {string|Buffer|URL} source filename to copy
+* `dest` {string|Buffer|URL} destination filename of the copy operation
+* `mode` {integer} modifiers for copy operation. **Default:** `0`.
-Asynchronous fdatasync(2). The `Promise` is resolved with no arguments upon
-success.
+Synchronously copies `src` to `dest`. By default, `dest` is overwritten if it
+already exists. Returns `undefined`. Node.js makes no guarantees about the
+atomicity of the copy operation. If an error occurs after the destination file
+has been opened for writing, Node.js will attempt to remove the destination.
-#### `filehandle.fd`
-
+`mode` is an optional integer that specifies the behavior
+of the copy operation. It is possible to create a mask consisting of the bitwise
+OR of two or more values (e.g.
+`fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
-* {number} The numeric file descriptor managed by the `FileHandle` object.
+* `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest` already
+ exists.
+* `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create a
+ copy-on-write reflink. If the platform does not support copy-on-write, then a
+ fallback copy mechanism is used.
+* `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
+ create a copy-on-write reflink. If the platform does not support
+ copy-on-write, then the operation will fail.
-#### `filehandle.read(buffer, offset, length, position)`
-
+```js esm
+import { copyFileSync, constants } from 'fs';
-* `buffer` {Buffer|Uint8Array}
-* `offset` {integer}
-* `length` {integer}
-* `position` {integer}
-* Returns: {Promise}
+// destination.txt will be created or overwritten by default.
+copyFileSync('source.txt', 'destination.txt');
+console.log('source.txt was copied to destination.txt');
-Read data from the file.
+// By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
+copyFileSync('source.txt', 'destination.txt', constants.COPYFILE_EXCL);
+```
-`buffer` is the buffer that the data will be written to.
+### `fs.existsSync(path)`
+
-`offset` is the offset in the buffer to start writing at.
+* `path` {string|Buffer|URL}
+* Returns: {boolean}
-`length` is an integer specifying the number of bytes to read.
+Returns `true` if the path exists, `false` otherwise.
-`position` is an argument specifying where to begin reading from in the file.
-If `position` is `null`, data will be read from the current file position,
-and the file position will be updated.
-If `position` is an integer, the file position will remain unchanged.
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.exists()`][].
-Following successful read, the `Promise` is resolved with an object with a
-`bytesRead` property specifying the number of bytes read, and a `buffer`
-property that is a reference to the passed in `buffer` argument.
+`fs.exists()` is deprecated, but `fs.existsSync()` is not. The `callback`
+parameter to `fs.exists()` accepts parameters that are inconsistent with other
+Node.js callbacks. `fs.existsSync()` does not use a callback.
-If the file is not modified concurrently, the end-of-file is reached when the
-number of bytes read is zero.
+```js esm
+import { existsSync } from 'fs';
-#### `filehandle.read(options)`
-
-* `options` {Object}
- * `buffer` {Buffer|Uint8Array} **Default:** `Buffer.alloc(16384)`
- * `offset` {integer} **Default:** `0`
- * `length` {integer} **Default:** `buffer.length`
- * `position` {integer} **Default:** `null`
-* Returns: {Promise}
+if (existsSync('/etc/passwd'))
+ console.log('The path exists.');
+```
-#### `filehandle.readFile(options)`
+### `fs.fchmodSync(fd, mode)`
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `null`
- * `signal` {AbortSignal} allows aborting an in-progress readFile
-* Returns: {Promise}
+* `fd` {integer}
+* `mode` {string|integer}
-Asynchronously reads the entire contents of a file.
+Sets the permissions on the file. Returns `undefined`.
-The `Promise` is resolved with the contents of the file. If no encoding is
-specified (using `options.encoding`), the data is returned as a `Buffer`
-object. Otherwise, the data will be a string.
+See the POSIX fchmod(2) documentation for more detail.
-If `options` is a string, then it specifies the encoding.
+### `fs.fchownSync(fd, uid, gid)`
+
-The `FileHandle` has to support reading.
+* `fd` {integer}
+* `uid` {integer} The file's new owner's user id.
+* `gid` {integer} The file's new group's group id.
-If one or more `filehandle.read()` calls are made on a file handle and then a
-`filehandle.readFile()` call is made, the data will be read from the current
-position till the end of the file. It doesn't always read from the beginning
-of the file.
+Sets the owner of the file. Returns `undefined`.
-#### `filehandle.readv(buffers[, position])`
+See the POSIX fchown(2) documentation for more detail.
+
+### `fs.fdatasyncSync(fd)`
-* `buffers` {ArrayBufferView[]}
-* `position` {integer}
-* Returns: {Promise}
-
-Read from a file and write to an array of `ArrayBufferView`s
-
-The `Promise` is resolved with an object containing a `bytesRead` property
-identifying the number of bytes read, and a `buffers` property containing
-a reference to the `buffers` input.
+* `fd` {integer}
-`position` is the offset from the beginning of the file where this data
-should be read from. If `typeof position !== 'number'`, the data will be read
-from the current position.
+Forces all currently queued I/O operations associated with the file to the
+operating system's synchronized I/O completion state. Refer to the POSIX
+fdatasync(2) documentation for details. Returns `undefined`.
-#### `filehandle.stat([options])`
+### `fs.fstatSync(fd[, options])`
+* `fd` {integer}
* `options` {Object}
* `bigint` {boolean} Whether the numeric values in the returned
- [`fs.Stats`][] object should be `bigint`. **Default:** `false`.
-* Returns: {Promise}
+ {fs.Stats} object should be `bigint`. **Default:** `false`.
+* Returns: {fs.Stats}
-Retrieves the [`fs.Stats`][] for the file.
+Retrieves the {fs.Stats} for the file descriptor.
-#### `filehandle.sync()`
+See the POSIX fstat(2) documentation for more detail.
+
+### `fs.fsyncSync(fd)`
-* Returns: {Promise}
+* `fd` {integer}
-Asynchronous fsync(2). The `Promise` is resolved with no arguments upon
-success.
+Request that all data for the open file descriptor is flushed to the storage
+device. The specific implementation is operating system and device specific.
+Refer to the POSIX fsync(2) documentation for more detail. Returns `undefined`.
-#### `filehandle.truncate(len)`
+### `fs.ftruncateSync(fd[, len])`
+* `fd` {integer}
* `len` {integer} **Default:** `0`
-* Returns: {Promise}
-Truncates the file then resolves the `Promise` with no arguments upon success.
-
-If the file was larger than `len` bytes, only the first `len` bytes will be
-retained in the file.
+Truncates the file descriptor. Returns `undefined`.
-For example, the following program retains only the first four bytes of the
-file:
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.ftruncate()`][].
-```js
-const fs = require('fs');
-const fsPromises = fs.promises;
+### `fs.futimesSync(fd, atime, mtime)`
+
-console.log(fs.readFileSync('temp.txt', 'utf8'));
-// Prints: Node.js
+* `fd` {integer}
+* `atime` {number|string|Date}
+* `mtime` {number|string|Date}
-async function doTruncate() {
- let filehandle = null;
- try {
- filehandle = await fsPromises.open('temp.txt', 'r+');
- await filehandle.truncate(4);
- } finally {
- if (filehandle) {
- // Close the file if it is opened.
- await filehandle.close();
- }
- }
- console.log(fs.readFileSync('temp.txt', 'utf8')); // Prints: Node
-}
+Synchronous version of [`fs.futimes()`][]. Returns `undefined`.
-doTruncate().catch(console.error);
+### `fs.lchmodSync(path, mode)`
+
+
+* `path` {string|Buffer|URL}
+* `mode` {integer}
+
+Changes the permissions on a symbolic link. Returns `undefined`.
+
+This method is only implemented on macOS.
+
+See the POSIX lchmod(2) documentation for more detail.
+
+### `fs.lchownSync(path, uid, gid)`
+
+
+* `path` {string|Buffer|URL}
+* `uid` {integer} The file's new owner's user id.
+* `gid` {integer} The file's new group's group id.
+
+Set the owner for the path. Returns `undefined`.
+
+See the POSIX lchown(2) documentation for more details.
+
+### `fs.lutimesSync(path, atime, mtime)`
+
+
+* `path` {string|Buffer|URL}
+* `atime` {number|string|Date}
+* `mtime` {number|string|Date}
+
+Change the file system timestamps of the symbolic link referenced by `path`.
+Returns `undefined`, or throws an exception when parameters are incorrect or
+the operation fails. This is the synchronous version of [`fs.lutimes()`][].
+
+### `fs.linkSync(existingPath, newPath)`
+
+
+* `existingPath` {string|Buffer|URL}
+* `newPath` {string|Buffer|URL}
+
+Creates a new link from the `existingPath` to the `newPath`. See the POSIX
+link(2) documentation for more detail. Returns `undefined`.
+
+### `fs.lstatSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `bigint` {boolean} Whether the numeric values in the returned
+ {fs.Stats} object should be `bigint`. **Default:** `false`.
+ * `throwIfNoEntry` {boolean} Whether an exception will be thrown
+ if no file system entry exists, rather than returning `undefined`.
+ **Default:** `true`.
+* Returns: {fs.Stats}
+
+Retrieves the {fs.Stats} for the symbolic link referred to by `path`.
+
+See the POSIX lstat(2) documentation for more details.
+
+### `fs.mkdirSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {Object|integer}
+ * `recursive` {boolean} **Default:** `false`
+ * `mode` {string|integer} Not supported on Windows. **Default:** `0o777`.
+* Returns: {string|undefined}
+
+Synchronously creates a directory. Returns `undefined`, or if `recursive` is
+`true`, the first directory path created.
+This is the synchronous version of [`fs.mkdir()`][].
+
+See the POSIX mkdir(2) documentation for more details.
+
+### `fs.mkdtempSync(prefix[, options])`
+
+
+* `prefix` {string}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+* Returns: {string}
+
+Returns the created directory path.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.mkdtemp()`][].
+
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use.
+
+### `fs.opendirSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `encoding` {string|null} **Default:** `'utf8'`
+ * `bufferSize` {number} Number of directory entries that are buffered
+ internally when reading from the directory. Higher values lead to better
+ performance but higher memory usage. **Default:** `32`
+* Returns: {fs.Dir}
+
+Synchronously open a directory. See opendir(3).
+
+Creates an {fs.Dir}, which contains all further functions for reading from
+and cleaning up the directory.
+
+The `encoding` option sets the encoding for the `path` while opening the
+directory and subsequent read operations.
+
+### `fs.openSync(path[, flags, mode])`
+
+
+* `path` {string|Buffer|URL}
+* `flags` {string|number} **Default:** `'r'`.
+ See [support of file system `flags`][].
+* `mode` {string|integer} **Default:** `0o666`
+* Returns: {number}
+
+Returns an integer representing the file descriptor.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.open()`][].
+
+### `fs.readdirSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+ * `withFileTypes` {boolean} **Default:** `false`
+* Returns: {string[]|Buffer[]|fs.Dirent[]}
+
+Reads the contents of the directory.
+
+See the POSIX readdir(3) documentation for more details.
+
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use for
+the filenames returned. If the `encoding` is set to `'buffer'`,
+the filenames returned will be passed as {Buffer} objects.
+
+If `options.withFileTypes` is set to `true`, the result will contain
+{fs.Dirent} objects.
+
+### `fs.readFileSync(path[, options])`
+
+
+* `path` {string|Buffer|URL|integer} filename or file descriptor
+* `options` {Object|string}
+ * `encoding` {string|null} **Default:** `null`
+ * `flag` {string} See [support of file system `flags`][]. **Default:** `'r'`.
+* Returns: {string|Buffer}
+
+Returns the contents of the `path`.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.readFile()`][].
+
+If the `encoding` option is specified then this function returns a
+string. Otherwise it returns a buffer.
+
+Similar to [`fs.readFile()`][], when the path is a directory, the behavior of
+`fs.readFileSync()` is platform-specific.
+
+```js esm
+import { readFileSync } from 'fs';
+
+// macOS, Linux, and Windows
+readFileSync('<directory>');
+// => [Error: EISDIR: illegal operation on a directory, read <directory>]
+
+// FreeBSD
+readFileSync('<directory>'); // => <data>
```
-If the file previously was shorter than `len` bytes, it is extended, and the
-extended part is filled with null bytes (`'\0'`):
+### `fs.readlinkSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+* Returns: {string|Buffer}
+
+Returns the symbolic link's string value.
+
+See the POSIX readlink(2) documentation for more details.
+
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use for
+the link path returned. If the `encoding` is set to `'buffer'`,
+the link path returned will be passed as a {Buffer} object.
+
+### `fs.readSync(fd, buffer, offset, length, position)`
+
+
+* `fd` {integer}
+* `buffer` {Buffer|TypedArray|DataView}
+* `offset` {integer}
+* `length` {integer}
+* `position` {integer|bigint}
+* Returns: {number}
+
+Returns the number of `bytesRead`.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.read()`][].
+
+### `fs.readSync(fd, buffer, [options])`
+
+
+* `fd` {integer}
+* `buffer` {Buffer|TypedArray|DataView}
+* `options` {Object}
+ * `offset` {integer} **Default:** `0`
+ * `length` {integer} **Default:** `buffer.length`
+ * `position` {integer|bigint} **Default:** `null`
+* Returns: {number}
+
+Returns the number of `bytesRead`.
+
+Similar to the above `fs.readSync` function, this version takes an optional
+`options` object. If no `options` object is specified, it will default to the
+above values.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.read()`][].
+
+### `fs.readvSync(fd, buffers[, position])`
+
+
+* `fd` {integer}
+* `buffers` {ArrayBufferView[]}
+* `position` {integer}
+* Returns: {number} The number of bytes read.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.readv()`][].
+
+### `fs.realpathSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+* Returns: {string|Buffer}
+
+Returns the resolved pathname.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.realpath()`][].
+
+### `fs.realpathSync.native(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {string|Object}
+ * `encoding` {string} **Default:** `'utf8'`
+* Returns: {string|Buffer}
+
+Synchronous realpath(3).
+
+Only paths that can be converted to UTF8 strings are supported.
+
+The optional `options` argument can be a string specifying an encoding, or an
+object with an `encoding` property specifying the character encoding to use for
+the path returned. If the `encoding` is set to `'buffer'`,
+the path returned will be passed as a {Buffer} object.
+
+On Linux, when Node.js is linked against musl libc, the procfs file system must
+be mounted on `/proc` in order for this function to work. Glibc does not have
+this restriction.
+
+### `fs.renameSync(oldPath, newPath)`
+
+
+* `oldPath` {string|Buffer|URL}
+* `newPath` {string|Buffer|URL}
+
+Renames the file from `oldPath` to `newPath`. Returns `undefined`.
+
+See the POSIX rename(2) documentation for more details.
+
+### `fs.rmdirSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
+ `EPERM` error is encountered, Node.js retries the operation with a linear
+ backoff wait of `retryDelay` milliseconds longer on each try. This option
+ represents the number of retries. This option is ignored if the `recursive`
+ option is not `true`. **Default:** `0`.
+ * `recursive` {boolean} If `true`, perform a recursive directory removal. In
+ recursive mode, errors are not reported if `path` does not exist, and
+ operations are retried on failure. **Default:** `false`.
+ * `retryDelay` {integer} The amount of time in milliseconds to wait between
+ retries. This option is ignored if the `recursive` option is not `true`.
+ **Default:** `100`.
+
+Synchronous rmdir(2). Returns `undefined`.
+
+Using `fs.rmdirSync()` on a file (not a directory) results in an `ENOENT` error
+on Windows and an `ENOTDIR` error on POSIX.
+
+Setting `recursive` to `true` results in behavior similar to the Unix command
+`rm -rf`: an error will not be raised for paths that do not exist, and paths
+that represent files will be deleted. The permissive behavior of the
+`recursive` option is deprecated, `ENOTDIR` and `ENOENT` will be thrown in
+the future.
+
+### `fs.rmSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `force` {boolean} When `true`, exceptions will be ignored if `path` does
+ not exist. **Default:** `false`.
+ * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
+ `EPERM` error is encountered, Node.js will retry the operation with a linear
+ backoff wait of `retryDelay` milliseconds longer on each try. This option
+ represents the number of retries. This option is ignored if the `recursive`
+ option is not `true`. **Default:** `0`.
+ * `recursive` {boolean} If `true`, perform a recursive directory removal. In
+ recursive mode operations are retried on failure. **Default:** `false`.
+ * `retryDelay` {integer} The amount of time in milliseconds to wait between
+ retries. This option is ignored if the `recursive` option is not `true`.
+ **Default:** `100`.
+
+Synchronously removes files and directories (modeled on the standard POSIX `rm`
+utility). Returns `undefined`.
+
+### `fs.statSync(path[, options])`
+
+
+* `path` {string|Buffer|URL}
+* `options` {Object}
+ * `bigint` {boolean} Whether the numeric values in the returned
+ {fs.Stats} object should be `bigint`. **Default:** `false`.
+ * `throwIfNoEntry` {boolean} Whether an exception will be thrown
+ if no file system entry exists, rather than returning `undefined`.
+ **Default:** `true`.
+* Returns: {fs.Stats}
+
+Retrieves the {fs.Stats} for the path.
+
+### `fs.symlinkSync(target, path[, type])`
+
+
+* `target` {string|Buffer|URL}
+* `path` {string|Buffer|URL}
+* `type` {string}
+
+Returns `undefined`.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.symlink()`][].
+
+### `fs.truncateSync(path[, len])`
+
+
+* `path` {string|Buffer|URL}
+* `len` {integer} **Default:** `0`
+
+Truncates the file. Returns `undefined`. A file descriptor can also be
+passed as the first argument. In this case, `fs.ftruncateSync()` is called.
+
+Passing a file descriptor is deprecated and may result in an error being thrown
+in the future.
+
+### `fs.unlinkSync(path)`
+
+
+* `path` {string|Buffer|URL}
+
+Synchronous unlink(2). Returns `undefined`.
+
+### `fs.utimesSync(path, atime, mtime)`
+
+
+* `path` {string|Buffer|URL}
+* `atime` {number|string|Date}
+* `mtime` {number|string|Date}
+
+Returns `undefined`.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.utimes()`][].
+
+### `fs.writeFileSync(file, data[, options])`
+
+
+* `file` {string|Buffer|URL|integer} filename or file descriptor
+* `data` {string|Buffer|TypedArray|DataView|Object}
+* `options` {Object|string}
+ * `encoding` {string|null} **Default:** `'utf8'`
+ * `mode` {integer} **Default:** `0o666`
+ * `flag` {string} See [support of file system `flags`][]. **Default:** `'w'`.
+
+Returns `undefined`.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.writeFile()`][].
+
+### `fs.writeSync(fd, buffer[, offset[, length[, position]]])`
+
+
+* `fd` {integer}
+* `buffer` {Buffer|TypedArray|DataView|string|Object}
+* `offset` {integer}
+* `length` {integer}
+* `position` {integer}
+* Returns: {number} The number of bytes written.
-```js
-const fs = require('fs');
-const fsPromises = fs.promises;
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.write(fd, buffer...)`][].
-console.log(fs.readFileSync('temp.txt', 'utf8'));
-// Prints: Node.js
+### `fs.writeSync(fd, string[, position[, encoding]])`
+
-async function doTruncate() {
- let filehandle = null;
- try {
- filehandle = await fsPromises.open('temp.txt', 'r+');
- await filehandle.truncate(10);
- } finally {
- if (filehandle) {
- // Close the file if it is opened.
- await filehandle.close();
- }
- }
- console.log(fs.readFileSync('temp.txt', 'utf8')); // Prints Node.js\0\0\0
-}
+* `fd` {integer}
+* `string` {string|Object}
+* `position` {integer}
+* `encoding` {string}
+* Returns: {number} The number of bytes written.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.write(fd, string...)`][].
+
+### `fs.writevSync(fd, buffers[, position])`
+
-doTruncate().catch(console.error);
+* `fd` {integer}
+* `buffers` {ArrayBufferView[]}
+* `position` {integer}
+* Returns: {number} The number of bytes written.
+
+For detailed information, see the documentation of the asynchronous version of
+this API: [`fs.writev()`][].
+
+## Common objects
+
+The common objects are shared by all of the file system API variants
+(promise, callback, and synchronous).
+
+### Class: `fs.Dir`
+
+
+A class representing a directory stream.
+
+Created by [`fs.opendir()`][], [`fs.opendirSync()`][], or
+[`fsPromises.opendir()`][].
+
+```js esm
+import { opendir } from 'fs/promises';
+
+try {
+ const dir = await opendir('./');
+ for await (const dirent of dir)
+ console.log(dirent.name);
+} catch (err) {
+ console.error(err);
+}
```
-The last three bytes are null bytes (`'\0'`), to compensate the over-truncation.
+#### `dir.close()`
+
+
+* Returns: {Promise}
+
+Asynchronously close the directory's underlying resource handle.
+Subsequent reads will result in errors.
+
+A promise is returned that will be resolved after the resource has been
+closed.
+
+#### `dir.close(callback)`
+
+
+* `callback` {Function}
+ * `err` {Error}
+
+Asynchronously close the directory's underlying resource handle.
+Subsequent reads will result in errors.
+
+The `callback` will be called after the resource handle has been closed.
+
+#### `dir.closeSync()`
+
+
+Synchronously close the directory's underlying resource handle.
+Subsequent reads will result in errors.
+
+#### `dir.path`
+
+
+* {string}
+
+The read-only path of this directory as was provided to [`fs.opendir()`][],
+[`fs.opendirSync()`][], or [`fsPromises.opendir()`][].
+
+#### `dir.read()`
+
+
+* Returns: {Promise} containing {fs.Dirent|null}
+
+Asynchronously read the next directory entry via readdir(3) as an
+{fs.Dirent}.
+
+A promise is returned that will be resolved with an {fs.Dirent}, or `null`
+if there are no more directory entries to read.
+
+Directory entries returned by this function are in no particular order as
+provided by the operating system's underlying directory mechanisms.
+Entries added or removed while iterating over the directory might not be
+included in the iteration results.
+
+#### `dir.read(callback)`
+
+
+* `callback` {Function}
+ * `err` {Error}
+ * `dirent` {fs.Dirent|null}
+
+Asynchronously read the next directory entry via readdir(3) as an
+{fs.Dirent}.
+
+After the read is completed, the `callback` will be called with an
+{fs.Dirent}, or `null` if there are no more directory entries to read.
+
+Directory entries returned by this function are in no particular order as
+provided by the operating system's underlying directory mechanisms.
+Entries added or removed while iterating over the directory might not be
+included in the iteration results.
+
+#### `dir.readSync()`
+
+
+* Returns: {fs.Dirent|null}
+
+Synchronously read the next directory entry as an {fs.Dirent}. See the
+POSIX readdir(3) documentation for more detail.
+
+If there are no more directory entries to read, `null` will be returned.
+
+Directory entries returned by this function are in no particular order as
+provided by the operating system's underlying directory mechanisms.
+Entries added or removed while iterating over the directory might not be
+included in the iteration results.
+
+#### `dir[Symbol.asyncIterator]()`
+
+
+* Returns: {AsyncIterator} of {fs.Dirent}
+
+Asynchronously iterates over the directory until all entries have
+been read. Refer to the POSIX readdir(3) documentation for more detail.
+
+Entries returned by the async iterator are always an {fs.Dirent}.
+The `null` case from `dir.read()` is handled internally.
+
+See {fs.Dir} for an example.
+
+Directory entries returned by this iterator are in no particular order as
+provided by the operating system's underlying directory mechanisms.
+Entries added or removed while iterating over the directory might not be
+included in the iteration results.
+
+### Class: `fs.Dirent`
+
+
+A representation of a directory entry, which can be a file or a subdirectory
+within the directory, as returned by reading from an {fs.Dir}. The
+directory entry is a combination of the file name and file type pairs.
+
+Additionally, when [`fs.readdir()`][] or [`fs.readdirSync()`][] is called with
+the `withFileTypes` option set to `true`, the resulting array is filled with
+{fs.Dirent} objects, rather than strings or {Buffer}s.
+
+#### `dirent.isBlockDevice()`
+
+
+* Returns: {boolean}
+
+Returns `true` if the {fs.Dirent} object describes a block device.
+
+#### `dirent.isCharacterDevice()`
+
+
+* Returns: {boolean}
+
+Returns `true` if the {fs.Dirent} object describes a character device.
+
+#### `dirent.isDirectory()`
+
+
+* Returns: {boolean}
+
+Returns `true` if the {fs.Dirent} object describes a file system
+directory.
+
+#### `dirent.isFIFO()`
+
+
+* Returns: {boolean}
+
+Returns `true` if the {fs.Dirent} object describes a first-in-first-out
+(FIFO) pipe.
+
+#### `dirent.isFile()`
+
+
+* Returns: {boolean}
+
+Returns `true` if the {fs.Dirent} object describes a regular file.
+
+#### `dirent.isSocket()`
+
+
+* Returns: {boolean}
+
+Returns `true` if the {fs.Dirent} object describes a socket.
+
+#### `dirent.isSymbolicLink()`
+
+
+* Returns: {boolean}
+
+Returns `true` if the {fs.Dirent} object describes a symbolic link.
+
+#### `dirent.name`
+
+
+* {string|Buffer}
+
+The file name that this {fs.Dirent} object refers to. The type of this
+value is determined by the `options.encoding` passed to [`fs.readdir()`][] or
+[`fs.readdirSync()`][].
-#### `filehandle.utimes(atime, mtime)`
+### Class: `fs.FSWatcher`
-* `atime` {number|string|Date}
-* `mtime` {number|string|Date}
-* Returns: {Promise}
+* Extends {EventEmitter}
-Change the file system timestamps of the object referenced by the `FileHandle`
-then resolves the `Promise` with no arguments upon success.
+A successful call to [`fs.watch()`][] method will return a new {fs.FSWatcher}
+object.
-This function does not work on AIX versions before 7.1, it will resolve the
-`Promise` with an error using code `UV_ENOSYS`.
+All {fs.FSWatcher} objects emit a `'change'` event whenever a specific watched
+file is modified.
-#### `filehandle.write(buffer[, offset[, length[, position]]])`
+#### Event: `'change'`
-* `buffer` {Buffer|Uint8Array|string|Object}
-* `offset` {integer}
-* `length` {integer}
-* `position` {integer}
-* Returns: {Promise}
-
-Write `buffer` to the file.
-
-The `Promise` is resolved with an object containing a `bytesWritten` property
-identifying the number of bytes written, and a `buffer` property containing
-a reference to the `buffer` written.
-
-`offset` determines the part of the buffer to be written, and `length` is
-an integer specifying the number of bytes to write.
+* `eventType` {string} The type of change event that has occurred
+* `filename` {string|Buffer} The filename that changed (if relevant/available)
-`position` refers to the offset from the beginning of the file where this data
-should be written. If `typeof position !== 'number'`, the data will be written
-at the current position. See pwrite(2).
+Emitted when something changes in a watched directory or file.
+See more details in [`fs.watch()`][].
-It is unsafe to use `filehandle.write()` multiple times on the same file
-without waiting for the `Promise` to be resolved (or rejected). For this
-scenario, use [`fs.createWriteStream()`][].
+The `filename` argument may not be provided depending on operating system
+support. If `filename` is provided, it will be provided as a {Buffer} if
+`fs.watch()` is called with its `encoding` option set to `'buffer'`, otherwise
+`filename` will be a UTF-8 string.
-On Linux, positional writes do not work when the file is opened in append mode.
-The kernel ignores the position argument and always appends the data to
-the end of the file.
+```js esm
+import { watch } from 'fs';
+// Example when handled through fs.watch() listener
+watch('./tmp', { encoding: 'buffer' }, (eventType, filename) => {
+ if (filename) {
+ console.log(filename);
+    // Prints: <Buffer ...>
+ }
+});
+```
-#### `filehandle.write(string[, position[, encoding]])`
+#### Event: `'close'`
-* `string` {string|Object}
-* `position` {integer}
-* `encoding` {string} **Default:** `'utf8'`
-* Returns: {Promise}
-
-Write `string` to the file. If `string` is not a string, or an
-object with an own `toString` function property, then an exception is thrown.
+Emitted when the watcher stops watching for changes. The closed
+{fs.FSWatcher} object is no longer usable in the event handler.
-The `Promise` is resolved with an object containing a `bytesWritten` property
-identifying the number of bytes written, and a `buffer` property containing
-a reference to the `string` written.
+#### Event: `'error'`
+
-`position` refers to the offset from the beginning of the file where this data
-should be written. If the type of `position` is not a `number` the data
-will be written at the current position. See pwrite(2).
+* `error` {Error}
-`encoding` is the expected string encoding.
+Emitted when an error occurs while watching the file. The errored
+{fs.FSWatcher} object is no longer usable in the event handler.
-It is unsafe to use `filehandle.write()` multiple times on the same file
-without waiting for the `Promise` to be resolved (or rejected). For this
-scenario, use [`fs.createWriteStream()`][].
+#### `watcher.close()`
+
-On Linux, positional writes do not work when the file is opened in append mode.
-The kernel ignores the position argument and always appends the data to
-the end of the file.
+Stop watching for changes on the given {fs.FSWatcher}. Once stopped, the
+{fs.FSWatcher} object is no longer usable.
-#### `filehandle.writeFile(data, options)`
+#### `watcher.ref()`
-* `data` {string|Buffer|Uint8Array|Object}
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `'utf8'`
-* Returns: {Promise}
-
-Asynchronously writes data to a file, replacing the file if it already exists.
-`data` can be a string, a buffer, or an object with an own `toString` function
-property. The `Promise` is resolved with no arguments upon success.
+* Returns: {fs.FSWatcher}
-The `encoding` option is ignored if `data` is a buffer.
+When called, requests that the Node.js event loop *not* exit so long as the
+{fs.FSWatcher} is active. Calling `watcher.ref()` multiple times will have
+no effect.
-If `options` is a string, then it specifies the encoding.
+By default, all {fs.FSWatcher} objects are "ref'ed", making it normally
+unnecessary to call `watcher.ref()` unless `watcher.unref()` had been
+called previously.
-The `FileHandle` has to support writing.
+#### `watcher.unref()`
+
-It is unsafe to use `filehandle.writeFile()` multiple times on the same file
-without waiting for the `Promise` to be resolved (or rejected).
+* Returns: {fs.FSWatcher}
-If one or more `filehandle.write()` calls are made on a file handle and then a
-`filehandle.writeFile()` call is made, the data will be written from the
-current position till the end of the file. It doesn't always write from the
-beginning of the file.
+When called, the active {fs.FSWatcher} object will not require the Node.js
+event loop to remain active. If there is no other activity keeping the
+event loop running, the process may exit before the {fs.FSWatcher} object's
+callback is invoked. Calling `watcher.unref()` multiple times will have
+no effect.
-#### `filehandle.writev(buffers[, position])`
+### Class: `fs.StatWatcher`
-* `buffers` {ArrayBufferView[]}
-* `position` {integer}
-* Returns: {Promise}
+* Extends {EventEmitter}
-Write an array of `ArrayBufferView`s to the file.
+A successful call to `fs.watchFile()` method will return a new {fs.StatWatcher}
+object.
-The `Promise` is resolved with an object containing a `bytesWritten` property
-identifying the number of bytes written, and a `buffers` property containing
-a reference to the `buffers` input.
+#### `watcher.ref()`
+
-`position` is the offset from the beginning of the file where this data
-should be written. If `typeof position !== 'number'`, the data will be written
-at the current position.
+* Returns: {fs.StatWatcher}
-It is unsafe to call `writev()` multiple times on the same file without waiting
-for the previous operation to complete.
+When called, requests that the Node.js event loop *not* exit so long as the
+{fs.StatWatcher} is active. Calling `watcher.ref()` multiple times will have
+no effect.
-On Linux, positional writes don't work when the file is opened in append mode.
-The kernel ignores the position argument and always appends the data to
-the end of the file.
+By default, all {fs.StatWatcher} objects are "ref'ed", making it normally
+unnecessary to call `watcher.ref()` unless `watcher.unref()` had been
+called previously.
-### `fsPromises.access(path[, mode])`
+#### `watcher.unref()`
-* `path` {string|Buffer|URL}
-* `mode` {integer} **Default:** `fs.constants.F_OK`
-* Returns: {Promise}
+* Returns: {fs.StatWatcher}
-Tests a user's permissions for the file or directory specified by `path`.
-The `mode` argument is an optional integer that specifies the accessibility
-checks to be performed. Check [File access constants][] for possible values
-of `mode`. It is possible to create a mask consisting of the bitwise OR of
-two or more values (e.g. `fs.constants.W_OK | fs.constants.R_OK`).
+When called, the active {fs.StatWatcher} object will not require the Node.js
+event loop to remain active. If there is no other activity keeping the
+event loop running, the process may exit before the {fs.StatWatcher} object's
+callback is invoked. Calling `watcher.unref()` multiple times will have
+no effect.
-If the accessibility check is successful, the `Promise` is resolved with no
-value. If any of the accessibility checks fail, the `Promise` is rejected
-with an `Error` object. The following example checks if the file
-`/etc/passwd` can be read and written by the current process.
+### Class: `fs.ReadStream`
+
-```js
-const fs = require('fs');
-const fsPromises = fs.promises;
+* Extends: {stream.Readable}
-fsPromises.access('/etc/passwd', fs.constants.R_OK | fs.constants.W_OK)
- .then(() => console.log('can access'))
- .catch(() => console.error('cannot access'));
-```
+Instances of {fs.ReadStream} are created and returned using the
+[`fs.createReadStream()`][] function.
-Using `fsPromises.access()` to check for the accessibility of a file before
-calling `fsPromises.open()` is not recommended. Doing so introduces a race
-condition, since other processes may change the file's state between the two
-calls. Instead, user code should open/read/write the file directly and handle
-the error raised if the file is not accessible.
+#### Event: `'close'`
+
-### `fsPromises.appendFile(path, data[, options])`
+Emitted when the {fs.ReadStream}'s underlying file descriptor has been closed.
+
+#### Event: `'open'`
-* `path` {string|Buffer|URL|FileHandle} filename or `FileHandle`
-* `data` {string|Buffer}
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `'utf8'`
- * `mode` {integer} **Default:** `0o666`
- * `flag` {string} See [support of file system `flags`][]. **Default:** `'a'`.
-* Returns: {Promise}
+* `fd` {integer} Integer file descriptor used by the {fs.ReadStream}.
-Asynchronously append data to a file, creating the file if it does not yet
-exist. `data` can be a string or a [`Buffer`][]. The `Promise` will be
-resolved with no arguments upon success.
+Emitted when the {fs.ReadStream}'s file descriptor has been opened.
-If `options` is a string, then it specifies the encoding.
+#### Event: `'ready'`
+
-The `path` may be specified as a `FileHandle` that has been opened
-for appending (using `fsPromises.open()`).
+Emitted when the {fs.ReadStream} is ready to be used.
-### `fsPromises.chmod(path, mode)`
+Fires immediately after `'open'`.
+
+#### `readStream.bytesRead`
-* `path` {string|Buffer|URL}
-* `mode` {string|integer}
-* Returns: {Promise}
+* {number}
-Changes the permissions of a file then resolves the `Promise` with no
-arguments upon succces.
+The number of bytes that have been read so far.
-### `fsPromises.chown(path, uid, gid)`
+#### `readStream.path`
-* `path` {string|Buffer|URL}
-* `uid` {integer}
-* `gid` {integer}
-* Returns: {Promise}
+* {string|Buffer}
-Changes the ownership of a file then resolves the `Promise` with no arguments
-upon success.
+The path to the file the stream is reading from as specified in the first
+argument to `fs.createReadStream()`. If `path` is passed as a string, then
+`readStream.path` will be a string. If `path` is passed as a {Buffer}, then
+`readStream.path` will be a {Buffer}.
-### `fsPromises.copyFile(src, dest[, mode])`
+#### `readStream.pending`
-* `src` {string|Buffer|URL} source filename to copy
-* `dest` {string|Buffer|URL} destination filename of the copy operation
-* `mode` {integer} modifiers for copy operation. **Default:** `0`.
-* Returns: {Promise}
+* {boolean}
-Asynchronously copies `src` to `dest`. By default, `dest` is overwritten if it
-already exists. The `Promise` will be resolved with no arguments upon success.
+This property is `true` if the underlying file has not been opened yet,
+i.e. before the `'ready'` event is emitted.
-Node.js makes no guarantees about the atomicity of the copy operation. If an
-error occurs after the destination file has been opened for writing, Node.js
-will attempt to remove the destination.
+### Class: `fs.Stats`
+
-`mode` is an optional integer that specifies the behavior
-of the copy operation. It is possible to create a mask consisting of the bitwise
-OR of two or more values (e.g.
-`fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
+A {fs.Stats} object provides information about a file.
-* `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest` already
- exists.
-* `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create a
- copy-on-write reflink. If the platform does not support copy-on-write, then a
- fallback copy mechanism is used.
-* `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
- create a copy-on-write reflink. If the platform does not support
- copy-on-write, then the operation will fail.
+Objects returned from [`fs.stat()`][], [`fs.lstat()`][] and [`fs.fstat()`][] and
+their synchronous counterparts are of this type.
+If `bigint` in the `options` passed to those methods is true, the numeric values
+will be `bigint` instead of `number`, and the object will contain additional
+nanosecond-precision properties suffixed with `Ns`.
-```js
-const {
- promises: fsPromises,
- constants: {
- COPYFILE_EXCL
- }
-} = require('fs');
+```console
+Stats {
+ dev: 2114,
+ ino: 48064969,
+ mode: 33188,
+ nlink: 1,
+ uid: 85,
+ gid: 100,
+ rdev: 0,
+ size: 527,
+ blksize: 4096,
+ blocks: 8,
+ atimeMs: 1318289051000.1,
+ mtimeMs: 1318289051000.1,
+ ctimeMs: 1318289051000.1,
+ birthtimeMs: 1318289051000.1,
+ atime: Mon, 10 Oct 2011 23:24:11 GMT,
+ mtime: Mon, 10 Oct 2011 23:24:11 GMT,
+ ctime: Mon, 10 Oct 2011 23:24:11 GMT,
+ birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
+```
-// destination.txt will be created or overwritten by default.
-fsPromises.copyFile('source.txt', 'destination.txt')
- .then(() => console.log('source.txt was copied to destination.txt'))
- .catch(() => console.log('The file could not be copied'));
+`bigint` version:
-// By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
-fsPromises.copyFile('source.txt', 'destination.txt', COPYFILE_EXCL)
- .then(() => console.log('source.txt was copied to destination.txt'))
- .catch(() => console.log('The file could not be copied'));
+```console
+BigIntStats {
+ dev: 2114n,
+ ino: 48064969n,
+ mode: 33188n,
+ nlink: 1n,
+ uid: 85n,
+ gid: 100n,
+ rdev: 0n,
+ size: 527n,
+ blksize: 4096n,
+ blocks: 8n,
+ atimeMs: 1318289051000n,
+ mtimeMs: 1318289051000n,
+ ctimeMs: 1318289051000n,
+ birthtimeMs: 1318289051000n,
+ atimeNs: 1318289051000000000n,
+ mtimeNs: 1318289051000000000n,
+ ctimeNs: 1318289051000000000n,
+ birthtimeNs: 1318289051000000000n,
+ atime: Mon, 10 Oct 2011 23:24:11 GMT,
+ mtime: Mon, 10 Oct 2011 23:24:11 GMT,
+ ctime: Mon, 10 Oct 2011 23:24:11 GMT,
+ birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
```
-### `fsPromises.lchmod(path, mode)`
+#### `stats.isBlockDevice()`
-* `path` {string|Buffer|URL}
-* `mode` {integer}
-* Returns: {Promise}
+* Returns: {boolean}
-Changes the permissions on a symbolic link then resolves the `Promise` with
-no arguments upon success. This method is only implemented on macOS.
+Returns `true` if the {fs.Stats} object describes a block device.
-### `fsPromises.lchown(path, uid, gid)`
+#### `stats.isCharacterDevice()`
-* `path` {string|Buffer|URL}
-* `uid` {integer}
-* `gid` {integer}
-* Returns: {Promise}
+* Returns: {boolean}
-Changes the ownership on a symbolic link then resolves the `Promise` with
-no arguments upon success.
+Returns `true` if the {fs.Stats} object describes a character device.
-### `fsPromises.lutimes(path, atime, mtime)`
+#### `stats.isDirectory()`
-* `path` {string|Buffer|URL}
-* `atime` {number|string|Date}
-* `mtime` {number|string|Date}
-* Returns: {Promise}
+* Returns: {boolean}
-Changes the access and modification times of a file in the same way as
-[`fsPromises.utimes()`][], with the difference that if the path refers to a
-symbolic link, then the link is not dereferenced: instead, the timestamps of
-the symbolic link itself are changed.
+Returns `true` if the {fs.Stats} object describes a file system directory.
-Upon success, the `Promise` is resolved without arguments.
+If the {fs.Stats} object was obtained from [`fs.lstat()`][], this method will
+always return `false`. This is because [`fs.lstat()`][] returns information
+about a symbolic link itself and not the path it resolves to.
-### `fsPromises.link(existingPath, newPath)`
+#### `stats.isFIFO()`
-* `existingPath` {string|Buffer|URL}
-* `newPath` {string|Buffer|URL}
-* Returns: {Promise}
+* Returns: {boolean}
-Asynchronous link(2). The `Promise` is resolved with no arguments upon success.
+Returns `true` if the {fs.Stats} object describes a first-in-first-out (FIFO)
+pipe.
-### `fsPromises.lstat(path[, options])`
+#### `stats.isFile()`
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `bigint` {boolean} Whether the numeric values in the returned
- [`fs.Stats`][] object should be `bigint`. **Default:** `false`.
-* Returns: {Promise}
+* Returns: {boolean}
-Asynchronous lstat(2). The `Promise` is resolved with the [`fs.Stats`][] object
-for the given symbolic link `path`.
+Returns `true` if the {fs.Stats} object describes a regular file.
-### `fsPromises.mkdir(path[, options])`
+#### `stats.isSocket()`
-* `path` {string|Buffer|URL}
-* `options` {Object|integer}
- * `recursive` {boolean} **Default:** `false`
- * `mode` {string|integer} Not supported on Windows. **Default:** `0o777`.
-* Returns: {Promise}
-
-Asynchronously creates a directory then resolves the `Promise` with either no
-arguments, or the first directory path created if `recursive` is `true`.
+* Returns: {boolean}
-The optional `options` argument can be an integer specifying `mode` (permission
-and sticky bits), or an object with a `mode` property and a `recursive`
-property indicating whether parent directories should be created. Calling
-`fsPromises.mkdir()` when `path` is a directory that exists results in a
-rejection only when `recursive` is false.
+Returns `true` if the {fs.Stats} object describes a socket.
-### `fsPromises.mkdtemp(prefix[, options])`
+#### `stats.isSymbolicLink()`
-* `prefix` {string}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
-* Returns: {Promise}
+* Returns: {boolean}
-Creates a unique temporary directory and resolves the `Promise` with the created
-directory path. A unique directory name is generated by appending six random
-characters to the end of the provided `prefix`. Due to platform
-inconsistencies, avoid trailing `X` characters in `prefix`. Some platforms,
-notably the BSDs, can return more than six random characters, and replace
-trailing `X` characters in `prefix` with random characters.
+Returns `true` if the {fs.Stats} object describes a symbolic link.
-The optional `options` argument can be a string specifying an encoding, or an
-object with an `encoding` property specifying the character encoding to use.
+This method is only valid when using [`fs.lstat()`][].
-```js
-fsPromises.mkdtemp(path.join(os.tmpdir(), 'foo-'))
- .catch(console.error);
-```
+#### `stats.dev`
-The `fsPromises.mkdtemp()` method will append the six randomly selected
-characters directly to the `prefix` string. For instance, given a directory
-`/tmp`, if the intention is to create a temporary directory *within* `/tmp`, the
-`prefix` must end with a trailing platform-specific path separator
-(`require('path').sep`).
+* {number|bigint}
-### `fsPromises.open(path, flags[, mode])`
-
+The numeric identifier of the device containing the file.
-* `path` {string|Buffer|URL}
-* `flags` {string|number} See [support of file system `flags`][].
- **Default:** `'r'`.
-* `mode` {string|integer} **Default:** `0o666` (readable and writable)
-* Returns: {Promise}
+#### `stats.ino`
-Asynchronous file open that returns a `Promise` that, when resolved, yields a
-`FileHandle` object. See open(2).
+* {number|bigint}
-`mode` sets the file mode (permission and sticky bits), but only if the file was
-created.
+The file system specific "Inode" number for the file.
-Some characters (`< > : " / \ | ? *`) are reserved under Windows as documented
-by [Naming Files, Paths, and Namespaces][]. Under NTFS, if the filename contains
-a colon, Node.js will open a file system stream, as described by
-[this MSDN page][MSDN-Using-Streams].
+#### `stats.mode`
-### `fsPromises.opendir(path[, options])`
-
+* {number|bigint}
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `encoding` {string|null} **Default:** `'utf8'`
- * `bufferSize` {number} Number of directory entries that are buffered
- internally when reading from the directory. Higher values lead to better
- performance but higher memory usage. **Default:** `32`
-* Returns: {Promise} containing {fs.Dir}
+A bit-field describing the file type and mode.
-Asynchronously open a directory. See opendir(3).
+#### `stats.nlink`
-Creates an [`fs.Dir`][], which contains all further functions for reading from
-and cleaning up the directory.
+* {number|bigint}
-The `encoding` option sets the encoding for the `path` while opening the
-directory and subsequent read operations.
+The number of hard-links that exist for the file.
-Example using async iteration:
+#### `stats.uid`
-```js
-const fs = require('fs');
+* {number|bigint}
-async function print(path) {
- const dir = await fs.promises.opendir(path);
- for await (const dirent of dir) {
- console.log(dirent.name);
- }
-}
-print('./').catch(console.error);
-```
+The numeric user identifier of the user that owns the file (POSIX).
-### `fsPromises.readdir(path[, options])`
-
+#### `stats.gid`
-* `path` {string|Buffer|URL}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
- * `withFileTypes` {boolean} **Default:** `false`
-* Returns: {Promise}
+* {number|bigint}
-Reads the contents of a directory then resolves the `Promise` with an array
-of the names of the files in the directory excluding `'.'` and `'..'`.
+The numeric group identifier of the group that owns the file (POSIX).
-The optional `options` argument can be a string specifying an encoding, or an
-object with an `encoding` property specifying the character encoding to use for
-the filenames. If the `encoding` is set to `'buffer'`, the filenames returned
-will be passed as `Buffer` objects.
+#### `stats.rdev`
-If `options.withFileTypes` is set to `true`, the resolved array will contain
-[`fs.Dirent`][] objects.
+* {number|bigint}
-```js
-const fs = require('fs');
+A numeric device identifier if the file represents a device.
-async function print(path) {
- const files = await fs.promises.readdir(path);
- for (const file of files) {
- console.log(file);
- }
-}
-print('./').catch(console.error);
-```
+#### `stats.size`
-### `fsPromises.readFile(path[, options])`
-
+* {number|bigint}
-* `path` {string|Buffer|URL|FileHandle} filename or `FileHandle`
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `null`
- * `flag` {string} See [support of file system `flags`][]. **Default:** `'r'`.
- * `signal` {AbortSignal} allows aborting an in-progress readFile
-* Returns: {Promise}
+The size of the file in bytes.
-Asynchronously reads the entire contents of a file.
+#### `stats.blksize`
+
+* {number|bigint}
-The `Promise` is resolved with the contents of the file. If no encoding is
-specified (using `options.encoding`), the data is returned as a `Buffer`
-object. Otherwise, the data will be a string.
+The file system block size for I/O operations.
-If `options` is a string, then it specifies the encoding.
+#### `stats.blocks`
-When the `path` is a directory, the behavior of `fsPromises.readFile()` is
-platform-specific. On macOS, Linux, and Windows, the promise will be rejected
-with an error. On FreeBSD, a representation of the directory's contents will be
-returned.
+* {number|bigint}
-It is possible to abort an ongoing `readFile` using an `AbortSignal`. If a
-request is aborted the promise returned is rejected with an `AbortError`:
+The number of blocks allocated for this file.
-```js
-const controller = new AbortController();
-const signal = controller.signal;
-readFile(fileName, { signal }).then((file) => { /* ... */ });
-// Abort the request
-controller.abort();
-```
+#### `stats.atimeMs`
+
-Aborting an ongoing request does not abort individual operating
-system requests but rather the internal buffering `fs.readFile` performs.
+* {number|bigint}
-Any specified `FileHandle` has to support reading.
+The timestamp indicating the last time this file was accessed expressed in
+milliseconds since the POSIX Epoch.
-### `fsPromises.readlink(path[, options])`
+#### `stats.mtimeMs`
-* `path` {string|Buffer|URL}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
-* Returns: {Promise}
-
-Asynchronous readlink(2). The `Promise` is resolved with the `linkString` upon
-success.
+* {number|bigint}
-The optional `options` argument can be a string specifying an encoding, or an
-object with an `encoding` property specifying the character encoding to use for
-the link path returned. If the `encoding` is set to `'buffer'`, the link path
-returned will be passed as a `Buffer` object.
+The timestamp indicating the last time this file was modified expressed in
+milliseconds since the POSIX Epoch.
-### `fsPromises.realpath(path[, options])`
+#### `stats.ctimeMs`
-* `path` {string|Buffer|URL}
-* `options` {string|Object}
- * `encoding` {string} **Default:** `'utf8'`
-* Returns: {Promise}
+* {number|bigint}
-Determines the actual location of `path` using the same semantics as the
-`fs.realpath.native()` function then resolves the `Promise` with the resolved
-path.
+The timestamp indicating the last time the file status was changed expressed
+in milliseconds since the POSIX Epoch.
-Only paths that can be converted to UTF8 strings are supported.
+#### `stats.birthtimeMs`
+
-The optional `options` argument can be a string specifying an encoding, or an
-object with an `encoding` property specifying the character encoding to use for
-the path. If the `encoding` is set to `'buffer'`, the path returned will be
-passed as a `Buffer` object.
+* {number|bigint}
-On Linux, when Node.js is linked against musl libc, the procfs file system must
-be mounted on `/proc` in order for this function to work. Glibc does not have
-this restriction.
+The timestamp indicating the creation time of this file expressed in
+milliseconds since the POSIX Epoch.
-### `fsPromises.rename(oldPath, newPath)`
+#### `stats.atimeNs`
-* `oldPath` {string|Buffer|URL}
-* `newPath` {string|Buffer|URL}
-* Returns: {Promise}
+* {bigint}
-Renames `oldPath` to `newPath` and resolves the `Promise` with no arguments
-upon success.
+Only present when `bigint: true` is passed into the method that generates
+the object.
+The timestamp indicating the last time this file was accessed expressed in
+nanoseconds since the POSIX Epoch.
-### `fsPromises.rmdir(path[, options])`
+#### `stats.mtimeNs`
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
- `EPERM` error is encountered, Node.js retries the operation with a linear
- backoff wait of `retryDelay` milliseconds longer on each try. This option
- represents the number of retries. This option is ignored if the `recursive`
- option is not `true`. **Default:** `0`.
- * `recursive` {boolean} If `true`, perform a recursive directory removal. In
- recursive mode, errors are not reported if `path` does not exist, and
- operations are retried on failure. **Default:** `false`.
- * `retryDelay` {integer} The amount of time in milliseconds to wait between
- retries. This option is ignored if the `recursive` option is not `true`.
- **Default:** `100`.
-* Returns: {Promise}
+* {bigint}
-Removes the directory identified by `path` then resolves the `Promise` with
-no arguments upon success.
+Only present when `bigint: true` is passed into the method that generates
+the object.
+The timestamp indicating the last time this file was modified expressed in
+nanoseconds since the POSIX Epoch.
-Using `fsPromises.rmdir()` on a file (not a directory) results in the
-`Promise` being rejected with an `ENOENT` error on Windows and an `ENOTDIR`
-error on POSIX.
+#### `stats.ctimeNs`
+
-Setting `recursive` to `true` results in behavior similar to the Unix command
-`rm -rf`: an error will not be raised for paths that do not exist, and paths
-that represent files will be deleted. The permissive behavior of the
-`recursive` option is deprecated, `ENOTDIR` and `ENOENT` will be thrown in
-the future.
+* {bigint}
-### `fsPromises.rm(path[, options])`
+Only present when `bigint: true` is passed into the method that generates
+the object.
+The timestamp indicating the last time the file status was changed expressed
+in nanoseconds since the POSIX Epoch.
+
+#### `stats.birthtimeNs`
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `force` {boolean} When `true`, exceptions will be ignored if `path` does
- not exist. **Default:** `false`.
- * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
- `EPERM` error is encountered, Node.js will retry the operation with a linear
- backoff wait of `retryDelay` milliseconds longer on each try. This option
- represents the number of retries. This option is ignored if the `recursive`
- option is not `true`. **Default:** `0`.
- * `recursive` {boolean} If `true`, perform a recursive directory removal. In
- recursive mode operations are retried on failure. **Default:** `false`.
- * `retryDelay` {integer} The amount of time in milliseconds to wait between
- retries. This option is ignored if the `recursive` option is not `true`.
- **Default:** `100`.
+* {bigint}
-Removes files and directories (modeled on the standard POSIX `rm` utility).
-Resolves the `Promise` with no arguments on success.
+Only present when `bigint: true` is passed into the method that generates
+the object.
+The timestamp indicating the creation time of this file expressed in
+nanoseconds since the POSIX Epoch.
-### `fsPromises.stat(path[, options])`
+#### `stats.atime`
-* `path` {string|Buffer|URL}
-* `options` {Object}
- * `bigint` {boolean} Whether the numeric values in the returned
- [`fs.Stats`][] object should be `bigint`. **Default:** `false`.
-* Returns: {Promise}
+* {Date}
-The `Promise` is resolved with the [`fs.Stats`][] object for the given `path`.
+The timestamp indicating the last time this file was accessed.
-### `fsPromises.symlink(target, path[, type])`
+#### `stats.mtime`
-* `target` {string|Buffer|URL}
-* `path` {string|Buffer|URL}
-* `type` {string} **Default:** `'file'`
-* Returns: {Promise}
-
-Creates a symbolic link then resolves the `Promise` with no arguments upon
-success.
+* {Date}
-The `type` argument is only used on Windows platforms and can be one of `'dir'`,
-`'file'`, or `'junction'`. Windows junction points require the destination path
-to be absolute. When using `'junction'`, the `target` argument will
-automatically be normalized to absolute path.
+The timestamp indicating the last time this file was modified.
-### `fsPromises.truncate(path[, len])`
+#### `stats.ctime`
-* `path` {string|Buffer|URL}
-* `len` {integer} **Default:** `0`
-* Returns: {Promise}
+* {Date}
-Truncates the `path` then resolves the `Promise` with no arguments upon
-success. The `path` *must* be a string or `Buffer`.
+The timestamp indicating the last time the file status was changed.
-### `fsPromises.unlink(path)`
+#### `stats.birthtime`
-* `path` {string|Buffer|URL}
-* Returns: {Promise}
+* {Date}
+
+The timestamp indicating the creation time of this file.
-Asynchronous unlink(2). The `Promise` is resolved with no arguments upon
-success.
+#### Stat time values
-### `fsPromises.utimes(path, atime, mtime)`
+The `atimeMs`, `mtimeMs`, `ctimeMs`, `birthtimeMs` properties are
+numeric values that hold the corresponding times in milliseconds. Their
+precision is platform specific. When `bigint: true` is passed into the
+method that generates the object, the properties will be [bigints][],
+otherwise they will be [numbers][MDN-Number].
+
+The `atimeNs`, `mtimeNs`, `ctimeNs`, `birthtimeNs` properties are
+[bigints][] that hold the corresponding times in nanoseconds. They are
+only present when `bigint: true` is passed into the method that generates
+the object. Their precision is platform specific.
+
+`atime`, `mtime`, `ctime`, and `birthtime` are
+[`Date`][MDN-Date] object alternate representations of the various times. The
+`Date` and number values are not connected. Assigning a new number value, or
+mutating the `Date` value, will not be reflected in the corresponding alternate
+representation.
+
+The times in the stat object have the following semantics:
+
+* `atime` "Access Time": Time when file data was last accessed. Changed
+ by the mknod(2), utimes(2), and read(2) system calls.
+* `mtime` "Modified Time": Time when file data was last modified.
+ Changed by the mknod(2), utimes(2), and write(2) system calls.
+* `ctime` "Change Time": Time when file status was last changed
+ (inode data modification). Changed by the chmod(2), chown(2),
+ link(2), mknod(2), rename(2), unlink(2), utimes(2),
+ read(2), and write(2) system calls.
+* `birthtime` "Birth Time": Time of file creation. Set once when the
+ file is created. On filesystems where birthtime is not available,
+ this field may instead hold either the `ctime` or
+ `1970-01-01T00:00Z` (i.e., Unix epoch timestamp `0`). This value may be greater
+ than `atime` or `mtime` in this case. On Darwin and other FreeBSD variants,
+ also set if the `atime` is explicitly set to an earlier value than the current
+ `birthtime` using the utimes(2) system call.
+
+Prior to Node.js 0.12, the `ctime` held the `birthtime` on Windows systems. As
+of 0.12, `ctime` is not "creation time", and on Unix systems, it never was.
+
+### Class: `fs.WriteStream`
-* `path` {string|Buffer|URL}
-* `atime` {number|string|Date}
-* `mtime` {number|string|Date}
-* Returns: {Promise}
+* Extends {stream.Writable}
-Change the file system timestamps of the object referenced by `path` then
-resolves the `Promise` with no arguments upon success.
+Instances of {fs.WriteStream} are created and returned using the
+[`fs.createWriteStream()`][] function.
-The `atime` and `mtime` arguments follow these rules:
+#### Event: `'close'`
+
-* Values can be either numbers representing Unix epoch time, `Date`s, or a
- numeric string like `'123456789.0'`.
-* If the value can not be converted to a number, or is `NaN`, `Infinity` or
- `-Infinity`, an `Error` will be thrown.
+Emitted when the {fs.WriteStream}'s underlying file descriptor has been closed.
-### `fsPromises.writeFile(file, data[, options])`
+#### Event: `'open'`
+
+* `fd` {integer} Integer file descriptor used by the {fs.WriteStream}.
+
+Emitted when the {fs.WriteStream}'s file is opened.
+
+#### Event: `'ready'`
+
-* `file` {string|Buffer|URL|FileHandle} filename or `FileHandle`
-* `data` {string|Buffer|Uint8Array|Object}
-* `options` {Object|string}
- * `encoding` {string|null} **Default:** `'utf8'`
- * `mode` {integer} **Default:** `0o666`
- * `flag` {string} See [support of file system `flags`][]. **Default:** `'w'`.
- * `signal` {AbortSignal} allows aborting an in-progress writeFile
-* Returns: {Promise}
+Emitted when the {fs.WriteStream} is ready to be used.
-Asynchronously writes data to a file, replacing the file if it already exists.
-`data` can be a string, a buffer, or an object with an own `toString` function
-property. The `Promise` is resolved with no arguments upon success.
+Fires immediately after `'open'`.
-The `encoding` option is ignored if `data` is a buffer.
+#### `writeStream.bytesWritten`
+
-If `options` is a string, then it specifies the encoding.
+The number of bytes written so far. Does not include data that is still queued
+for writing.
-Any specified `FileHandle` has to support writing.
+#### `writeStream.path`
+
-It is unsafe to use `fsPromises.writeFile()` multiple times on the same file
-without waiting for the `Promise` to be fulfilled (or rejected).
+The path to the file the stream is writing to as specified in the first
+argument to [`fs.createWriteStream()`][]. If `path` is passed as a string, then
+`writeStream.path` will be a string. If `path` is passed as a {Buffer}, then
+`writeStream.path` will be a {Buffer}.
-Similarly to `fsPromises.readFile` - `fsPromises.writeFile` is a convenience
-method that performs multiple `write` calls internally to write the buffer
-passed to it. For performance sensitive code consider using
-[`fs.createWriteStream()`][].
+#### `writeStream.pending`
+
-It is possible to use an {AbortSignal} to cancel an `fsPromises.writeFile()`.
-Cancelation is "best effort", and some amount of data is likely still
-to be written.
+* {boolean}
-```js
-const controller = new AbortController();
-const { signal } = controller;
-const data = new Uint8Array(Buffer.from('Hello Node.js'));
-(async () => {
- try {
- await fs.writeFile('message.txt', data, { signal });
- } catch (err) {
- // When a request is aborted - err is an AbortError
- }
-})();
-// When the request should be aborted
-controller.abort();
-```
+This property is `true` if the underlying file has not been opened yet,
+i.e. before the `'ready'` event is emitted.
-Aborting an ongoing request does not abort individual operating
-system requests but rather the internal buffering `fs.writeFile` performs.
+### `fs.constants`
+
+* {Object}
+
+Returns an object containing commonly used constants for file system
+operations.
-## FS constants
+#### FS constants
The following constants are exported by `fs.constants`.
@@ -5875,21 +5921,21 @@ To use more than one constant, use the bitwise OR `|` operator.
Example:
-```js
-const fs = require('fs');
+```js esm
+import { open, constants } from 'fs';
const {
O_RDWR,
O_CREAT,
O_EXCL
-} = fs.constants;
+} = constants;
-fs.open('/path/to/my/file', O_RDWR | O_CREAT | O_EXCL, (err, fd) => {
+open('/path/to/my/file', O_RDWR | O_CREAT | O_EXCL, (err, fd) => {
// ...
});
```
-### File access constants
+##### File access constants
The following constants are meant for use with [`fs.access()`][].
@@ -5921,7 +5967,7 @@ The following constants are meant for use with [`fs.access()`][].
require(X) from module at path Y 1. If X is a core module, a. return the core module @@ -210,7 +210,7 @@ LOAD_PACKAGE_IMPORTS(X, DIR) 2. If no scope was found, return. 3. If the SCOPE/package.json "imports" is null or undefined, return. 4. let MATCH = PACKAGE_IMPORTS_RESOLVE(X, pathToFileURL(SCOPE), - ["node", "require"]) defined in the ESM resolver. + ["node", "require"]) defined in the ESM resolver. 5. RESOLVE_ESM_MATCH(MATCH). LOAD_PACKAGE_EXPORTS(X, DIR) @@ -221,7 +221,7 @@ LOAD_PACKAGE_EXPORTS(X, DIR) 3. Parse DIR/NAME/package.json, and look for "exports" field. 4. If "exports" is null or undefined, return. 5. let MATCH = PACKAGE_EXPORTS_RESOLVE(pathToFileURL(DIR/NAME), "." + SUBPATH, - `package.json` "exports", ["node", "require"]) defined in the ESM resolver. + `package.json` "exports", ["node", "require"]) defined in the ESM resolver. 6. RESOLVE_ESM_MATCH(MATCH) LOAD_PACKAGE_SELF(X, DIR) @@ -231,7 +231,7 @@ LOAD_PACKAGE_SELF(X, DIR) 4. If the SCOPE/package.json "name" is not the first segment of X, return. 5. let MATCH = PACKAGE_EXPORTS_RESOLVE(pathToFileURL(SCOPE), "." + X.slice("name".length), `package.json` "exports", ["node", "require"]) - defined in the ESM resolver. + defined in the ESM resolver. 6. RESOLVE_ESM_MATCH(MATCH) RESOLVE_ESM_MATCH(MATCH) @@ -244,7 +244,7 @@ RESOLVE_ESM_MATCH(MATCH) a. LOAD_AS_FILE(RESOLVED_PATH) b. LOAD_AS_DIRECTORY(RESOLVED_PATH) 5. THROW "not found" -``` +## Caching diff --git a/doc/api/n-api.md b/doc/api/n-api.md index 7d9d986b9bbf02..5bf9e890ef5d9b 100644 --- a/doc/api/n-api.md +++ b/doc/api/n-api.md @@ -1,30 +1,29 @@ -# N-API +# Node-API > Stability: 2 - Stable -N-API (pronounced N as in the letter, followed by API) -is an API for building native Addons. It is independent from -the underlying JavaScript runtime (for example, V8) and is maintained as part of -Node.js itself. This API will be Application Binary Interface (ABI) stable -across versions of Node.js. 
It is intended to insulate Addons from -changes in the underlying JavaScript engine and allow modules +Node-API (formerly N-API) is an API for building native Addons. It is +independent from the underlying JavaScript runtime (for example, V8) and is +maintained as part of Node.js itself. This API will be Application Binary +Interface (ABI) stable across versions of Node.js. It is intended to insulate +addons from changes in the underlying JavaScript engine and allow modules compiled for one major version to run on later major versions of Node.js without recompilation. The [ABI Stability][] guide provides a more in-depth explanation. Addons are built/packaged with the same approach/tools outlined in the section titled [C++ Addons][]. The only difference is the set of APIs that are used by the native code. Instead of using the V8 or [Native Abstractions for Node.js][] -APIs, the functions available in the N-API are used. +APIs, the functions available in Node-API are used. -APIs exposed by N-API are generally used to create and manipulate +APIs exposed by Node-API are generally used to create and manipulate JavaScript values. Concepts and operations generally map to ideas specified in the ECMA-262 Language Specification. The APIs have the following properties: -* All N-API calls return a status code of type `napi_status`. This +* All Node-API calls return a status code of type `napi_status`. This status indicates whether the API call succeeded or failed. * The API's return value is passed via an out parameter. * All JavaScript values are abstracted behind an opaque type named @@ -33,14 +32,14 @@ properties: using `napi_get_last_error_info`. More information can be found in the error handling section [Error handling][]. -The N-API is a C API that ensures ABI stability across Node.js versions +Node-API is a C API that ensures ABI stability across Node.js versions and different compiler levels. A C++ API can be easier to use.
To support using C++, the project maintains a C++ wrapper module called [`node-addon-api`][]. This wrapper provides an inlineable C++ API. Binaries built -with `node-addon-api` will depend on the symbols for the N-API C-based +with `node-addon-api` will depend on the symbols for the Node-API C-based functions exported by Node.js. `node-addon-api` is a more -efficient way to write code that calls N-API. Take, for example, the +efficient way to write code that calls Node-API. Take, for example, the following `node-addon-api` code. The first section shows the `node-addon-api` code and the second section shows what actually gets used in the addon. @@ -78,13 +77,13 @@ it still gets the benefits of the ABI stability provided by the C API. When using `node-addon-api` instead of the C APIs, start with the API [docs][] for `node-addon-api`. -The [N-API Resource](https://nodejs.github.io/node-addon-examples/) offers an -excellent orientation and tips for developers just getting started with N-API -and `node-addon-api`. +The [Node-API Resource](https://nodejs.github.io/node-addon-examples/) offers +an excellent orientation and tips for developers just getting started with +Node-API and `node-addon-api`. ## Implications of ABI stability -Although N-API provides an ABI stability guarantee, other parts of Node.js do +Although Node-API provides an ABI stability guarantee, other parts of Node.js do not, and any external libraries used from the addon may not. In particular, none of the following APIs provide an ABI stability guarantee across major versions: @@ -111,19 +110,19 @@ versions: ``` Thus, for an addon to remain ABI-compatible across Node.js major versions, it -must use N-API exclusively by restricting itself to using +must use Node-API exclusively by restricting itself to using ```c #include
Describe Something
in more detail here.
Describe Something
in more detail here.
node \\[options\\] index.js' +
'
Please see the' + - 'Command Line Optionsdocument for more information.
An example of a' +
'webserverwritten with Node.js which responds with' +
- '\'Hello, World!\'
:
Check' + - 'out alsothis guide
' + 'out alsothis guide