From 5c873d54fed89cb66a7208e7938b3d5d31e29613 Mon Sep 17 00:00:00 2001
From: Robert Nagy
Date: Sun, 16 Aug 2020 22:25:12 +0200
Subject: [PATCH] fix: body error on premature close

If the socket is closed before the response body has ended, the body
should error.
---
 lib/client.js            | 29 +++++++++++++++++++----------
 test/client-errors.js    | 17 ++++++++++++-----
 test/client-reconnect.js |  8 ++++++--
 test/client.js           | 20 +++++++++++++++-----
 test/pool.js             |  4 +++-
 test/request-timeout.js  |  7 ++++++-
 6 files changed, 61 insertions(+), 24 deletions(-)

diff --git a/lib/client.js b/lib/client.js
index 89000741274..417f0d8f9bb 100644
--- a/lib/client.js
+++ b/lib/client.js
@@ -429,9 +429,15 @@ class Parser extends HTTPParser {
   }

   [HTTPParser.kOnExecute] (ret) {
+    const { socket } = this
+
     if (!Number.isFinite(ret)) {
       assert(ret instanceof Error)
-      util.destroy(this.socket, ret)
+      util.destroy(socket, ret)
+      return
+    }
+
+    if (socket.destroyed) {
       return
     }

@@ -439,13 +445,13 @@
     // `data` events are emitted, and thus `socket.setTimeout` fires the
     // callback even if the data is constantly flowing into the socket.
     // See, https://github.com/nodejs/node/commit/ec2822adaad76b126b5cccdeaa1addf2376c9aa6
-    this.socket._unrefTimer()
+    socket._unrefTimer()

     // This logic cannot live in kOnHeadersComplete since we
     // have no way of slicing the parsing buffer without knowing
     // the offset which is only provided in kOnExecute.
-    if (this.upgrade && !this.socket.destroyed) {
-      const { socket, client, headers, statusCode } = this
+    if (this.upgrade) {
+      const { client, headers, statusCode } = this
       const request = client[kQueue][client[kRunningIdx]]

       assert(!socket.destroyed)
@@ -492,6 +498,10 @@
     const { client, socket } = this
     const request = client[kQueue][client[kRunningIdx]]

+    if (socket.destroyed) {
+      return
+    }
+
     // TODO: Check for content-length mismatch from server?

     assert(!this.upgrade)
@@ -551,7 +561,7 @@
   [HTTPParser.kOnBody] (chunk, offset, length) {
     const { client, socket, statusCode } = this

-    if (!statusCode) {
+    if (socket.destroyed) {
       return
     }

@@ -570,7 +580,6 @@
       // until requests has either completed or socket is destroyed.
       if (this.read > client[kMaxAbortedPayload]) {
         client[kQueue][client[kRunningIdx]++] = null
-        this.statusCode = null
         util.destroy(socket, new InformationalError('max aborted payload'))
       }
     } else if (ret === false) {
@@ -582,7 +591,7 @@
     const { client, socket, statusCode, headers, upgrade } = this
     const request = client[kQueue][client[kRunningIdx]]

-    if (!statusCode) {
+    if (socket.destroyed) {
       return
     }

@@ -610,17 +619,17 @@
       // TOOD: keep writing until maxAbortedPayload?

       // Response completed before request.
-      util.destroy(socket, new InformationalError('request reset'))
+      util.destroy(socket, new InformationalError('reset'))
     } else if (client[kReset]) {
       // Destroy socket once all requests have completed.
       // The request at the tail of the pipeline is the one
       // that requested reset and no further requests should
       // have been queued since then.
       if (!client.running) {
-        util.destroy(socket, new InformationalError('request reset'))
+        util.destroy(socket, new InformationalError('reset'))
       }
     } else if (!this.shouldKeepAlive) {
-      util.destroy(socket, new InformationalError('request reset'))
+      util.destroy(socket, new InformationalError('reset'))
     } else {
       socket.resume()
       resume(client)
diff --git a/test/client-errors.js b/test/client-errors.js
index 364f5baf724..fedcd8ee754 100644
--- a/test/client-errors.js
+++ b/test/client-errors.js
@@ -687,7 +687,7 @@ test('socket fail while ending request body', (t) => {
 })

 test('queued request should not fail on socket destroy', (t) => {
-  t.plan(2)
+  t.plan(4)

   const server = createServer()
   server.on('request', (req, res) => {
@@ -706,21 +706,25 @@ test('queued request should not fail on socket destroy', (t) => {
       method: 'GET'
     }, (err, data) => {
       t.error(err)
-      data.body.resume()
+      data.body.resume().on('error', () => {
+        t.pass()
+      })
       client[kSocket].destroy()
       client.request({
         path: '/',
         method: 'GET'
       }, (err, data) => {
         t.error(err)
-        data.body.resume()
+        data.body.resume().on('end', () => {
+          t.pass()
+        })
       })
     })
   })
 })

 test('queued request should fail on client destroy', (t) => {
-  t.plan(5)
+  t.plan(6)

   const server = createServer()
   server.on('request', (req, res) => {
@@ -741,6 +745,9 @@
     }, (err, data) => {
       t.error(err)
       data.body.resume()
+        .on('error', () => {
+          t.pass()
+        })
       client.destroy((err) => {
         t.error(err)
         t.strictEqual(requestErrored, true)
@@ -771,7 +778,7 @@ test('retry idempotent inflight', (t) => {
     const client = new Client(`http://localhost:${server.address().port}`, {
       pipelining: 3
     })
-    t.tearDown(client.destroy.bind(client))
+    t.tearDown(client.close.bind(client))

     client.request({
       path: '/',
diff --git a/test/client-reconnect.js b/test/client-reconnect.js
index 7ac28da5186..76d051ea41a 100644
--- a/test/client-reconnect.js
+++ b/test/client-reconnect.js
@@ -6,7 +6,7 @@ const { createServer } = require('http')
 const FakeTimers = require('@sinonjs/fake-timers')

 test('multiple reconnect', (t) => {
-  t.plan(2)
+  t.plan(3)

   const clock = FakeTimers.install()
   t.teardown(clock.uninstall.bind(clock))
@@ -21,7 +21,11 @@ test('multiple reconnect', (t) => {

     client.request({ path: '/', method: 'GET' }, (err, data) => {
       t.error(err)
-      data.body.resume()
+      data.body
+        .resume()
+        .on('end', () => {
+          t.pass()
+        })
     })

     let n = 0
diff --git a/test/client.js b/test/client.js
index 089931d13e2..9b22e9ce542 100644
--- a/test/client.js
+++ b/test/client.js
@@ -620,7 +620,7 @@ test('url-like url', (t) => {
 })

 test('multiple destroy callback', (t) => {
-  t.plan(3)
+  t.plan(4)
   const server = createServer((req, res) => {
     res.end()
   })
@@ -637,7 +637,11 @@

     client.request({ path: '/', method: 'GET' }, (err, data) => {
       t.error(err)
-      data.body.resume()
+      data.body
+        .resume()
+        .on('error', () => {
+          t.pass()
+        })
       client.destroy(new Error(), (err) => {
         t.error(err)
       })
@@ -649,7 +653,7 @@
 })

 test('only one streaming req at a time', (t) => {
-  t.plan(6)
+  t.plan(7)
   const server = createServer((req, res) => {
     req.pipe(res)
   })
@@ -693,7 +697,11 @@
       })
     }, (err, data) => {
       t.error(err)
-      data.body.resume()
+      data.body
+        .resume()
+        .on('end', () => {
+          t.pass()
+        })
     })
     t.strictEqual(client.busy, true)
   })
@@ -701,7 +709,7 @@
 })

 test('300 requests succeed', (t) => {
-  t.plan(300 * 2)
+  t.plan(300 * 3)
   const server = createServer((req, res) => {
     res.end('asd')
   })
@@ -720,6 +728,8 @@
       t.error(err)
       data.body.on('data', (chunk) => {
         t.strictEqual(chunk.toString(), 'asd')
+      }).on('end', () => {
+        t.pass()
       })
     })
   }
diff --git a/test/pool.js b/test/pool.js
index 98af3bf5ab0..7f7942d8ee0 100644
--- a/test/pool.js
+++ b/test/pool.js
@@ -440,7 +440,7 @@ test('pool pipeline args validation', (t) => {
 })

 test('300 requests succeed', (t) => {
-  t.plan(300 * 2)
+  t.plan(300 * 3)
   const server = createServer((req, res) => {
     res.end('asd')
   })
@@ -461,6 +461,8 @@
       t.error(err)
       data.body.on('data', (chunk) => {
         t.strictEqual(chunk.toString(), 'asd')
+      }).on('end', () => {
+        t.pass()
       })
     })
   }
diff --git a/test/request-timeout.js b/test/request-timeout.js
index 6662b30f81d..1f24b812146 100644
--- a/test/request-timeout.js
+++ b/test/request-timeout.js
@@ -70,7 +70,7 @@ test('request timeout immutable opts', (t) => {
 })

 test('Subsequent request starves', (t) => {
-  t.plan(2)
+  t.plan(3)

   const clock = FakeTimers.install()
   t.teardown(clock.uninstall.bind(clock))
@@ -89,6 +89,11 @@

     client.request({ path: '/', method: 'GET' }, (err, response) => {
       t.error(err)
+      response.body
+        .resume()
+        .on('end', () => {
+          t.pass()
+        })
     })

     client.request({ path: '/', method: 'GET', requestTimeout: 50 }, (err, response) => {
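
The sketch below is not part of the patch; it is a minimal illustration of the behaviour the commit message describes: once the parser checks socket.destroyed instead of statusCode, a response body that is cut off by a premature socket close should emit 'error' rather than finishing silently. It assumes the client is consumed through the published undici package (require('undici')); the repository's own tests require Client from the project root instead, and the toy server here deliberately announces more bytes than it sends before destroying the connection.

'use strict'

const { createServer } = require('http')
const { Client } = require('undici') // assumption: published package name

const server = createServer((req, res) => {
  // Promise a 100-byte body, write only part of it, then cut the connection.
  res.writeHead(200, { 'content-length': '100' })
  res.write('partial body')
  res.socket.destroy()
})

server.listen(0, () => {
  const client = new Client(`http://localhost:${server.address().port}`)

  client.request({ path: '/', method: 'GET' }, (err, data) => {
    if (err) throw err
    data.body
      .resume()
      .on('error', (err) => {
        // After this patch the premature close is surfaced as an error
        // on the response body instead of going unreported.
        console.error('body errored on premature close:', err.message)
        client.destroy()
        server.close()
      })
  })
})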