-
Notifications
You must be signed in to change notification settings - Fork 29.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
tls: fix writeQueueSize prop, long write timeouts #15791
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,43 @@ | ||
'use strict';
const common = require('../common');

if (!common.hasCrypto)
  common.skip('missing crypto');

const assert = require('assert');
const fixtures = require('../common/fixtures');
const tls = require('tls');

// Each iteration writes one byte; `overhead` is the constant amount of
// additional buffered data that `bufferSize` reports on top of the
// payload bytes (presumably TLS framing — the exact source is opaque here).
const iter = 10;
const overhead = 30;

const server = tls.createServer({
  key: fixtures.readKey('agent2-key.pem'),
  cert: fixtures.readKey('agent2-cert.pem')
}, common.mustCall((socket) => {
  // Drain everything the client sends so the connection can end cleanly.
  socket.on('readable', common.mustCallAtLeast(() => {
    socket.read();
  }, 1));

  socket.on('end', common.mustCall(() => {
    server.close();
  }));
}));

server.listen(0, common.mustCall(() => {
  const client = tls.connect({
    port: server.address().port,
    rejectUnauthorized: false
  }, common.mustCall(() => {
    // Nothing written yet, so nothing should be buffered.
    assert.strictEqual(client.bufferSize, 0);

    // After the i-th one-byte write, bufferSize must equal the payload
    // written so far plus the constant overhead.
    for (let i = 1; i < iter; i++) {
      client.write('a');
      assert.strictEqual(client.bufferSize, i + overhead);
    }

    // Once the writable side has finished, the buffer must be flushed.
    client.on('finish', common.mustCall(() => {
      assert.strictEqual(client.bufferSize, 0);
    }));

    client.end();
  }));
}));
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,80 @@ | ||
'use strict';
const common = require('../common');
const assert = require('assert');
const http = require('http');

// This test assesses whether long-running writes can complete
// or timeout because the socket is not aware that the backing
// stream is still writing.
// To simulate a slow client, we write a really large chunk and
// then proceed through the following cycle:
//   1) Receive first 'data' event and record currently written size
//   2) Once we've read up to the currently written size recorded above,
//      we pause the stream and wait longer than the server timeout
//   3) Socket.prototype._onTimeout triggers and should confirm
//      that the backing stream is still active and writing
//   4) Our timer fires, we resume the socket and start at 1)

const minReadSize = 250000;
const serverTimeout = common.platformTimeout(500);
let offsetTimeout = common.platformTimeout(100);
let serverConnectionHandle;
let writeSize = 3000000;
let didReceiveData = false;
// Bytes targeted by the current `write > read > _onTimeout` cycle.
let currentWriteSize = 0;

const server = http.createServer(common.mustCall((req, res) => {
  const content = Buffer.alloc(writeSize, 0x44);

  res.writeHead(200, {
    'Content-Type': 'application/octet-stream',
    'Content-Length': content.length.toString(),
    'Vary': 'Accept-Encoding'
  });

  // Keep the raw libuv handle around so the client side can inspect how
  // much of the payload is still queued for writing on the server socket.
  serverConnectionHandle = res.socket._handle;
  res.write(content);
  res.end();
}));
server.setTimeout(serverTimeout);
server.on('timeout', () => {
  // The server must never time out while the client is mid-read;
  // a timeout is only acceptable before any data was consumed.
  assert.strictEqual(didReceiveData, false, 'Should not timeout');
});

server.listen(0, common.mustCall(() => {
  http.get({
    path: '/',
    port: server.address().port
  }, common.mustCall((res) => {
    const resume = () => res.resume();
    let receivedBufferLength = 0;
    let firstReceivedAt;

    res.on('data', common.mustCallAtLeast((buf) => {
      if (receivedBufferLength === 0) {
        // Start of a cycle: read whatever the server has already flushed,
        // but never less than `minReadSize` bytes.
        currentWriteSize = Math.max(
          minReadSize,
          writeSize - serverConnectionHandle.writeQueueSize
        );
        didReceiveData = false;
        firstReceivedAt = Date.now();
      }
      receivedBufferLength += buf.length;
      if (receivedBufferLength >= currentWriteSize) {
        didReceiveData = true;
        writeSize = serverConnectionHandle.writeQueueSize;
        receivedBufferLength = 0;
        res.pause();
        // Always wait until after the server timeout would have fired,
        // measured from the first chunk of this cycle, before resuming.
        setTimeout(
          resume,
          serverTimeout + offsetTimeout - (Date.now() - firstReceivedAt)
        );
        // The extra safety offset is only needed for the first cycle.
        offsetTimeout = 0;
      }
    }, 1));

    res.on('end', common.mustCall(() => {
      server.close();
    }));
  }));
}));
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why unref? That doesn't quite match what I understood when I read "suppress the timeout"?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It'll call
_unrefActive
so that it can come back and check this condition again in whatever the `_idleTimeout`
is, if need be. Possibly the comment is confusing, but I'm obviously familiar with what it does, so I'm a bad judge.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
So … I might be wrong here, but judging from the naming I’d think
_unrefActive
only affects whether the timer handle is ref()ed or not, i.e. whether it is keeping the process alive or not … does that sound about right?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sorry, missed the notification —
_unrefActive
re-schedules a timer for a timer that's a part of the `unrefedLists`.
node/lib/timers.js
Lines 141 to 151 in 686e092
Not sure if that's a great explanation... ? Let me know if it isn't.
(Admittedly the name
_unrefTimer
in net.js is rather misleading, given that it's more like `_activeTimer`,
but I didn't name it... :()
Also, let me know once you're done reviewing and if all looks good. Planning to merge this weekend.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yeah, don’t feel like you need to wait on anything from me! There’s nothing obviously wrong in this PR.
And yes, this should just be renamed…
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
👍 Just being extra safe since this will go into LTS and is a fairly major change to something that's been broken for quite a while. :)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is there a specific time frame? You can just add a
baking-for-lts
label to indicate that the releaser (likely @gibfahn) should let this sit a while before pulling it in, if you would feel more comfortable with that.
(On the other hand, if you feel comfortable not waiting any longer than usual, feel free to voice that as well!)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Since it's a reasonably serious bug fix, it hasn't broken CITGM and no one else has expressed desire for having it bake, I think having it go in at the next opportunity should be fine. Thanks again for reviewing ❤️