diff --git a/packages/bun-usockets/src/bsd.c b/packages/bun-usockets/src/bsd.c index d2fa315e62ae6e..fbf36dd525f518 100644 --- a/packages/bun-usockets/src/bsd.c +++ b/packages/bun-usockets/src/bsd.c @@ -623,18 +623,34 @@ inline __attribute__((always_inline)) LIBUS_SOCKET_DESCRIPTOR bsd_bind_listen_fd setsockopt(listenFd, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, (void *) &optval2, sizeof(optval2)); #endif } else { - #if defined(SO_REUSEPORT) - int optval2 = 1; - setsockopt(listenFd, SOL_SOCKET, SO_REUSEPORT, (void *) &optval2, sizeof(optval2)); - #endif +#if defined(SO_REUSEPORT) + if((options & LIBUS_LISTEN_REUSE_PORT)) { + int optval2 = 1; + setsockopt(listenFd, SOL_SOCKET, SO_REUSEPORT, (void *) &optval2, sizeof(optval2)); + } +#endif } #if defined(SO_REUSEADDR) + #ifndef _WIN32 + + // Unlike on Unix, here we don't set SO_REUSEADDR, because it doesn't just + // allow binding to addresses that are in use by sockets in TIME_WAIT, it + // effectively allows 'stealing' a port which is in use by another application. + // See libuv issue #1360. 
+ + int optval3 = 1; setsockopt(listenFd, SOL_SOCKET, SO_REUSEADDR, (void *) &optval3, sizeof(optval3)); + #endif #endif #ifdef IPV6_V6ONLY + // TODO: revise support to match node.js + // if (listenAddr->ai_family == AF_INET6) { + // int disabled = (options & LIBUS_SOCKET_IPV6_ONLY) != 0; + // setsockopt(listenFd, IPPROTO_IPV6, IPV6_V6ONLY, (void *) &disabled, sizeof(disabled)); + // } int disabled = 0; setsockopt(listenFd, IPPROTO_IPV6, IPV6_V6ONLY, (void *) &disabled, sizeof(disabled)); #endif diff --git a/packages/bun-usockets/src/crypto/openssl.c b/packages/bun-usockets/src/crypto/openssl.c index 4c4c2a76d57d6c..e5f7e55ffcef8a 100644 --- a/packages/bun-usockets/src/crypto/openssl.c +++ b/packages/bun-usockets/src/crypto/openssl.c @@ -1858,9 +1858,6 @@ ssl_wrapped_context_on_close(struct us_internal_ssl_socket_t *s, int code, (struct us_wrapped_socket_context_t *)us_internal_ssl_socket_context_ext( context); - if (wrapped_context->events.on_close) { - wrapped_context->events.on_close((struct us_socket_t *)s, code, reason); - } // writting here can cause the context to not be writable anymore but its the // user responsability to check for that @@ -1868,6 +1865,10 @@ ssl_wrapped_context_on_close(struct us_internal_ssl_socket_t *s, int code, wrapped_context->old_events.on_close((struct us_socket_t *)s, code, reason); } + if (wrapped_context->events.on_close) { + wrapped_context->events.on_close((struct us_socket_t *)s, code, reason); + } + us_socket_context_unref(0, wrapped_context->tcp_context); return s; } @@ -1880,9 +1881,6 @@ ssl_wrapped_context_on_writable(struct us_internal_ssl_socket_t *s) { (struct us_wrapped_socket_context_t *)us_internal_ssl_socket_context_ext( context); - if (wrapped_context->events.on_writable) { - wrapped_context->events.on_writable((struct us_socket_t *)s); - } // writting here can cause the context to not be writable anymore but its the // user responsability to check for that @@ -1890,6 +1888,10 @@ 
ssl_wrapped_context_on_writable(struct us_internal_ssl_socket_t *s) { wrapped_context->old_events.on_writable((struct us_socket_t *)s); } + if (wrapped_context->events.on_writable) { + wrapped_context->events.on_writable((struct us_socket_t *)s); + } + return s; } @@ -1916,14 +1918,14 @@ ssl_wrapped_context_on_timeout(struct us_internal_ssl_socket_t *s) { struct us_wrapped_socket_context_t *wrapped_context = (struct us_wrapped_socket_context_t *)us_internal_ssl_socket_context_ext( context); + if (wrapped_context->old_events.on_timeout) { + wrapped_context->old_events.on_timeout((struct us_socket_t *)s); + } if (wrapped_context->events.on_timeout) { wrapped_context->events.on_timeout((struct us_socket_t *)s); } - if (wrapped_context->old_events.on_timeout) { - wrapped_context->old_events.on_timeout((struct us_socket_t *)s); - } return s; } @@ -1935,15 +1937,14 @@ ssl_wrapped_context_on_long_timeout(struct us_internal_ssl_socket_t *s) { struct us_wrapped_socket_context_t *wrapped_context = (struct us_wrapped_socket_context_t *)us_internal_ssl_socket_context_ext( context); + if (wrapped_context->old_events.on_long_timeout) { + wrapped_context->old_events.on_long_timeout((struct us_socket_t *)s); + } if (wrapped_context->events.on_long_timeout) { wrapped_context->events.on_long_timeout((struct us_socket_t *)s); } - if (wrapped_context->old_events.on_long_timeout) { - wrapped_context->old_events.on_long_timeout((struct us_socket_t *)s); - } - return s; } @@ -1954,14 +1955,13 @@ ssl_wrapped_context_on_end(struct us_internal_ssl_socket_t *s) { struct us_wrapped_socket_context_t *wrapped_context = (struct us_wrapped_socket_context_t *)us_internal_ssl_socket_context_ext( context); - - if (wrapped_context->events.on_end) { - wrapped_context->events.on_end((struct us_socket_t *)s); - } - if (wrapped_context->old_events.on_end) { wrapped_context->old_events.on_end((struct us_socket_t *)s); } + if (wrapped_context->events.on_end) { + wrapped_context->events.on_end((struct 
us_socket_t *)s); + } + return s; } @@ -1973,13 +1973,13 @@ ssl_wrapped_on_connect_error(struct us_internal_ssl_socket_t *s, int code) { (struct us_wrapped_socket_context_t *)us_internal_ssl_socket_context_ext( context); + if (wrapped_context->old_events.on_connect_error) { + wrapped_context->old_events.on_connect_error((struct us_connecting_socket_t *)s, code); + } if (wrapped_context->events.on_connect_error) { wrapped_context->events.on_connect_error((struct us_connecting_socket_t *)s, code); } - if (wrapped_context->old_events.on_connect_error) { - wrapped_context->old_events.on_connect_error((struct us_connecting_socket_t *)s, code); - } return s; } @@ -1990,14 +1990,14 @@ ssl_wrapped_on_socket_connect_error(struct us_internal_ssl_socket_t *s, int code struct us_wrapped_socket_context_t *wrapped_context = (struct us_wrapped_socket_context_t *)us_internal_ssl_socket_context_ext( context); - + if (wrapped_context->old_events.on_connecting_socket_error) { + wrapped_context->old_events.on_connecting_socket_error((struct us_socket_t *)s, code); + } if (wrapped_context->events.on_connecting_socket_error) { wrapped_context->events.on_connecting_socket_error((struct us_socket_t *)s, code); } - if (wrapped_context->old_events.on_connecting_socket_error) { - wrapped_context->old_events.on_connecting_socket_error((struct us_socket_t *)s, code); - } + return s; } diff --git a/packages/bun-usockets/src/libusockets.h b/packages/bun-usockets/src/libusockets.h index c32768fc2d4fdf..cf34618f853076 100644 --- a/packages/bun-usockets/src/libusockets.h +++ b/packages/bun-usockets/src/libusockets.h @@ -96,6 +96,10 @@ enum { LIBUS_LISTEN_EXCLUSIVE_PORT = 1, /* Allow socket to keep writing after readable side closes */ LIBUS_SOCKET_ALLOW_HALF_OPEN = 2, + /* Setting reusePort allows multiple sockets on the same host to bind to the same port. Incoming connections are distributed by the operating system to listening sockets. 
This option is available only on some platforms, such as Linux 3.9+, DragonFlyBSD 3.6+, FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+*/ + LIBUS_LISTEN_REUSE_PORT = 4, + /* Setting ipv6Only will disable dual-stack support, i.e., binding to host :: won't make 0.0.0.0 be bound.*/ + LIBUS_SOCKET_IPV6_ONLY = 8, }; /* Library types publicly available */ diff --git a/packages/bun-usockets/src/loop.c b/packages/bun-usockets/src/loop.c index 581c9bb91798fe..19bb4fe9ea8167 100644 --- a/packages/bun-usockets/src/loop.c +++ b/packages/bun-usockets/src/loop.c @@ -22,6 +22,7 @@ #include #endif + /* The loop has 2 fallthrough polls */ void us_internal_loop_data_init(struct us_loop_t *loop, void (*wakeup_cb)(struct us_loop_t *loop), void (*pre_cb)(struct us_loop_t *loop), void (*post_cb)(struct us_loop_t *loop)) { diff --git a/src/bun.js/api/bun/socket.zig b/src/bun.js/api/bun/socket.zig index e87d82dd1e19e2..7d1b2b79be04b0 100644 --- a/src/bun.js/api/bun/socket.zig +++ b/src/bun.js/api/bun/socket.zig @@ -310,6 +310,8 @@ pub const SocketConfig = struct { default_data: JSC.JSValue = .zero, exclusive: bool = false, allowHalfOpen: bool = false, + reusePort: bool = false, + ipv6Only: bool = false, pub fn fromJS(vm: *JSC.VirtualMachine, opts: JSC.JSValue, globalObject: *JSC.JSGlobalObject) bun.JSError!SocketConfig { var hostname_or_unix: JSC.ZigString.Slice = JSC.ZigString.Slice.empty; @@ -317,6 +319,8 @@ pub const SocketConfig = struct { var port: ?u16 = null; var exclusive = false; var allowHalfOpen = false; + var reusePort = false; + var ipv6Only = false; var ssl: ?JSC.API.ServerConfig.SSLConfig = null; var default_data = JSValue.zero; @@ -365,11 +369,19 @@ } } - if (try opts.getTruthy(globalObject, "exclusive")) |_| { - exclusive = true; + if (try opts.getBooleanLoose(globalObject, "exclusive")) |exclusive_| { + exclusive = exclusive_; } - if (try opts.getTruthy(globalObject, "allowHalfOpen")) |_| { - allowHalfOpen = true; + if (try 
opts.getBooleanLoose(globalObject, "allowHalfOpen")) |allow_half_open| { + allowHalfOpen = allow_half_open; + } + + if (try opts.getBooleanLoose(globalObject, "reusePort")) |reuse_port| { + reusePort = reuse_port; + } + + if (try opts.getBooleanLoose(globalObject, "ipv6Only")) |ipv6_only| { + ipv6Only = ipv6_only; } if (try opts.getStringish(globalObject, "hostname") orelse try opts.getStringish(globalObject, "host")) |hostname| { @@ -437,6 +449,8 @@ pub const SocketConfig = struct { .default_data = default_data, .exclusive = exclusive, .allowHalfOpen = allowHalfOpen, + .reusePort = reusePort, + .ipv6Only = ipv6Only, }; } }; @@ -603,10 +617,13 @@ pub const Listener = struct { const ssl_enabled = ssl != null; - var socket_flags: i32 = if (exclusive) uws.LIBUS_LISTEN_EXCLUSIVE_PORT else uws.LIBUS_LISTEN_DEFAULT; + var socket_flags: i32 = if (exclusive) uws.LIBUS_LISTEN_EXCLUSIVE_PORT else (if (socket_config.reusePort) uws.LIBUS_SOCKET_REUSE_PORT else uws.LIBUS_LISTEN_DEFAULT); if (socket_config.allowHalfOpen) { socket_flags |= uws.LIBUS_SOCKET_ALLOW_HALF_OPEN; } + if (socket_config.ipv6Only) { + socket_flags |= uws.LIBUS_SOCKET_IPV6_ONLY; + } defer if (ssl != null) ssl.?.deinit(); if (Environment.isWindows) { @@ -2174,6 +2191,8 @@ fn NewSocket(comptime ssl: bool) type { } const args = callframe.argumentsUndef(2); + this.ref(); + defer this.deref(); return switch (this.writeOrEndBuffered(globalObject, args.ptr[0], args.ptr[1], false)) { .fail => .zero, @@ -2469,7 +2488,6 @@ fn NewSocket(comptime ssl: bool) type { ) bun.JSError!JSValue { JSC.markBinding(@src()); const args = callframe.arguments_old(1); - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); if (args.len > 0 and args.ptr[0].toBoolean()) { this.socket.shutdownRead(); } else { @@ -2494,6 +2512,9 @@ fn NewSocket(comptime ssl: bool) type { return JSValue.jsNumber(@as(i32, -1)); } + this.ref(); + defer this.deref(); + return switch (this.writeOrEnd(globalObject, args.mut(), false, 
true)) { .fail => .zero, .success => |result| brk: { @@ -3488,7 +3509,8 @@ fn NewSocket(comptime ssl: bool) type { TLSSocket.dataSetCached(tls_js_value, globalObject, default_data); tls.socket = new_socket; - tls.socket_context = new_socket.context(); // owns the new tls context that have a ref from the old one + const new_context = new_socket.context().?; + tls.socket_context = new_context; // owns the new tls context that have a ref from the old one tls.ref(); const vm = handlers.vm; @@ -3518,7 +3540,7 @@ fn NewSocket(comptime ssl: bool) type { .connection = if (this.connection) |c| c.clone() else null, .wrapped = .tcp, .protos = null, - .socket_context = null, // raw socket will dont own the context + .socket_context = new_context.ref(true), }); raw.ref(); @@ -3637,7 +3659,7 @@ pub fn NewWrappedHandler(comptime tls: bool) type { if (comptime tls) { TLSSocket.onData(this.tls, socket, data); } else { - // tedius use this + // tedius use this (tedius is a pure-javascript implementation of TDS protocol used to interact with instances of Microsoft's SQL Server) TLSSocket.onData(this.tcp, socket, data); } } diff --git a/src/bun.js/api/server.zig b/src/bun.js/api/server.zig index 2946d6a5143116..a42b2106fbbf51 100644 --- a/src/bun.js/api/server.zig +++ b/src/bun.js/api/server.zig @@ -7227,7 +7227,8 @@ pub fn NewServer(comptime NamespaceType: type, comptime ssl_enabled_: bool, comp app.listenWithConfig(*ThisServer, this, onListen, .{ .port = tcp.port, .host = host, - .options = if (this.config.reuse_port) 0 else 1, + // IPV6_ONLY is the default for bun, different from node it also set exclusive port in case reuse port is not set + .options = (if (this.config.reuse_port) uws.LIBUS_SOCKET_REUSE_PORT else uws.LIBUS_LISTEN_EXCLUSIVE_PORT) | uws.LIBUS_SOCKET_IPV6_ONLY, }); }, @@ -7237,7 +7238,8 @@ pub fn NewServer(comptime NamespaceType: type, comptime ssl_enabled_: bool, comp this, onListen, unix, - if (this.config.reuse_port) 0 else 1, + // IPV6_ONLY is the default for 
bun, different from node it also set exclusive port in case reuse port is not set + (if (this.config.reuse_port) uws.LIBUS_SOCKET_REUSE_PORT else uws.LIBUS_LISTEN_EXCLUSIVE_PORT) | uws.LIBUS_SOCKET_IPV6_ONLY, ); }, } diff --git a/src/deps/uws.zig b/src/deps/uws.zig index ed122a3e7ed86c..183bbacfb1a2cf 100644 --- a/src/deps/uws.zig +++ b/src/deps/uws.zig @@ -10,6 +10,9 @@ pub const u_int64_t = c_ulonglong; pub const LIBUS_LISTEN_DEFAULT: i32 = 0; pub const LIBUS_LISTEN_EXCLUSIVE_PORT: i32 = 1; pub const LIBUS_SOCKET_ALLOW_HALF_OPEN: i32 = 2; +pub const LIBUS_SOCKET_REUSE_PORT: i32 = 4; +pub const LIBUS_SOCKET_IPV6_ONLY: i32 = 8; + pub const Socket = opaque { pub fn write2(this: *Socket, first: []const u8, second: []const u8) i32 { const rc = us_socket_write2(0, this, first.ptr, first.len, second.ptr, second.len); @@ -2356,6 +2359,11 @@ pub const SocketContext = opaque { us_socket_context_free(@as(i32, 0), this); } + pub fn ref(this: *SocketContext, comptime ssl: bool) *SocketContext { + us_socket_context_ref(@intFromBool(ssl), this); + return this; + } + pub fn cleanCallbacks(ctx: *SocketContext, is_ssl: bool) void { const ssl_int: i32 = @intFromBool(is_ssl); // replace callbacks with dummy ones diff --git a/src/js/node/net.ts b/src/js/node/net.ts index c9b8ec5de5f6e2..c06c476816c0d6 100644 --- a/src/js/node/net.ts +++ b/src/js/node/net.ts @@ -243,6 +243,7 @@ const Socket = (function (InternalSocket) { static #End(socket) { const self = socket.data; if (!self) return; + // we just reuse the same code but we can push null or enqueue right away Socket.#EmitEndNT(self); } @@ -798,10 +799,20 @@ const Socket = (function (InternalSocket) { return this; } + end(...args) { + if (!this._readableState.endEmitted) { + this.secureConnecting = false; + } + return super.end(...args); + } + _destroy(err, callback) { this.connecting = false; - const { ending } = this._writableState; + if (!err && this.secureConnecting && !this.isServer) { + this.secureConnecting = false; + err = 
new ConnResetException("Client network socket disconnected before secure TLS connection was established"); + } // lets make sure that the writable side is closed if (!ending) { // at this state destroyed will be true but we need to close the writable side @@ -900,7 +911,15 @@ const Socket = (function (InternalSocket) { } resetAndDestroy() { - this._handle?.end(); + if (this._handle) { + if (this.connecting) { + this.once("connect", () => this._handle?.terminate()); + } else { + this._handle.terminate(); + } + } else { + this.destroy($ERR_SOCKET_CLOSED_BEFORE_CONNECTION("ERR_SOCKET_CLOSED_BEFORE_CONNECTION")); + } } setKeepAlive(enable = false, initialDelayMsecs = 0) { @@ -1171,6 +1190,9 @@ class Server extends EventEmitter { let backlog; let path; let exclusive = false; + let allowHalfOpen = false; + let reusePort = false; + let ipv6Only = false; //port is actually path if (typeof port === "string") { if (Number.isSafeInteger(hostname)) { @@ -1200,13 +1222,15 @@ class Server extends EventEmitter { options.signal?.addEventListener("abort", () => this.close()); hostname = options.host; - exclusive = options.exclusive === true; + exclusive = options.exclusive; path = options.path; port = options.port; + ipv6Only = options.ipv6Only; + allowHalfOpen = options.allowHalfOpen; + reusePort = options.reusePort; const isLinux = process.platform === "linux"; - if (!Number.isSafeInteger(port) || port < 0) { if (path) { const isAbstractPath = path.startsWith("\0"); @@ -1277,6 +1301,9 @@ class Server extends EventEmitter { backlog, undefined, exclusive, + ipv6Only, + allowHalfOpen, + reusePort, undefined, undefined, path, @@ -1291,12 +1318,15 @@ class Server extends EventEmitter { return this; } - [kRealListen](path, port, hostname, exclusive, tls, contexts, onListen) { + [kRealListen](path, port, hostname, exclusive, ipv6Only, allowHalfOpen, reusePort, tls, contexts, onListen) { if (path) { this._handle = Bun.listen({ unix: path, tls, - allowHalfOpen: 
this[bunSocketServerOptions]?.allowHalfOpen || false, + allowHalfOpen: allowHalfOpen || this[bunSocketServerOptions]?.allowHalfOpen || false, + reusePort: reusePort || this[bunSocketServerOptions]?.reusePort || false, + ipv6Only: ipv6Only || this[bunSocketServerOptions]?.ipv6Only || false, + exclusive: exclusive || this[bunSocketServerOptions]?.exclusive || false, socket: SocketClass[bunSocketServerHandlers], }); } else { @@ -1305,7 +1335,10 @@ class Server extends EventEmitter { port, hostname, tls, - allowHalfOpen: this[bunSocketServerOptions]?.allowHalfOpen || false, + allowHalfOpen: allowHalfOpen || this[bunSocketServerOptions]?.allowHalfOpen || false, + reusePort: reusePort || this[bunSocketServerOptions]?.reusePort || false, + ipv6Only: ipv6Only || this[bunSocketServerOptions]?.ipv6Only || false, + exclusive: exclusive || this[bunSocketServerOptions]?.exclusive || false, socket: SocketClass[bunSocketServerHandlers], }); } @@ -1343,6 +1376,16 @@ function emitErrorAndCloseNextTick(self, error) { self.emit("error", error); self.emit("close"); } +class ConnResetException extends Error { + constructor(msg) { + super(msg); + this.code = "ECONNRESET"; + } + + get ["constructor"]() { + return Error; + } +} function emitListeningNextTick(self, onListen) { if (typeof onListen === "function") { @@ -1364,6 +1407,9 @@ function listenInCluster( backlog, fd, exclusive, + ipv6Only, + allowHalfOpen, + reusePort, flags, options, path, @@ -1377,7 +1423,7 @@ function listenInCluster( if (cluster === undefined) cluster = require("node:cluster"); if (cluster.isPrimary || exclusive) { - server[kRealListen](path, port, hostname, exclusive, tls, contexts, onListen); + server[kRealListen](path, port, hostname, exclusive, ipv6Only, allowHalfOpen, reusePort, tls, contexts, onListen); return; } @@ -1395,7 +1441,7 @@ function listenInCluster( if (err) { throw new ExceptionWithHostPort(err, "bind", address, port); } - server[kRealListen](path, port, hostname, exclusive, tls, contexts, 
onListen); + server[kRealListen](path, port, hostname, exclusive, ipv6Only, allowHalfOpen, reusePort, tls, contexts, onListen); }); } diff --git a/test/js/node/net/node-net-allowHalfOpen.test.js b/test/js/node/net/node-net-allowHalfOpen.test.js new file mode 100644 index 00000000000000..3485bdc37b7ca0 --- /dev/null +++ b/test/js/node/net/node-net-allowHalfOpen.test.js @@ -0,0 +1,115 @@ +import net from "node:net"; +import { tempDirWithFiles, nodeExe } from "harness"; +import { expect, test } from "bun:test"; + +async function nodeRun(callback, clients = 1) { + const cwd = tempDirWithFiles("server", { + "index.mjs": ` + import net from "node:net"; + let clients = ${clients}; + const server = net.createServer({ allowHalfOpen: true }, socket => { + // Listen for data from the client + socket.on("data", data => { + console.log(data.toString()); + }); + + socket.on("end", () => { + console.log("Received FIN"); + if(--clients == 0) { + server.close(); + } + }); + socket.on("error", console.error); + + // start sending FIN + socket.end(); + }); + server.listen(0, "127.0.0.1", ()=> { + console.log(server.address().port?.toString()); + }) + `, + }); + const process = Bun.spawn([nodeExe(), "index.mjs"], { + cwd, + stdin: "ignore", + stdout: "pipe", + stderr: "pipe", + }); + + const reader = process.stdout.getReader(); + let continueReading = true; + let stdout = ""; + let port = 0; + do { + const { done, value } = await reader.read(); + + continueReading = !done; + const decoder = new TextDecoder(); + if (value) { + if (!port) { + port = parseInt(decoder.decode(value), 10); + callback(port); + } else { + stdout += decoder.decode(value); + } + } + } while (continueReading); + + return { + stdout, + stderr: (await Bun.readableStreamToText(process.stderr)).trim(), + code: await process.exited, + }; +} + +async function doHalfOpenRequest(port, allowHalfOpen) { + const { promise, resolve, reject } = Promise.withResolvers(); + + const client = net.connect({ host: "127.0.0.1", 
port, allowHalfOpen }, () => { + client.write("Hello, World"); + }); + client.on("error", reject); + client.on("close", resolve); + client.on("end", () => { + // delay the write response + setTimeout(() => { + client.write("Write after end"); + client.end(); + }, 10); + }); + await promise; +} + +test("allowHalfOpen: true should work on client-side", async () => { + const { promise: portPromise, resolve } = Promise.withResolvers(); + const process = nodeRun(resolve, 1); + + const port = await portPromise; + await doHalfOpenRequest(port, true); + const result = await process; + expect(result.code).toBe(0); + expect(result.stderr).toBe(""); + expect( + result.stdout + .split("\n") + .map(s => s.trim()) + .filter(s => s), + ).toEqual(["Hello, World", "Write after end", "Received FIN"]); +}); + +test("allowHalfOpen: false should work on client-side", async () => { + const { promise: portPromise, resolve } = Promise.withResolvers(); + const process = nodeRun(resolve, 1); + + const port = await portPromise; + await doHalfOpenRequest(port, false); + const result = await process; + expect(result.code).toBe(0); + expect(result.stderr).toBe(""); + expect( + result.stdout + .split("\n") + .map(s => s.trim()) + .filter(s => s), + ).toEqual(["Hello, World", "Received FIN"]); +}); diff --git a/test/js/node/test/parallel/net-bind-twice-exclusive.test.js b/test/js/node/test/parallel/net-bind-twice-exclusive.test.js new file mode 100644 index 00000000000000..98548784794d63 --- /dev/null +++ b/test/js/node/test/parallel/net-bind-twice-exclusive.test.js @@ -0,0 +1,39 @@ +//#FILE: test-net-bind-twice.js +//#SHA1: 432eb9529d0affc39c8af9ebc1147528d96305c9 +//----------------- +"use strict"; +const net = require("net"); + +test("net.Server should not allow binding to the same port twice", done => { + const server1 = net.createServer(() => { + throw new Error("Server1 should not receive connections"); + }); + + server1.listen( + { + exclusive: true, + port: 0, + host: "127.0.0.1", + }, + 
() => { + const server2 = net.createServer(() => { + throw new Error("Server2 should not receive connections"); + }); + + const port = server1.address().port; + server2.listen(port, "127.0.0.1", () => { + throw new Error("Server2 should not be able to listen"); + }); + + server2.on("error", e => { + console.error(e); + expect(e.code).toBe("EADDRINUSE"); + server1.close(() => { + done(); + }); + }); + }, + ); +}); + +//<#END_FILE: test-net-bind-twice.js diff --git a/test/js/node/test/parallel/net-bind-twice-reuseport.test.js b/test/js/node/test/parallel/net-bind-twice-reuseport.test.js new file mode 100644 index 00000000000000..55067ce9d6ab6c --- /dev/null +++ b/test/js/node/test/parallel/net-bind-twice-reuseport.test.js @@ -0,0 +1,42 @@ +//#FILE: test-net-bind-twice.js +//#SHA1: 432eb9529d0affc39c8af9ebc1147528d96305c9 +//----------------- +"use strict"; + +import { test } from "bun:test"; +import net from "node:net"; +import { isWindows } from "harness"; + +test.skipIf(isWindows)("net.Server should not allow binding to the same port twice", done => { + const server1 = net.createServer(() => { + throw new Error("Server1 should not receive connections"); + }); + + const options = { + reusePort: true, + port: 0, + host: "127.0.0.1", + }; + server1.listen(options, () => { + const server2 = net.createServer(() => { + throw new Error("Server2 should not receive connections"); + }); + + const port = server1.address().port; + server2.listen({ ...options, port }, () => { + server2.close(() => { + server1.close(() => { + done(); + }); + }); + }); + + server2.on("error", e => { + server1.close(() => { + done(e); + }); + }); + }); +}); + +//<#END_FILE: test-net-bind-twice.js diff --git a/test/js/node/test/parallel/net-bind-twice.test.js b/test/js/node/test/parallel/net-bind-twice.test.js index de2b9428ca8d91..56454d2aabf525 100644 --- a/test/js/node/test/parallel/net-bind-twice.test.js +++ b/test/js/node/test/parallel/net-bind-twice.test.js @@ -26,6 +26,6 @@ test("net.Server 
should not allow binding to the same port twice", done => { }); }); }); -}, 100000); +}); //<#END_FILE: test-net-bind-twice.js diff --git a/test/js/node/test/parallel/net-server-try-ports.test.js b/test/js/node/test/parallel/net-server-try-ports.test.js new file mode 100644 index 00000000000000..67668a69bce154 --- /dev/null +++ b/test/js/node/test/parallel/net-server-try-ports.test.js @@ -0,0 +1,28 @@ +//#FILE: test-net-server-try-ports.js +//#SHA1: 8f3f2a7c0fcc9b76f2aaf8ac2bb00c81e6a752fa +//----------------- +"use strict"; + +const net = require("net"); + +test("Server should handle EADDRINUSE and bind to another port", done => { + const server1 = new net.Server(); + const server2 = new net.Server(); + + server2.on("error", e => { + expect(e.code).toBe("EADDRINUSE"); + + server2.listen(0, () => { + server1.close(); + server2.close(); + done(); + }); + }); + + server1.listen(0, () => { + // This should make server2 emit EADDRINUSE + server2.listen(server1.address().port); + }); +}); + +//<#END_FILE: test-net-server-try-ports.js diff --git a/test/js/node/test/parallel/tls-zero-clear-in.test.js b/test/js/node/test/parallel/tls-zero-clear-in.test.js index 26283f4311d925..4b254bdf3c8eed 100644 --- a/test/js/node/test/parallel/tls-zero-clear-in.test.js +++ b/test/js/node/test/parallel/tls-zero-clear-in.test.js @@ -68,7 +68,7 @@ test("SSL_write() call with 0 bytes should not be treated as error", done => { // treated as error. 
conn.end(""); - conn.on("error", () => { + conn.on("error", err => { done(new Error("Unexpected error event")); }); diff --git a/test/js/node/tls/node-tls-connect.test.ts b/test/js/node/tls/node-tls-connect.test.ts index 8ecbb11bb65a2c..0852df396e9b4d 100644 --- a/test/js/node/tls/node-tls-connect.test.ts +++ b/test/js/node/tls/node-tls-connect.test.ts @@ -4,8 +4,10 @@ import net from "net"; import { join } from "path"; import tls, { checkServerIdentity, connect as tlsConnect, TLSSocket } from "tls"; import stream from "stream"; +import { once } from "events"; import { Duplex } from "node:stream"; +import type { AddressInfo } from "net"; const symbolConnectOptions = Symbol.for("::buntlsconnectoptions::"); @@ -117,6 +119,22 @@ it("should have checkServerIdentity", async () => { expect(checkServerIdentity).toBeFunction(); expect(tls.checkServerIdentity).toBeFunction(); }); + +it("should thow ECONNRESET if FIN is received before handshake", async () => { + await using server = net.createServer(c => { + c.end(); + }); + await once(server.listen(0, "127.0.0.1"), "listening"); + const { promise, resolve } = Promise.withResolvers(); + tls.connect((server.address() as AddressInfo).port).on("error", resolve); + + const error = await promise; + + expect(error).toBeDefined(); + // TODO: today we are a little incompatible with node.js we need to change `UNABLE_TO_GET_ISSUER_CERT` when closed before handshake complete on the openssl.c to emit error `ECONNRESET` instead of SSL fail, + // current behavior is not wrong because is the right error but is incompatible with node.js + expect((error as Error).code as string).toBeOneOf(["ECONNRESET", "UNABLE_TO_GET_ISSUER_CERT"]); +}); it("should be able to grab the JSStreamSocket constructor", () => { // this keep http2-wrapper compatibility with node.js const socket = new tls.TLSSocket(new stream.PassThrough());