diff --git a/.github/workflows/bun-release-test.yml b/.github/workflows/bun-release-test.yml index c331c47e93bcff..2af8e2d0afc745 100644 --- a/.github/workflows/bun-release-test.yml +++ b/.github/workflows/bun-release-test.yml @@ -28,7 +28,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: Setup Bun - uses: oven-sh/setup-bun@v1 + uses: ./.github/actions/setup-bun with: bun-version: "1.1.0" - name: Setup Node diff --git a/.github/workflows/bun-release.yml b/.github/workflows/bun-release.yml index 89ca85073705c8..b523388e4e189b 100644 --- a/.github/workflows/bun-release.yml +++ b/.github/workflows/bun-release.yml @@ -58,7 +58,7 @@ jobs: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} passphrase: ${{ secrets.GPG_PASSPHRASE }} - name: Setup Bun - uses: oven-sh/setup-bun@v1 + uses: ./.github/actions/setup-bun with: bun-version: "1.0.21" - name: Install Dependencies @@ -83,7 +83,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: Setup Bun - uses: oven-sh/setup-bun@v1 + uses: ./.github/actions/setup-bun with: bun-version: "1.0.21" - name: Install Dependencies @@ -112,12 +112,12 @@ jobs: node-version: latest - name: Setup Bun if: ${{ env.BUN_VERSION != 'canary' }} - uses: oven-sh/setup-bun@v1 + uses: ./.github/actions/setup-bun with: bun-version: "1.0.21" - name: Setup Bun if: ${{ env.BUN_VERSION == 'canary' }} - uses: oven-sh/setup-bun@v1 + uses: ./.github/actions/setup-bun with: bun-version: "canary" # Must be 'canary' so tag is correct - name: Install Dependencies @@ -254,7 +254,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: Setup Bun - uses: oven-sh/setup-bun@v1 + uses: ./.github/actions/setup-bun with: bun-version: "1.0.21" - name: Install Dependencies diff --git a/CMakeLists.txt b/CMakeLists.txt index eaf7b30e710cb9..7c20793ea28ae9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.22) cmake_policy(SET CMP0091 NEW) cmake_policy(SET CMP0067 NEW) -set(Bun_VERSION "1.1.3") 
+set(Bun_VERSION "1.1.4") set(WEBKIT_TAG e3a2d89a0b1644cc8d5c245bd2ffee4d4bd6c1d5) set(BUN_WORKDIR "${CMAKE_CURRENT_BINARY_DIR}") @@ -555,6 +555,7 @@ else() add_compile_definitions("BUN_DEBUG=1") set(ASSERT_ENABLED "1") endif() + message(STATUS "Using WebKit from ${WEBKIT_DIR}") else() if(NOT EXISTS "${WEBKIT_DIR}/lib/${libWTF}.${STATIC_LIB_EXT}" OR NOT EXISTS "${WEBKIT_DIR}/lib/${libJavaScriptCore}.${STATIC_LIB_EXT}") diff --git a/docs/api/sqlite.md b/docs/api/sqlite.md index ddc405fb02571d..04ab1bbf4d63d6 100644 --- a/docs/api/sqlite.md +++ b/docs/api/sqlite.md @@ -62,7 +62,7 @@ const db = new Database("mydb.sqlite", { create: true }); You can also use an import attribute to load a database. ```ts -import db from "./mydb.sqlite" with {"type": "sqlite"}; +import db from "./mydb.sqlite" with { "type": "sqlite" }; console.log(db.query("select * from users LIMIT 1").get()); ``` @@ -74,16 +74,39 @@ import { Database } from "bun:sqlite"; const db = new Database("./mydb.sqlite"); ``` -### `.close()` +### `.close(throwOnError: boolean = false)` -To close a database: +To close a database connection, but allow existing queries to finish, call `.close(false)`: ```ts const db = new Database(); -db.close(); +// ... do stuff +db.close(false); ``` -Note: `close()` is called automatically when the database is garbage collected. It is safe to call multiple times but has no effect after the first. +To close the database and throw an error if there are any pending queries, call `.close(true)`: + +```ts +const db = new Database(); +// ... do stuff +db.close(true); +``` + +Note: `close(false)` is called automatically when the database is garbage collected. It is safe to call multiple times but has no effect after the first. + +### `using` statement + +You can use the `using` statement to ensure that a database connection is closed when the `using` block is exited. 
+ +```ts +import { Database } from "bun:sqlite"; + +{ + using db = new Database("mydb.sqlite"); + using query = db.query("select 'Hello world' as message;"); + console.log(query.get()); // => { message: "Hello world" } +} +``` ### `.serialize()` @@ -128,6 +151,8 @@ db.exec("PRAGMA journal_mode = WAL;"); {% details summary="What is WAL mode" %} In WAL mode, writes to the database are written directly to a separate file called the "WAL file" (write-ahead log). This file will be later integrated into the main database file. Think of it as a buffer for pending writes. Refer to the [SQLite docs](https://www.sqlite.org/wal.html) for a more detailed overview. + +On macOS, WAL files may be persistent by default. This is not a bug, it is how macOS configured the system version of SQLite. {% /details %} ## Statements @@ -387,6 +412,25 @@ db.loadExtension("myext"); {% /details %} +### .fileControl(cmd: number, value: any) + +To use the advanced `sqlite3_file_control` API, call `.fileControl(cmd, value)` on your `Database` instance. + +```ts +import { Database, constants } from "bun:sqlite"; + +const db = new Database(); +// Ensure WAL mode is NOT persistent +// this prevents wal files from lingering after the database is closed +db.fileControl(constants.SQLITE_FCNTL_PERSIST_WAL, 0); +``` + +`value` can be: + +- `number` +- `TypedArray` +- `undefined` or `null` + ## Reference ```ts diff --git a/docs/guides/ecosystem/neon-serverless-postgres.md b/docs/guides/ecosystem/neon-serverless-postgres.md index a51eb46d12e28d..0f5dcb9a42f906 100644 --- a/docs/guides/ecosystem/neon-serverless-postgres.md +++ b/docs/guides/ecosystem/neon-serverless-postgres.md @@ -41,10 +41,10 @@ console.log(rows[0].version); --- -Start the program using `bun run`. The Postgres version should be printed to the console. +Start the program using `bun ./index.ts`. The Postgres version should be printed to the console. 
```sh -$ bun run index.ts +$ bun ./index.ts PostgreSQL 16.2 on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit ``` diff --git a/package.json b/package.json index 9478d4c95298cd..16b59e98faf454 100644 --- a/package.json +++ b/package.json @@ -26,6 +26,7 @@ "build:release": "cmake . -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-release && ninja -Cbuild-release", "build:debug-zig-release": "cmake . -DCMAKE_BUILD_TYPE=Release -DZIG_OPTIMIZE=Debug -GNinja -Bbuild-debug-zig-release && ninja -Cbuild-debug-zig-release", "build:safe": "cmake . -DZIG_OPTIMIZE=ReleaseSafe -DUSE_DEBUG_JSC=ON -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-safe && ninja -Cbuild-safe", + "build:windows": "cmake -B build -S . -G Ninja -DCMAKE_BUILD_TYPE=Debug && ninja -Cbuild", "typecheck": "tsc --noEmit && cd test && bun run typecheck", "fmt": "prettier --write --cache './{.vscode,src,test,bench,packages/{bun-types,bun-inspector-*,bun-vscode,bun-debug-adapter-protocol}}/**/*.{mjs,ts,tsx,js,jsx}'", "fmt:zig": "zig fmt src/*.zig src/*/*.zig src/*/*/*.zig src/*/*/*/*.zig", diff --git a/packages/bun-internal-test/src/banned.json b/packages/bun-internal-test/src/banned.json index 2689404af4eb87..d3df1c1a0772b0 100644 --- a/packages/bun-internal-test/src/banned.json +++ b/packages/bun-internal-test/src/banned.json @@ -1,3 +1,7 @@ { - "std.debug.assert": "Use bun.assert instead" + "std.debug.assert": "Use bun.assert instead", + "@import(\"root\").bun.": "Only import 'bun' once", + "std.mem.indexOfAny": "Use bun.strings.indexAny or bun.strings.indexAnyComptime", + "std.debug.print": "Don't let this be committed", + "": "" } diff --git a/packages/bun-internal-test/src/linter.ts b/packages/bun-internal-test/src/linter.ts index f6d82510db2bda..e42dfac28f41c2 100644 --- a/packages/bun-internal-test/src/linter.ts +++ b/packages/bun-internal-test/src/linter.ts @@ -16,8 +16,12 @@ const write = (text: string) => { report += text; }; for (const [banned, suggestion] of 
Object.entries(BANNED)) { + if (banned.length === 0) continue; // Run git grep to find occurrences of std.debug.assert in .zig files - let stdout = await $`git grep -n "${banned}" "src/**/**.zig"`.text(); + // .nothrow() is here since git will exit with non-zero if no matches are found. + let stdout = await $`git grep -n -F "${banned}" "src/**/**.zig" | grep -v -F '//' | grep -v -F bench` + .nothrow() + .text(); stdout = stdout.trim(); if (stdout.length === 0) continue; diff --git a/packages/bun-internal-test/src/runner.node.mjs b/packages/bun-internal-test/src/runner.node.mjs index 194771eec05e33..267bdef75a6a45 100644 --- a/packages/bun-internal-test/src/runner.node.mjs +++ b/packages/bun-internal-test/src/runner.node.mjs @@ -497,9 +497,17 @@ if (failing_tests.length) { report += "[Link to file](" + linkToGH(path) + ")\n\n"; report += `${reason}\n\n`; report += "```\n"; - report += output + + let failing_output = output .replace(/\x1b\[[0-9;]*m/g, "") .replace(/^::(group|endgroup|error|warning|set-output|add-matcher|remove-matcher).*$/gm, ""); + + if (failing_output.length > 1024 * 64) { + failing_output = failing_output.slice(0, 1024 * 64) + `\n\n[truncated output (length: ${failing_output.length})]`; + } + + report += failing_output; + report += "```\n\n"; } } diff --git a/packages/bun-types/bun.d.ts b/packages/bun-types/bun.d.ts index e2e48007883474..4a8758e58b9b17 100644 --- a/packages/bun-types/bun.d.ts +++ b/packages/bun-types/bun.d.ts @@ -4145,6 +4145,11 @@ declare module "bun" { */ windowsHide?: boolean; + /** + * If true, no quoting or escaping of arguments is done on Windows. + */ + windowsVerbatimArguments?: boolean; + /** * Path to the executable to run in the subprocess. This defaults to `cmds[0]`. 
* diff --git a/packages/bun-types/sqlite.d.ts b/packages/bun-types/sqlite.d.ts index f0ef907e82a473..1f2fd7ff5db1bd 100644 --- a/packages/bun-types/sqlite.d.ts +++ b/packages/bun-types/sqlite.d.ts @@ -24,7 +24,7 @@ * | `null` | `NULL` | */ declare module "bun:sqlite" { - export class Database { + export class Database implements Disposable { /** * Open or create a SQLite3 database * @@ -257,7 +257,20 @@ declare module "bun:sqlite" { * * Internally, this calls `sqlite3_close_v2`. */ - close(): void; + close( + /** + * If `true`, then the database will throw an error if it is in use + * @default false + * + * When true, this calls `sqlite3_close` instead of `sqlite3_close_v2`. + * + * Learn more about this in the [sqlite3 documentation](https://www.sqlite.org/c3ref/close.html). + * + * Bun will automatically call close by default when the database instance is garbage collected. + * In The future, Bun may default `throwOnError` to be true but for backwards compatibility, it is false by default. + */ + throwOnError?: boolean, + ): void; /** * The filename passed when `new Database()` was called @@ -304,6 +317,8 @@ declare module "bun:sqlite" { */ static setCustomSQLite(path: string): boolean; + [Symbol.dispose](): void; + /** * Creates a function that always runs inside a transaction. When the * function is invoked, it will begin a new transaction. When the function @@ -427,6 +442,17 @@ declare module "bun:sqlite" { * ``` */ static deserialize(serialized: NodeJS.TypedArray | ArrayBufferLike, isReadOnly?: boolean): Database; + + /** + * See `sqlite3_file_control` for more information. + * @link https://www.sqlite.org/c3ref/file_control.html + */ + fileControl(op: number, arg?: ArrayBufferView | number): number; + /** + * See `sqlite3_file_control` for more information. 
+ * @link https://www.sqlite.org/c3ref/file_control.html + */ + fileControl(zDbName: string, op: number, arg?: ArrayBufferView | number): number; } /** @@ -455,7 +481,7 @@ declare module "bun:sqlite" { * // => undefined * ``` */ - export class Statement { + export class Statement implements Disposable { /** * Creates a new prepared statement from native code. * @@ -633,6 +659,11 @@ declare module "bun:sqlite" { */ finalize(): void; + /** + * Calls {@link finalize} if it wasn't already called. + */ + [Symbol.dispose](): void; + /** * Return the expanded SQL string for the prepared statement. * @@ -766,6 +797,187 @@ declare module "bun:sqlite" { * @constant 0x04 */ SQLITE_PREPARE_NO_VTAB: number; + + /** + * @constant 1 + */ + SQLITE_FCNTL_LOCKSTATE: number; + /** + * @constant 2 + */ + SQLITE_FCNTL_GET_LOCKPROXYFILE: number; + /** + * @constant 3 + */ + SQLITE_FCNTL_SET_LOCKPROXYFILE: number; + /** + * @constant 4 + */ + SQLITE_FCNTL_LAST_ERRNO: number; + /** + * @constant 5 + */ + SQLITE_FCNTL_SIZE_HINT: number; + /** + * @constant 6 + */ + SQLITE_FCNTL_CHUNK_SIZE: number; + /** + * @constant 7 + */ + SQLITE_FCNTL_FILE_POINTER: number; + /** + * @constant 8 + */ + SQLITE_FCNTL_SYNC_OMITTED: number; + /** + * @constant 9 + */ + SQLITE_FCNTL_WIN32_AV_RETRY: number; + /** + * @constant 10 + * + * Control whether or not the WAL is persisted + * Some versions of macOS configure WAL to be persistent by default. + * + * You can change this with code like the below: + * ```ts + * import { Database } from "bun:sqlite"; + * + * const db = Database.open("mydb.sqlite"); + * db.fileControl(constants.SQLITE_FCNTL_PERSIST_WAL, 0); + * // enable WAL + * db.exec("PRAGMA journal_mode = WAL"); + * // .. 
do some work + * db.close(); + * ``` + * + */ + SQLITE_FCNTL_PERSIST_WAL: number; + /** + * @constant 11 + */ + SQLITE_FCNTL_OVERWRITE: number; + /** + * @constant 12 + */ + SQLITE_FCNTL_VFSNAME: number; + /** + * @constant 13 + */ + SQLITE_FCNTL_POWERSAFE_OVERWRITE: number; + /** + * @constant 14 + */ + SQLITE_FCNTL_PRAGMA: number; + /** + * @constant 15 + */ + SQLITE_FCNTL_BUSYHANDLER: number; + /** + * @constant 16 + */ + SQLITE_FCNTL_TEMPFILENAME: number; + /** + * @constant 18 + */ + SQLITE_FCNTL_MMAP_SIZE: number; + /** + * @constant 19 + */ + SQLITE_FCNTL_TRACE: number; + /** + * @constant 20 + */ + SQLITE_FCNTL_HAS_MOVED: number; + /** + * @constant 21 + */ + SQLITE_FCNTL_SYNC: number; + /** + * @constant 22 + */ + SQLITE_FCNTL_COMMIT_PHASETWO: number; + /** + * @constant 23 + */ + SQLITE_FCNTL_WIN32_SET_HANDLE: number; + /** + * @constant 24 + */ + SQLITE_FCNTL_WAL_BLOCK: number; + /** + * @constant 25 + */ + SQLITE_FCNTL_ZIPVFS: number; + /** + * @constant 26 + */ + SQLITE_FCNTL_RBU: number; + /** + * @constant 27 + */ + SQLITE_FCNTL_VFS_POINTER: number; + /** + * @constant 28 + */ + SQLITE_FCNTL_JOURNAL_POINTER: number; + /** + * @constant 29 + */ + SQLITE_FCNTL_WIN32_GET_HANDLE: number; + /** + * @constant 30 + */ + SQLITE_FCNTL_PDB: number; + /** + * @constant 31 + */ + SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: number; + /** + * @constant 32 + */ + SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: number; + /** + * @constant 33 + */ + SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: number; + /** + * @constant 34 + */ + SQLITE_FCNTL_LOCK_TIMEOUT: number; + /** + * @constant 35 + */ + SQLITE_FCNTL_DATA_VERSION: number; + /** + * @constant 36 + */ + SQLITE_FCNTL_SIZE_LIMIT: number; + /** + * @constant 37 + */ + SQLITE_FCNTL_CKPT_DONE: number; + /** + * @constant 38 + */ + SQLITE_FCNTL_RESERVE_BYTES: number; + /** + * @constant 39 + */ + SQLITE_FCNTL_CKPT_START: number; + /** + * @constant 40 + */ + SQLITE_FCNTL_EXTERNAL_READER: number; + /** + * @constant 41 + */ + SQLITE_FCNTL_CKSM_FILE: 
number; + /** + * @constant 42 + */ + SQLITE_FCNTL_RESET_CACHE: number; }; /** diff --git a/packages/bun-usockets/src/crypto/openssl.c b/packages/bun-usockets/src/crypto/openssl.c index b33f907dde74ad..9f502ecb6044ba 100644 --- a/packages/bun-usockets/src/crypto/openssl.c +++ b/packages/bun-usockets/src/crypto/openssl.c @@ -16,7 +16,6 @@ */ #if (defined(LIBUS_USE_OPENSSL) || defined(LIBUS_USE_WOLFSSL)) - /* These are in sni_tree.cpp */ void *sni_new(); void sni_free(void *sni, void (*cb)(void *)); @@ -72,7 +71,10 @@ struct us_internal_ssl_socket_context_t { // socket context SSL_CTX *ssl_context; int is_parent; - +#if ALLOW_SERVER_RENEGOTIATION + unsigned int client_renegotiation_limit; + unsigned int client_renegotiation_window; +#endif /* These decorate the base implementation */ struct us_internal_ssl_socket_t *(*on_open)(struct us_internal_ssl_socket_t *, int is_client, char *ip, @@ -96,13 +98,24 @@ struct us_internal_ssl_socket_context_t { void *handshake_data; }; -// same here, should or shouldn't it contain s? 
+// same here, should or shouldn't it +enum { + HANDSHAKE_PENDING = 0, + HANDSHAKE_COMPLETED = 1, + HANDSHAKE_RENEGOTIATION_PENDING = 2, +}; + struct us_internal_ssl_socket_t { struct us_socket_t s; SSL *ssl; +#if ALLOW_SERVER_RENEGOTIATION + unsigned int client_pending_renegotiations; + uint64_t last_ssl_renegotiation; + unsigned int is_client : 1; +#endif unsigned int ssl_write_wants_read : 1; // we use this for now unsigned int ssl_read_wants_write : 1; - unsigned int pending_handshake : 1; + unsigned int handshake_state : 2; unsigned int received_ssl_shutdown : 1; }; @@ -142,8 +155,6 @@ int BIO_s_custom_write(BIO *bio, const char *data, int length) { return -1; } - // printf("BIO_s_custom_write returns: %d\n", written); - return written; } @@ -151,8 +162,6 @@ int BIO_s_custom_read(BIO *bio, char *dst, int length) { struct loop_ssl_data *loop_ssl_data = (struct loop_ssl_data *)BIO_get_data(bio); - // printf("BIO_s_custom_read\n"); - if (!loop_ssl_data->ssl_read_input_length) { BIO_set_flags(bio, BIO_FLAGS_SHOULD_RETRY | BIO_FLAGS_READ); return -1; @@ -183,17 +192,40 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s, (struct loop_ssl_data *)loop->data.ssl_data; s->ssl = SSL_new(context->ssl_context); +#if ALLOW_SERVER_RENEGOTIATION + s->client_pending_renegotiations = context->client_renegotiation_limit; + s->last_ssl_renegotiation = 0; + s->is_client = is_client ? 
1 : 0; + +#endif s->ssl_write_wants_read = 0; s->ssl_read_wants_write = 0; - s->pending_handshake = 1; + s->handshake_state = HANDSHAKE_PENDING; s->received_ssl_shutdown = 0; SSL_set_bio(s->ssl, loop_ssl_data->shared_rbio, loop_ssl_data->shared_wbio); +// if we allow renegotiation, we need to set the mode here +// https://github.com/oven-sh/bun/issues/6197 +// https://github.com/oven-sh/bun/issues/5363 +// renegotiation is only valid for <= TLS1_2_VERSION +// this can be a DoS vector for servers, so we enable it using a limit +// we do not use ssl_renegotiate_freely, since ssl_renegotiate_explicit is +// more performant when using BoringSSL +#if ALLOW_SERVER_RENEGOTIATION + if (context->client_renegotiation_limit) { + SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit); + } else { + SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never); + } +#endif BIO_up_ref(loop_ssl_data->shared_rbio); BIO_up_ref(loop_ssl_data->shared_wbio); if (is_client) { +#if ALLOW_SERVER_RENEGOTIATION == 0 + SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit); +#endif SSL_set_connect_state(s->ssl); } else { SSL_set_accept_state(s->ssl); @@ -205,7 +237,9 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s, // Hello Message! 
// always handshake after open - us_internal_ssl_handshake(s); + // this is important because some servers/clients can get stuck waiting for + // this + us_internal_update_handshake(s); return result; } @@ -223,39 +257,75 @@ void us_internal_on_ssl_handshake( struct us_internal_ssl_socket_t * us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code, void *reason) { - struct us_internal_ssl_socket_context_t *context = - (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); - if (s->pending_handshake) { - s->pending_handshake = 0; + if (s->handshake_state != HANDSHAKE_COMPLETED) { // if we have some pending handshake we cancel it and try to check the // latest handshake error this way we will always call on_handshake with the // latest error before closing this should always call // secureConnection/secure before close if we remove this here, we will need // to do this check on every on_close event on sockets, fetch etc and will // increase complexity on a lot of places - if (context->on_handshake != NULL) { - struct us_bun_verify_error_t verify_error = us_internal_verify_error(s); - context->on_handshake(s, 0, verify_error, context->handshake_data); - } + us_internal_trigger_handshake_callback(s, 0); } return (struct us_internal_ssl_socket_t *)us_socket_close( 0, (struct us_socket_t *)s, code, reason); } -void us_internal_ssl_handshake(struct us_internal_ssl_socket_t *s) { +void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s, + int success) { struct us_internal_ssl_socket_context_t *context = (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); - void (*on_handshake)(struct us_internal_ssl_socket_t *, int, - struct us_bun_verify_error_t, void *) = - context->on_handshake; - void *custom_data = context->handshake_data; - // will start on_open, on_writable or on_data - if (!s->ssl) { - return; + // always set the handshake state to completed + s->handshake_state = HANDSHAKE_COMPLETED; + + 
if (context->on_handshake != NULL) { + struct us_bun_verify_error_t verify_error = us_internal_verify_error(s); + context->on_handshake(s, success, verify_error, context->handshake_data); } +} +int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) { + // handle renegotation here since we are using ssl_renegotiate_explicit + + // if is a server and we have no pending renegotiation we can check + // the limits + s->handshake_state = HANDSHAKE_RENEGOTIATION_PENDING; +#if ALLOW_SERVER_RENEGOTIATION + if (!s->is_client && !SSL_renegotiate_pending(s->ssl)) { + uint64_t now = time(NULL); + struct us_internal_ssl_socket_context_t *context = + (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); + // if is not the first time we negotiate and we are outside the time + // window, reset the limits + if (s->last_ssl_renegotiation && (now - s->last_ssl_renegotiation) >= + context->client_renegotiation_window) { + // reset the limits + s->client_pending_renegotiations = context->client_renegotiation_limit; + } + // if we have no more renegotiations, we should close the connection + if (s->client_pending_renegotiations == 0) { + return 0; + } + s->last_ssl_renegotiation = now; + s->client_pending_renegotiations--; + } +#endif + if (!SSL_renegotiate(s->ssl)) { + // we failed to renegotiate + us_internal_trigger_handshake_callback(s, 0); + return 0; + } + return 1; +} + +void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) { + struct us_internal_ssl_socket_context_t *context = + (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); + + // nothing todo here, renegotiation must be handled in SSL_read + if (s->handshake_state != HANDSHAKE_PENDING) + return; struct us_loop_t *loop = us_socket_context_loop(0, &context->sc); struct loop_ssl_data *loop_ssl_data = @@ -268,13 +338,8 @@ void us_internal_ssl_handshake(struct us_internal_ssl_socket_t *s) { if (us_socket_is_closed(0, &s->s) || 
us_internal_ssl_socket_is_shut_down(s) || SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) { - s->pending_handshake = 0; - struct us_bun_verify_error_t verify_error = (struct us_bun_verify_error_t){ - .error = 0, .code = NULL, .reason = NULL}; - if (on_handshake != NULL) { - on_handshake(s, 0, verify_error, custom_data); - } + us_internal_trigger_handshake_callback(s, 0); return; } @@ -285,45 +350,34 @@ void us_internal_ssl_handshake(struct us_internal_ssl_socket_t *s) { us_internal_ssl_socket_close(s, 0, NULL); return; } + if (result <= 0) { int err = SSL_get_error(s->ssl, result); // as far as I know these are the only errors we want to handle if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) { - s->pending_handshake = 0; + us_internal_trigger_handshake_callback(s, 1); - struct us_bun_verify_error_t verify_error = us_internal_verify_error(s); // clear per thread error queue if it may contain something if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) { ERR_clear_error(); } - - // error - if (on_handshake != NULL) { - on_handshake(s, 0, verify_error, custom_data); - } return; } - s->pending_handshake = 1; - context->on_handshake = on_handshake; - context->handshake_data = custom_data; + s->handshake_state = HANDSHAKE_PENDING; // Ensure that we'll cycle through internal openssl's state if (!us_socket_is_closed(0, &s->s) && !us_internal_ssl_socket_is_shut_down(s)) { us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0); } - } else { - s->pending_handshake = 0; - struct us_bun_verify_error_t verify_error = us_internal_verify_error(s); - // success - if (on_handshake != NULL) { - on_handshake(s, 1, verify_error, custom_data); - } - // Ensure that we'll cycle through internal openssl's state - if (!us_socket_is_closed(0, &s->s) && - !us_internal_ssl_socket_is_shut_down(s)) { - us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0); - } + return; + } + // success + us_internal_trigger_handshake_callback(s, 1); + // Ensure that we'll cycle 
through internal openssl's state + if (!us_socket_is_closed(0, &s->s) && + !us_internal_ssl_socket_is_shut_down(s)) { + us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0); } } @@ -332,7 +386,6 @@ ssl_on_close(struct us_internal_ssl_socket_t *s, int code, void *reason) { struct us_internal_ssl_socket_context_t *context = (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); - s->pending_handshake = 0; SSL_free(s->ssl); return context->on_close(s, code, reason); @@ -340,9 +393,6 @@ ssl_on_close(struct us_internal_ssl_socket_t *s, int code, void *reason) { struct us_internal_ssl_socket_t * ssl_on_end(struct us_internal_ssl_socket_t *s) { - if (s && s->pending_handshake) { - s->pending_handshake = 0; - } // whatever state we are in, a TCP FIN is always an answered shutdown /* Todo: this should report CLEANLY SHUTDOWN as reason */ @@ -352,6 +402,7 @@ ssl_on_end(struct us_internal_ssl_socket_t *s) { // this whole function needs a complete clean-up struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, void *data, int length) { + // note: this context can change when we adopt the socket! struct us_internal_ssl_socket_context_t *context = (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); @@ -360,9 +411,6 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, struct loop_ssl_data *loop_ssl_data = (struct loop_ssl_data *)loop->data.ssl_data; - if (s->pending_handshake) { - us_internal_ssl_handshake(s); - } // note: if we put data here we should never really clear it (not in write // either, it still should be available for SSL_write to read from!) 
loop_ssl_data->ssl_read_input = data; @@ -380,7 +428,6 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, int ret = 0; if ((ret = SSL_shutdown(s->ssl)) == 1) { // two phase shutdown is complete here - // printf("Two step SSL shutdown complete\n"); /* Todo: this should also report some kind of clean shutdown */ return us_internal_ssl_socket_close(s, 0, NULL); @@ -398,7 +445,6 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, // no further processing of data when in shutdown state return s; } - // bug checking: this loop needs a lot of attention and clean-ups and // check-ups int read = 0; @@ -411,17 +457,24 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, LIBUS_RECV_BUFFER_LENGTH - read); // we need to check if we received a shutdown here if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) { - s->pending_handshake = 0; s->received_ssl_shutdown = 1; // we will only close after we handle the data and errors } + if (just_read <= 0) { int err = SSL_get_error(s->ssl, just_read); - // as far as I know these are the only errors we want to handle if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) { - - if (err == SSL_ERROR_ZERO_RETURN) { + if (err == SSL_ERROR_WANT_RENEGOTIATE) { + if (us_internal_ssl_renegotiate(s)) { + // ok, we are done here, we need to call SSL_read again + // this dont mean that we are done with the handshake renegotiation + // we need to call SSL_read again + continue; + } + // clean and close renegotiation failed + err = SSL_ERROR_SSL; + } else if (err == SSL_ERROR_ZERO_RETURN) { // zero return can be EOF/FIN, if we have data just signal on_data and // close if (read) { @@ -477,6 +530,9 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, break; } + } else if (s->handshake_state == HANDSHAKE_RENEGOTIATION_PENDING) { + // renegotiation ended successfully call on_handshake + 
us_internal_trigger_handshake_callback(s, 1); } read += just_read; @@ -499,6 +555,7 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, goto restart; } } + // we received the shutdown after reading so we close if (s->received_ssl_shutdown) { us_internal_ssl_socket_close(s, 0, NULL); @@ -526,13 +583,11 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s, struct us_internal_ssl_socket_t * ssl_on_writable(struct us_internal_ssl_socket_t *s) { + us_internal_update_handshake(s); + struct us_internal_ssl_socket_context_t *context = (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); - if (s->pending_handshake) { - us_internal_ssl_handshake(s); - } - // todo: cork here so that we efficiently output both from reading and from // writing? if (s->ssl_read_wants_write) { @@ -547,7 +602,6 @@ ssl_on_writable(struct us_internal_ssl_socket_t *s) { s = (struct us_internal_ssl_socket_t *)context->sc.on_data(&s->s, 0, 0); // cast here! } - // Do not call on_writable if the socket is closed. 
// on close means the socket data is no longer accessible if (!s || us_socket_is_closed(0, &s->s)) { @@ -972,8 +1026,7 @@ long us_internal_verify_peer_certificate( // NOLINT(runtime/int) struct us_bun_verify_error_t us_internal_verify_error(struct us_internal_ssl_socket_t *s) { - if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s) || - s->received_ssl_shutdown) { + if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) { return (struct us_bun_verify_error_t){ .error = 0, .code = NULL, .reason = NULL}; } @@ -1248,11 +1301,18 @@ void us_bun_internal_ssl_socket_context_add_server_name( /* Attach the user data to this context */ if (1 != SSL_CTX_set_ex_data(ssl_context, 0, user)) { +#if BUN_DEBUG printf("CANNOT SET EX DATA!\n"); + abort(); +#endif } /* We do not want to hold any nullptr's in our SNI tree */ if (ssl_context) { +#if ALLOW_SERVER_RENEGOTIATION + context->client_renegotiation_limit = options.client_renegotiation_limit; + context->client_renegotiation_window = options.client_renegotiation_window; +#endif if (sni_add(context->sni, hostname_pattern, ssl_context)) { /* If we already had that name, ignore */ free_ssl_context(ssl_context); @@ -1310,8 +1370,6 @@ int sni_cb(SSL *ssl, int *al, void *arg) { SSL_CTX *resolved_ssl_context = resolve_context( (struct us_internal_ssl_socket_context_t *)arg, hostname); if (resolved_ssl_context) { - // printf("Did find matching SNI context for hostname: <%s>!\n", - // hostname); SSL_set_SSL_CTX(ssl, resolved_ssl_context); } else { /* Call a blocking callback notifying of missing context */ @@ -1396,7 +1454,6 @@ us_internal_bun_create_ssl_socket_context( /* I guess this is the only optional callback */ context->on_server_name = NULL; - /* Then we extend its SSL parts */ context->ssl_context = ssl_context; // create_ssl_context_from_options(options); @@ -1404,6 +1461,10 @@ us_internal_bun_create_ssl_socket_context( context->on_handshake = NULL; context->handshake_data = NULL; 
+#if ALLOW_SERVER_RENEGOTIATION + context->client_renegotiation_limit = options.client_renegotiation_limit; + context->client_renegotiation_window = options.client_renegotiation_window; +#endif /* We, as parent context, may ignore data */ context->sc.is_low_prio = (int (*)(struct us_socket_t *))ssl_is_low_prio; @@ -1600,9 +1661,7 @@ int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s, loop_ssl_data->ssl_socket = &s->s; loop_ssl_data->msg_more = msg_more; loop_ssl_data->last_write_was_msg_more = 0; - // printf("Calling SSL_write\n"); int written = SSL_write(s->ssl, data, length); - // printf("Returning from SSL_write\n"); loop_ssl_data->msg_more = 0; if (loop_ssl_data->last_write_was_msg_more && !msg_more) { @@ -1611,22 +1670,22 @@ int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s, if (written > 0) { return written; - } else { - int err = SSL_get_error(s->ssl, written); - if (err == SSL_ERROR_WANT_READ) { - // here we need to trigger writable event next ssl_read! - s->ssl_write_wants_read = 1; - } else if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) { - // these two errors may add to the error queue, which is per thread and - // must be cleared - ERR_clear_error(); + } - // all errors here except for want write are critical and should not - // happen - } + int err = SSL_get_error(s->ssl, written); + if (err == SSL_ERROR_WANT_READ) { + // here we need to trigger writable event next ssl_read! 
+ s->ssl_write_wants_read = 1; + } else if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) { + // these two errors may add to the error queue, which is per thread and + // must be cleared + ERR_clear_error(); - return 0; + // all errors here except for want write are critical and should not + // happen } + + return 0; } void *us_internal_ssl_socket_ext(struct us_internal_ssl_socket_t *s) { @@ -1938,7 +1997,7 @@ struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls( socket->ssl = NULL; socket->ssl_write_wants_read = 0; socket->ssl_read_wants_write = 0; - socket->pending_handshake = 1; + socket->handshake_state = HANDSHAKE_PENDING; socket->received_ssl_shutdown = 0; return socket; } diff --git a/packages/bun-usockets/src/internal/internal.h b/packages/bun-usockets/src/internal/internal.h index 40b0fb9e1b803a..76074ecce50cc7 100644 --- a/packages/bun-usockets/src/internal/internal.h +++ b/packages/bun-usockets/src/internal/internal.h @@ -254,7 +254,10 @@ void us_internal_ssl_socket_context_on_data( struct us_internal_ssl_socket_t *(*on_data)( struct us_internal_ssl_socket_t *s, char *data, int length)); -void us_internal_ssl_handshake(struct us_internal_ssl_socket_t *s); +void us_internal_update_handshake(struct us_internal_ssl_socket_t *s); +int us_internal_renegotiate(struct us_internal_ssl_socket_t *s); +void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s, + int success); void us_internal_on_ssl_handshake( struct us_internal_ssl_socket_context_t *context, us_internal_on_handshake_t onhandshake, void *custom_data); @@ -289,7 +292,7 @@ struct us_listen_socket_t *us_internal_ssl_socket_context_listen( int port, int options, int socket_ext_size); struct us_listen_socket_t *us_internal_ssl_socket_context_listen_unix( - struct us_internal_ssl_socket_context_t *context, const char *path, + struct us_internal_ssl_socket_context_t *context, const char *path, size_t pathlen, int options, int socket_ext_size); struct 
us_internal_ssl_socket_t *us_internal_ssl_socket_context_connect( diff --git a/packages/bun-usockets/src/libusockets.h b/packages/bun-usockets/src/libusockets.h index 2c17db3c219201..7f4e39865c155a 100644 --- a/packages/bun-usockets/src/libusockets.h +++ b/packages/bun-usockets/src/libusockets.h @@ -44,6 +44,7 @@ #define LIBUS_RECV_BUFFER_PADDING 32 /* Guaranteed alignment of extension memory */ #define LIBUS_EXT_ALIGNMENT 16 +#define ALLOW_SERVER_RENEGOTIATION 0 /* Define what a socket descriptor is based on platform */ #ifdef _WIN32 @@ -195,6 +196,8 @@ struct us_bun_socket_context_options_t { unsigned int secure_options; int reject_unauthorized; int request_cert; + unsigned int client_renegotiation_limit; + unsigned int client_renegotiation_window; }; /* Return 15-bit timestamp for this context */ diff --git a/packages/bun-usockets/src/loop.c b/packages/bun-usockets/src/loop.c index 21dee79c9f8c1d..193cc850ccfee6 100644 --- a/packages/bun-usockets/src/loop.c +++ b/packages/bun-usockets/src/loop.c @@ -338,7 +338,13 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events) do { const struct us_loop_t* loop = s->context->loop; - int length = bsd_recv(us_poll_fd(&s->p), loop->data.recv_buf + LIBUS_RECV_BUFFER_PADDING, LIBUS_RECV_BUFFER_LENGTH, MSG_DONTWAIT); + #ifdef _WIN32 + const int recv_flags = MSG_PUSH_IMMEDIATE; + #else + const int recv_flags = MSG_DONTWAIT | MSG_NOSIGNAL; + #endif + + int length = bsd_recv(us_poll_fd(&s->p), loop->data.recv_buf + LIBUS_RECV_BUFFER_PADDING, LIBUS_RECV_BUFFER_LENGTH, recv_flags); if (length > 0) { s = s->context->on_data(s, loop->data.recv_buf + LIBUS_RECV_BUFFER_PADDING, length); diff --git a/packages/bun-uws/src/App.h b/packages/bun-uws/src/App.h index 074cdca6a020e2..dc053fd838ffbd 100644 --- a/packages/bun-uws/src/App.h +++ b/packages/bun-uws/src/App.h @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - +// clang-format off #ifndef UWS_APP_H #define UWS_APP_H @@ -77,6 +77,8 @@ namespace uWS { unsigned int secure_options = 0; int reject_unauthorized = 0; int request_cert = 0; + unsigned int client_renegotiation_limit = 3; + unsigned int client_renegotiation_window = 600; /* Conversion operator used internally */ operator struct us_bun_socket_context_options_t() const { @@ -597,4 +599,4 @@ typedef TemplatedApp SSLApp; } -#endif // UWS_APP_H +#endif // UWS_APP_H \ No newline at end of file diff --git a/src/bun.js/api/bun/socket.zig b/src/bun.js/api/bun/socket.zig index 835052d680b19f..0d7aff4b1beae8 100644 --- a/src/bun.js/api/bun/socket.zig +++ b/src/bun.js/api/bun/socket.zig @@ -132,6 +132,11 @@ noinline fn getSSLException(globalThis: *JSC.JSGlobalObject, defaultMessage: []c return exception; } +/// we always allow and check the SSL certificate after the handshake or renegotiation +fn alwaysAllowSSLVerifyCallback(_: c_int, _: ?*BoringSSL.X509_STORE_CTX) callconv(.C) c_int { + return 1; +} + fn normalizeHost(input: anytype) @TypeOf(input) { return input; } @@ -218,8 +223,9 @@ const Handlers = struct { this.unprotect(); // will deinit when is not wrapped or when is the TCP wrapped connection if (wrapped != .tls) { - if (ctx) |ctx_| + if (ctx) |ctx_| { ctx_.deinit(ssl); + } } bun.default_allocator.destroy(this); } @@ -825,23 +831,27 @@ pub const Listener = struct { const arguments = callframe.arguments(1); log("close", .{}); - if (arguments.len > 0 and arguments.ptr[0].isBoolean() and arguments.ptr[0].toBoolean() and this.socket_context != null) { - this.socket_context.?.close(this.ssl); - this.listener = null; - } else { - var listener = this.listener orelse return JSValue.jsUndefined(); - this.listener = null; - listener.close(this.ssl); - } + var listener = this.listener orelse return JSValue.jsUndefined(); + this.listener = null; this.poll_ref.unref(this.handlers.vm); + // if we already have no active connections, we can deinit the context now if 
(this.handlers.active_connections == 0) { this.handlers.unprotect(); - this.socket_context.?.close(this.ssl); + // deiniting the context will also close the listener this.socket_context.?.deinit(this.ssl); this.socket_context = null; this.strong_self.clear(); this.strong_data.clear(); + } else { + const forceClose = arguments.len > 0 and arguments.ptr[0].isBoolean() and arguments.ptr[0].toBoolean() and this.socket_context != null; + if (forceClose) { + // close all connections in this context and wait for them to close + this.socket_context.?.close(this.ssl); + } else { + // only close the listener and wait for the connections to close by it self + listener.close(this.ssl); + } } return JSValue.jsUndefined(); @@ -2044,6 +2054,84 @@ fn NewSocket(comptime ssl: bool) type { return JSValue.jsUndefined(); } + pub fn disableRenegotiation( + this: *This, + _: *JSC.JSGlobalObject, + _: *JSC.CallFrame, + ) callconv(.C) JSValue { + if (comptime ssl == false) { + return JSValue.jsUndefined(); + } + if (this.detached) { + return JSValue.jsUndefined(); + } + + const ssl_ptr = this.socket.ssl(); + BoringSSL.SSL_set_renegotiate_mode(ssl_ptr, BoringSSL.ssl_renegotiate_never); + return JSValue.jsUndefined(); + } + + pub fn setVerifyMode( + this: *This, + globalObject: *JSC.JSGlobalObject, + callframe: *JSC.CallFrame, + ) callconv(.C) JSValue { + if (comptime ssl == false) { + return JSValue.jsUndefined(); + } + if (this.detached) { + return JSValue.jsUndefined(); + } + + const args = callframe.arguments(2); + + if (args.len < 2) { + globalObject.throw("Expected requestCert and rejectUnauthorized arguments", .{}); + return .zero; + } + const request_cert_js = args.ptr[0]; + const reject_unauthorized_js = args.ptr[1]; + if (!request_cert_js.isBoolean() or !reject_unauthorized_js.isBoolean()) { + globalObject.throw("Expected requestCert and rejectUnauthorized arguments to be boolean", .{}); + return .zero; + } + + const request_cert = request_cert_js.toBoolean(); + const 
reject_unauthorized = request_cert_js.toBoolean(); + var verify_mode: c_int = BoringSSL.SSL_VERIFY_NONE; + if (this.handlers.is_server) { + if (request_cert) { + verify_mode = BoringSSL.SSL_VERIFY_PEER; + if (reject_unauthorized) + verify_mode |= BoringSSL.SSL_VERIFY_FAIL_IF_NO_PEER_CERT; + } + } + const ssl_ptr = this.socket.ssl(); + // we always allow and check the SSL certificate after the handshake or renegotiation + BoringSSL.SSL_set_verify(ssl_ptr, verify_mode, alwaysAllowSSLVerifyCallback); + return JSValue.jsUndefined(); + } + + pub fn renegotiate( + this: *This, + globalObject: *JSC.JSGlobalObject, + _: *JSC.CallFrame, + ) callconv(.C) JSValue { + if (comptime ssl == false) { + return JSValue.jsUndefined(); + } + if (this.detached) { + return JSValue.jsUndefined(); + } + + const ssl_ptr = this.socket.ssl(); + BoringSSL.ERR_clear_error(); + if (BoringSSL.SSL_renegotiate(ssl_ptr) != 1) { + globalObject.throwValue(getSSLException(globalObject, "SSL_renegotiate error")); + return .zero; + } + return JSValue.jsUndefined(); + } pub fn getTLSTicket( this: *This, globalObject: *JSC.JSGlobalObject, diff --git a/src/bun.js/api/bun/subprocess.zig b/src/bun.js/api/bun/subprocess.zig index 7aba7f26e796c1..4a9ae15ad78069 100644 --- a/src/bun.js/api/bun/subprocess.zig +++ b/src/bun.js/api/bun/subprocess.zig @@ -1591,6 +1591,7 @@ pub const Subprocess = struct { var argv0: ?[*:0]const u8 = null; var windows_hide: bool = false; + var windows_verbatim_arguments: bool = false; { if (args.isEmptyOrUndefinedOrNull()) { @@ -1869,6 +1870,12 @@ pub const Subprocess = struct { windows_hide = val.asBoolean(); } } + + if (args.get(globalThis, "windowsVerbatimArguments")) |val| { + if (val.isBoolean()) { + windows_verbatim_arguments = val.asBoolean(); + } + } } } } @@ -1966,6 +1973,7 @@ pub const Subprocess = struct { .windows = if (Environment.isWindows) .{ .hide_window = windows_hide, + .verbatim_arguments = windows_verbatim_arguments, .loop = JSC.EventLoopHandle.init(jsc_vm), } 
else {}, }; diff --git a/src/bun.js/api/server.zig b/src/bun.js/api/server.zig index 5c3dbb49916fc3..adb737dd2625c4 100644 --- a/src/bun.js/api/server.zig +++ b/src/bun.js/api/server.zig @@ -215,6 +215,8 @@ pub const ServerConfig = struct { ssl_ciphers: [*c]const u8 = null, protos: [*c]const u8 = null, protos_len: usize = 0, + client_renegotiation_limit: u32 = 0, + client_renegotiation_window: u32 = 0, const log = Output.scoped(.SSLConfig, false); @@ -660,6 +662,18 @@ pub const ServerConfig = struct { } } + if (obj.getTruthy(global, "clientRenegotiationLimit")) |client_renegotiation_limit| { + if (client_renegotiation_limit.isNumber()) { + result.client_renegotiation_limit = client_renegotiation_limit.toU32(); + } + } + + if (obj.getTruthy(global, "clientRenegotiationWindow")) |client_renegotiation_window| { + if (client_renegotiation_window.isNumber()) { + result.client_renegotiation_window = client_renegotiation_window.toU32(); + } + } + if (obj.getTruthy(global, "dhParamsFile")) |dh_params_file_name| { var sliced = dh_params_file_name.toSlice(global, bun.default_allocator); defer sliced.deinit(); diff --git a/src/bun.js/api/sockets.classes.ts b/src/bun.js/api/sockets.classes.ts index c99abf6552d131..06418b93ed09eb 100644 --- a/src/bun.js/api/sockets.classes.ts +++ b/src/bun.js/api/sockets.classes.ts @@ -29,6 +29,18 @@ function generate(ssl) { fn: "getCipher", length: 0, }, + renegotiate: { + fn: "renegotiate", + length: 0, + }, + disableRenegotiation: { + fn: "disableRenegotiation", + length: 0, + }, + setVerifyMode: { + fn: "setVerifyMode", + length: 2, + }, getSession: { fn: "getSession", length: 0, diff --git a/src/bun.js/bindings/bindings-generator.zig b/src/bun.js/bindings/bindings-generator.zig index a215a979268b2d..3b9bb3dc9814e2 100644 --- a/src/bun.js/bindings/bindings-generator.zig +++ b/src/bun.js/bindings/bindings-generator.zig @@ -3,12 +3,12 @@ const Exports = @import("exports.zig"); const HeaderGen = @import("./header-gen.zig").HeaderGen; const std 
= @import("std"); const builtin = @import("builtin"); +const bun = @import("root").bun; const io = std.io; const fs = std.fs; const process = std.process; const ChildProcess = std.ChildProcess; const Progress = std.Progress; -const print = std.debug.print; const mem = std.mem; const testing = std.testing; const Allocator = std.mem.Allocator; @@ -20,7 +20,7 @@ const JSC = bun.JSC; const Classes = JSC.GlobalClasses; pub fn main() anyerror!void { - var allocator = std.heap.c_allocator; + const allocator = std.heap.c_allocator; const src: std.builtin.SourceLocation = @src(); const src_path = comptime bun.Environment.base_path ++ std.fs.path.dirname(src.file).?; { @@ -46,16 +46,16 @@ pub fn main() anyerror!void { inline while (i < Classes.len) : (i += 1) { const Class = Classes[i]; const paths = [_][]const u8{ src_path, Class.name ++ ".generated.h" }; - var headerFilePath = try std.fs.path.join( + const headerFilePath = try std.fs.path.join( allocator, &paths, ); - var implFilePath = try std.fs.path.join( + const implFilePath = try std.fs.path.join( allocator, &[_][]const u8{ std.fs.path.dirname(src.file) orelse return error.BadPath, Class.name ++ ".generated.cpp" }, ); var headerFile = try std.fs.createFileAbsolute(headerFilePath, .{}); - var header_writer = headerFile.writer(); + const header_writer = headerFile.writer(); var implFile = try std.fs.createFileAbsolute(implFilePath, .{}); try Class.@"generateC++Header"(header_writer); try Class.@"generateC++Class"(implFile.writer()); diff --git a/src/bun.js/bindings/sqlite/JSSQLStatement.cpp b/src/bun.js/bindings/sqlite/JSSQLStatement.cpp index c0b17460235c21..6c8912fdc7c2cd 100644 --- a/src/bun.js/bindings/sqlite/JSSQLStatement.cpp +++ b/src/bun.js/bindings/sqlite/JSSQLStatement.cpp @@ -1,5 +1,10 @@ + #include "root.h" +#include "JavaScriptCore/ExceptionScope.h" +#include "JavaScriptCore/JSArrayBufferView.h" +#include "JavaScriptCore/JSType.h" + #include "JSSQLStatement.h" #include #include @@ -29,6 +34,8 @@ #include 
#include "BunBuiltinNames.h" #include "sqlite3_error_codes.h" +#include "wtf/BitVector.h" +#include "wtf/Vector.h" #include /* ******************************************************************************** */ @@ -200,7 +207,7 @@ extern "C" void Bun__closeAllSQLiteDatabasesForTermination() for (auto& db : dbs) { if (db->db) - sqlite3_close_v2(db->db); + sqlite3_close(db->db); } } @@ -317,6 +324,9 @@ class JSSQLStatement : public JSC::JSDestructibleObject { VersionSqlite3* version_db; uint64_t version; bool hasExecuted = false; + // Tracks which columns are valid in the current result set. Used to handle duplicate column names. + // The bit at index i is set if the column at index i is valid. + WTF::BitVector validColumns; std::unique_ptr columnNames; mutable JSC::WriteBarrier _prototype; mutable JSC::WriteBarrier _structure; @@ -335,6 +345,47 @@ class JSSQLStatement : public JSC::JSDestructibleObject { void finishCreation(JSC::VM& vm); }; +static JSValue toJS(JSC::VM& vm, JSC::JSGlobalObject* globalObject, sqlite3_stmt* stmt, int i) +{ + switch (sqlite3_column_type(stmt, i)) { + case SQLITE_INTEGER: { + // https://github.com/oven-sh/bun/issues/1536 + return jsNumberFromSQLite(stmt, i); + } + case SQLITE_FLOAT: { + return jsDoubleNumber(sqlite3_column_double(stmt, i)); + } + // > Note that the SQLITE_TEXT constant was also used in SQLite version + // > 2 for a completely different meaning. Software that links against + // > both SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, + // > not SQLITE_TEXT. + case SQLITE3_TEXT: { + size_t len = sqlite3_column_bytes(stmt, i); + const unsigned char* text = len > 0 ? sqlite3_column_text(stmt, i) : nullptr; + if (UNLIKELY(text == nullptr || len == 0)) { + return jsEmptyString(vm); + } + + return len < 64 ? 
jsString(vm, WTF::String::fromUTF8({ text, len })) : JSC::JSValue::decode(Bun__encoding__toStringUTF8(text, len, globalObject)); + } + case SQLITE_BLOB: { + size_t len = sqlite3_column_bytes(stmt, i); + const void* blob = len > 0 ? sqlite3_column_blob(stmt, i) : nullptr; + if (LIKELY(len > 0 && blob != nullptr)) { + JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), len); + memcpy(array->vector(), blob, len); + return array; + } + + return JSC::JSUint8Array::create(globalObject, globalObject->m_typedArrayUint8.get(globalObject), 0); + } + default: { + break; + } + } + + return jsNull(); +} extern "C" { static JSC_DECLARE_JIT_OPERATION_WITHOUT_WTF_INTERNAL(jsSQLStatementExecuteStatementFunctionGetWithoutTypeChecking, JSC::EncodedJSValue, (JSC::JSGlobalObject * lexicalGlobalObject, JSSQLStatement* castedThis)); } @@ -416,6 +467,7 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ castedThis->columnNames->propertyNameMode(), castedThis->columnNames->privateSymbolMode())); } + castedThis->validColumns.clearAll(); castedThis->update_version(); JSC::VM& vm = lexicalGlobalObject->vm(); @@ -435,10 +487,10 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ // see https://github.com/oven-sh/bun/issues/987 // also see https://github.com/oven-sh/bun/issues/1646 auto& globalObject = *lexicalGlobalObject; - PropertyOffset offset; + auto columnNames = castedThis->columnNames.get(); bool anyHoles = false; - for (int i = 0; i < count; i++) { + for (int i = count - 1; i >= 0; i--) { const char* name = sqlite3_column_name(stmt, i); if (name == nullptr) { @@ -452,13 +504,29 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ break; } - columnNames->add(Identifier::fromString(vm, WTF::String::fromUTF8({ name, len }))); + // When joining multiple tables, the same column names can appear multiple times + // 
columnNames de-dupes property names internally + // We can't have two properties with the same name, so we use validColumns to track this. + auto preCount = columnNames->size(); + columnNames->add( + Identifier::fromString(vm, WTF::String::fromUTF8({name, len})) + ); + auto curCount = columnNames->size(); + + if (preCount != curCount) { + castedThis->validColumns.set(i); + } } if (LIKELY(!anyHoles)) { + PropertyOffset offset; Structure* structure = globalObject.structureCache().emptyObjectStructureForPrototype(&globalObject, globalObject.objectPrototype(), columnNames->size()); vm.writeBarrier(castedThis, structure); + // We iterated over the columns in reverse order so we need to reverse the columnNames here + // Importantly we reverse before adding the properties to the structure to ensure that index accesses + // later refer to the correct property. + columnNames->data()->propertyNameVector().reverse(); for (const auto& propertyName : *columnNames) { structure = Structure::addPropertyTransition(vm, structure, propertyName, 0, offset); } @@ -473,6 +541,7 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ castedThis->columnNames->vm(), castedThis->columnNames->propertyNameMode(), castedThis->columnNames->privateSymbolMode())); + castedThis->validColumns.clearAll(); } } @@ -484,7 +553,7 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ // see https://github.com/oven-sh/bun/issues/987 JSC::JSObject* object = JSC::constructEmptyObject(lexicalGlobalObject, lexicalGlobalObject->objectPrototype(), std::min(static_cast(count), JSFinalObject::maxInlineCapacity)); - for (int i = 0; i < count; i++) { + for (int i = count - 1; i >= 0; i--) { const char* name = sqlite3_column_name(stmt, i); if (name == nullptr) @@ -515,9 +584,18 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ } } - object->putDirect(vm, key, primitive, 0); + auto preCount = castedThis->columnNames->size(); 
castedThis->columnNames->add(key); + auto curCount = castedThis->columnNames->size(); + + // only put the property if it's not a duplicate + if (preCount != curCount) { + castedThis->validColumns.set(i); + object->putDirect(vm, key, primitive, 0); + } } + // We iterated over the columns in reverse order so we need to reverse the columnNames here + castedThis->columnNames->data()->propertyNameVector().reverse(); castedThis->_prototype.set(vm, castedThis, object); } @@ -1226,6 +1304,8 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementOpenStatementFunction, (JSC::JSGlobalObje openFlags = flags.toInt32(lexicalGlobalObject); } + JSValue finalizationTarget = callFrame->argument(2); + sqlite3* db = nullptr; int statusCode = sqlite3_open_v2(path.utf8().data(), &db, openFlags, nullptr); @@ -1244,10 +1324,20 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementOpenStatementFunction, (JSC::JSGlobalObje if (status != SQLITE_OK) { // TODO: log a warning here that defensive mode is unsupported. } - auto count = databases().size(); + auto index = databases().size(); sqlite3_extended_result_codes(db, 1); databases().append(new VersionSqlite3(db)); - RELEASE_AND_RETURN(scope, JSValue::encode(jsNumber(count))); + if (finalizationTarget.isObject()) { + vm.heap.addFinalizer(finalizationTarget.getObject(), [index](JSC::JSCell* ptr) -> void { + auto* db = databases()[index]; + if (!db->db) { + return; + } + sqlite3_close_v2(db->db); + databases()[index]->db = nullptr; + }); + } + RELEASE_AND_RETURN(scope, JSValue::encode(jsNumber(index))); } JSC_DEFINE_HOST_FUNCTION(jsSQLStatementCloseStatementFunction, (JSC::JSGlobalObject * lexicalGlobalObject, JSC::CallFrame* callFrame)) @@ -1270,6 +1360,7 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementCloseStatementFunction, (JSC::JSGlobalObj } JSValue dbNumber = callFrame->argument(0); + JSValue throwOnError = callFrame->argument(1); if (!dbNumber.isNumber()) { throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, "Expected number"_s)); return 
JSValue::encode(jsUndefined()); @@ -1282,13 +1373,17 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementCloseStatementFunction, (JSC::JSGlobalObj return JSValue::encode(jsUndefined()); } + bool shouldThrowOnError = (throwOnError.isEmpty() || throwOnError.isUndefined()) ? false : throwOnError.toBoolean(lexicalGlobalObject); + RETURN_IF_EXCEPTION(scope, {}); + sqlite3* db = databases()[dbIndex]->db; // no-op if already closed if (!db) { return JSValue::encode(jsUndefined()); } - int statusCode = sqlite3_close_v2(db); + // sqlite3_close_v2 is used for automatic GC cleanup + int statusCode = shouldThrowOnError ? sqlite3_close(db) : sqlite3_close_v2(db); if (statusCode != SQLITE_OK) { throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, WTF::String::fromUTF8(sqlite3_errstr(statusCode)))); return JSValue::encode(jsUndefined()); @@ -1298,6 +1393,91 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementCloseStatementFunction, (JSC::JSGlobalObj return JSValue::encode(jsUndefined()); } +JSC_DEFINE_HOST_FUNCTION(jsSQLStatementFcntlFunction, (JSC::JSGlobalObject * lexicalGlobalObject, JSC::CallFrame* callFrame)) +{ + JSC::VM& vm = lexicalGlobalObject->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSValue thisValue = callFrame->thisValue(); + JSSQLStatementConstructor* thisObject = jsDynamicCast(thisValue.getObject()); + if (UNLIKELY(!thisObject)) { + throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, "Expected SQLStatement"_s)); + return JSValue::encode(jsUndefined()); + } + + if (callFrame->argumentCount() < 2) { + throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, "Expected 2 arguments"_s)); + return JSValue::encode(jsUndefined()); + } + + JSValue dbNumber = callFrame->argument(0); + JSValue databaseFileName = callFrame->argument(1); + JSValue opNumber = callFrame->argument(2); + JSValue resultValue = callFrame->argument(3); + + if (!dbNumber.isNumber() || !opNumber.isNumber()) { + throwException(lexicalGlobalObject, 
scope, createError(lexicalGlobalObject, "Expected number"_s)); + return JSValue::encode(jsUndefined()); + } + + int dbIndex = dbNumber.toInt32(lexicalGlobalObject); + int op = opNumber.toInt32(lexicalGlobalObject); + + if (dbIndex < 0 || dbIndex >= databases().size()) { + throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, "Invalid database handle"_s)); + return JSValue::encode(jsUndefined()); + } + + sqlite3* db = databases()[dbIndex]->db; + // no-op if already closed + if (!db) { + return JSValue::encode(jsUndefined()); + } + + CString fileNameStr; + + if (databaseFileName.isString()) { + fileNameStr = databaseFileName.toWTFString(lexicalGlobalObject).utf8(); + RETURN_IF_EXCEPTION(scope, {}); + } + + int resultInt = -1; + void* resultPtr = nullptr; + if (resultValue.isObject()) { + if (auto* view = jsDynamicCast(resultValue.getObject())) { + if (view->isDetached()) { + throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, "TypedArray is detached"_s)); + return JSValue::encode(jsUndefined()); + } + + resultPtr = view->vector(); + if (resultPtr == nullptr) { + throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, "Expected buffer"_s)); + return JSValue::encode(jsUndefined()); + } + } + } else if (resultValue.isNumber()) { + resultInt = resultValue.toInt32(lexicalGlobalObject); + RETURN_IF_EXCEPTION(scope, {}); + + resultPtr = &resultInt; + } else if (resultValue.isNull()) { + + } else { + throwException(lexicalGlobalObject, scope, createError(lexicalGlobalObject, "Expected result to be a number, null or a TypedArray"_s)); + return {}; + } + + int statusCode = sqlite3_file_control(db, fileNameStr.isNull() ? 
nullptr : fileNameStr.data(), op, resultPtr); + + if (statusCode == SQLITE_ERROR) { + throwException(lexicalGlobalObject, scope, createSQLiteError(lexicalGlobalObject, db)); + return JSValue::encode(jsUndefined()); + } + + return JSValue::encode(jsNumber(statusCode)); +} + /* Hash table for constructor */ static const HashTableValue JSSQLStatementConstructorTableValues[] = { { "open"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementOpenStatementFunction, 2 } }, @@ -1309,6 +1489,7 @@ static const HashTableValue JSSQLStatementConstructorTableValues[] = { { "setCustomSQLite"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementSetCustomSQLite, 1 } }, { "serialize"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementSerialize, 1 } }, { "deserialize"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementDeserialize, 2 } }, + { "fcntl"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementFcntlFunction, 2 } }, }; const ClassInfo JSSQLStatementConstructor::s_info = { "SQLStatement"_s, &Base::s_info, nullptr, nullptr, CREATE_METHOD_TABLE(JSSQLStatementConstructor) }; @@ -1345,54 +1526,15 @@ static inline JSC::JSValue constructResultObject(JSC::JSGlobalObject* lexicalGlo if (auto* structure = castedThis->_structure.get()) { result = JSC::constructEmptyObject(vm, structure); - for (unsigned int i = 0; i < count; i++) { - JSValue value; - - // Loop 1. 
Fill the rowBuffer with values from SQLite - switch (sqlite3_column_type(stmt, i)) { - case SQLITE_INTEGER: { - // https://github.com/oven-sh/bun/issues/1536 - value = jsNumberFromSQLite(stmt, i); - break; - } - case SQLITE_FLOAT: { - value = jsNumber(sqlite3_column_double(stmt, i)); - break; - } - // > Note that the SQLITE_TEXT constant was also used in SQLite version - // > 2 for a completely different meaning. Software that links against - // > both SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, - // > not SQLITE_TEXT. - case SQLITE3_TEXT: { - size_t len = sqlite3_column_bytes(stmt, i); - const unsigned char* text = len > 0 ? sqlite3_column_text(stmt, i) : nullptr; - - if (len > 64) { - value = JSC::JSValue::decode(Bun__encoding__toStringUTF8(text, len, lexicalGlobalObject)); - break; - } else { - value = jsString(vm, WTF::String::fromUTF8({ text, len })); - break; - } - } - case SQLITE_BLOB: { - size_t len = sqlite3_column_bytes(stmt, i); - const void* blob = len > 0 ? 
sqlite3_column_blob(stmt, i) : nullptr; - JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(lexicalGlobalObject, lexicalGlobalObject->m_typedArrayUint8.get(lexicalGlobalObject), len); - - if (LIKELY(blob && len)) - memcpy(array->vector(), blob, len); - - value = array; - break; - } - default: { - value = jsNull(); - break; - } + // i: the index of columns returned from SQLite + // j: the index of object property + for (int i = 0, j = 0; j < count; i++, j++) { + if (!castedThis->validColumns.get(i)) { + // this column is duplicate, skip + j -= 1; + continue; } - - result->putDirectOffset(vm, i, value); + result->putDirectOffset(vm, j, toJS(vm, lexicalGlobalObject, stmt, i)); } } else { @@ -1402,8 +1544,13 @@ static inline JSC::JSValue constructResultObject(JSC::JSGlobalObject* lexicalGlo result = JSC::JSFinalObject::create(vm, JSC::JSFinalObject::createStructure(vm, lexicalGlobalObject, lexicalGlobalObject->objectPrototype(), JSFinalObject::maxInlineCapacity)); } - for (int i = 0; i < count; i++) { - auto name = columnNames[i]; + for (int i = 0, j = 0; j < count; i++, j++) { + if (!castedThis->validColumns.get(i)) { + j -= 1; + continue; + } + auto name = columnNames[j]; + result->putDirect(vm, name, toJS(vm, lexicalGlobalObject, stmt, i), 0); switch (sqlite3_column_type(stmt, i)) { case SQLITE_INTEGER: { @@ -1457,53 +1604,19 @@ static inline JSC::JSArray* constructResultRow(JSC::JSGlobalObject* lexicalGloba { int count = castedThis->columnNames->size(); auto& vm = lexicalGlobalObject->vm(); + auto throwScope = DECLARE_THROW_SCOPE(vm); JSC::JSArray* result = JSArray::create(vm, lexicalGlobalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous), count); auto* stmt = castedThis->stmt; - for (int i = 0; i < count; i++) { - - switch (sqlite3_column_type(stmt, i)) { - case SQLITE_INTEGER: { - // https://github.com/oven-sh/bun/issues/1536 - result->putDirectIndex(lexicalGlobalObject, i, jsNumberFromSQLite(stmt, i)); - break; - } - case 
SQLITE_FLOAT: { - result->putDirectIndex(lexicalGlobalObject, i, jsDoubleNumber(sqlite3_column_double(stmt, i))); - break; - } - // > Note that the SQLITE_TEXT constant was also used in SQLite version - // > 2 for a completely different meaning. Software that links against - // > both SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, - // > not SQLITE_TEXT. - case SQLITE_TEXT: { - size_t len = sqlite3_column_bytes(stmt, i); - const unsigned char* text = len > 0 ? sqlite3_column_text(stmt, i) : nullptr; - if (UNLIKELY(text == nullptr || len == 0)) { - result->putDirectIndex(lexicalGlobalObject, i, jsEmptyString(vm)); - continue; - } - result->putDirectIndex(lexicalGlobalObject, i, len < 64 ? jsString(vm, WTF::String::fromUTF8({ text, len })) : JSC::JSValue::decode(Bun__encoding__toStringUTF8(text, len, lexicalGlobalObject))); - break; - } - case SQLITE_BLOB: { - size_t len = sqlite3_column_bytes(stmt, i); - const void* blob = len > 0 ? sqlite3_column_blob(stmt, i) : nullptr; - if (LIKELY(len > 0 && blob != nullptr)) { - JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(lexicalGlobalObject, lexicalGlobalObject->m_typedArrayUint8.get(lexicalGlobalObject), len); - memcpy(array->vector(), blob, len); - result->putDirectIndex(lexicalGlobalObject, i, array); - } else { - result->putDirectIndex(lexicalGlobalObject, i, JSC::JSUint8Array::create(lexicalGlobalObject, lexicalGlobalObject->m_typedArrayUint8.get(lexicalGlobalObject), 0)); - } - break; - } - default: { - result->putDirectIndex(lexicalGlobalObject, i, jsNull()); - break; - } + for (int i = 0, j = 0; j < count; i++, j++) { + if (!castedThis->validColumns.get(i)) { + j -= 1; + continue; } + JSValue value = toJS(vm, lexicalGlobalObject, stmt, i); + RETURN_IF_EXCEPTION(throwScope, nullptr); + result->putDirectIndex(lexicalGlobalObject, j, value); } return result; diff --git a/src/bun.js/bindings/sqlite/lazy_sqlite3.h b/src/bun.js/bindings/sqlite/lazy_sqlite3.h index 
06c0a83e5551f8..00a1c292d07d7d 100644 --- a/src/bun.js/bindings/sqlite/lazy_sqlite3.h +++ b/src/bun.js/bindings/sqlite/lazy_sqlite3.h @@ -20,6 +20,8 @@ typedef int (*lazy_sqlite3_bind_parameter_index_type)(sqlite3_stmt*, const char* typedef int (*lazy_sqlite3_changes_type)(sqlite3*); typedef int (*lazy_sqlite3_clear_bindings_type)(sqlite3_stmt*); typedef int (*lazy_sqlite3_close_v2_type)(sqlite3*); +typedef int (*lazy_sqlite3_close_type)(sqlite3*); +typedef int (*lazy_sqlite3_file_control_type)(sqlite3*, const char* zDbName, int op, void* pArg); typedef int (*lazy_sqlite3_extended_result_codes_type)(sqlite3*, int onoff); typedef const void* (*lazy_sqlite3_column_blob_type)(sqlite3_stmt*, int iCol); typedef double (*lazy_sqlite3_column_double_type)(sqlite3_stmt*, int iCol); @@ -100,6 +102,8 @@ static lazy_sqlite3_bind_text16_type lazy_sqlite3_bind_text16; static lazy_sqlite3_changes_type lazy_sqlite3_changes; static lazy_sqlite3_clear_bindings_type lazy_sqlite3_clear_bindings; static lazy_sqlite3_close_v2_type lazy_sqlite3_close_v2; +static lazy_sqlite3_close_type lazy_sqlite3_close; +static lazy_sqlite3_file_control_type lazy_sqlite3_file_control; static lazy_sqlite3_column_blob_type lazy_sqlite3_column_blob; static lazy_sqlite3_column_bytes_type lazy_sqlite3_column_bytes; static lazy_sqlite3_column_bytes16_type lazy_sqlite3_column_bytes16; @@ -147,6 +151,8 @@ static lazy_sqlite3_memory_used_type lazy_sqlite3_memory_used; #define sqlite3_changes lazy_sqlite3_changes #define sqlite3_clear_bindings lazy_sqlite3_clear_bindings #define sqlite3_close_v2 lazy_sqlite3_close_v2 +#define sqlite3_close lazy_sqlite3_close +#define sqlite3_file_control lazy_sqlite3_file_control #define sqlite3_column_blob lazy_sqlite3_column_blob #define sqlite3_column_bytes lazy_sqlite3_column_bytes #define sqlite3_column_count lazy_sqlite3_column_count @@ -226,6 +232,8 @@ static int lazyLoadSQLite() lazy_sqlite3_changes = (lazy_sqlite3_changes_type)dlsym(sqlite3_handle, "sqlite3_changes"); 
lazy_sqlite3_clear_bindings = (lazy_sqlite3_clear_bindings_type)dlsym(sqlite3_handle, "sqlite3_clear_bindings"); lazy_sqlite3_close_v2 = (lazy_sqlite3_close_v2_type)dlsym(sqlite3_handle, "sqlite3_close_v2"); + lazy_sqlite3_close = (lazy_sqlite3_close_type)dlsym(sqlite3_handle, "sqlite3_close"); + lazy_sqlite3_file_control = (lazy_sqlite3_file_control_type)dlsym(sqlite3_handle, "sqlite3_file_control"); lazy_sqlite3_column_blob = (lazy_sqlite3_column_blob_type)dlsym(sqlite3_handle, "sqlite3_column_blob"); lazy_sqlite3_column_bytes = (lazy_sqlite3_column_bytes_type)dlsym(sqlite3_handle, "sqlite3_column_bytes"); lazy_sqlite3_column_count = (lazy_sqlite3_column_count_type)dlsym(sqlite3_handle, "sqlite3_column_count"); diff --git a/src/bun.js/node/node_fs.zig b/src/bun.js/node/node_fs.zig index 93a29f747f9d4a..b234da2087eee7 100644 --- a/src/bun.js/node/node_fs.zig +++ b/src/bun.js/node/node_fs.zig @@ -5642,58 +5642,12 @@ pub const NodeFS = struct { } pub fn writeFileWithPathBuffer(pathbuf: *[bun.MAX_PATH_BYTES]u8, args: Arguments.WriteFile) Maybe(Return.WriteFile) { - var path: [:0]const u8 = undefined; - var pathbuf2: [bun.MAX_PATH_BYTES]u8 = undefined; - const fd = switch (args.file) { .path => brk: { - // On Windows, we potentially mutate the path in posixToPlatformInPlace - // We cannot mutate JavaScript strings in-place. That will break many things. - // So we must always copy the path string on Windows. 
- path = path: { - const temp_path = args.file.path.sliceZWithForceCopy(pathbuf, Environment.isWindows); - if (Environment.isWindows) { - bun.path.posixToPlatformInPlace(u8, temp_path); - } - break :path temp_path; - }; - - var is_dirfd_different = false; - var dirfd = args.dirfd; - if (Environment.isWindows) { - while (std.mem.startsWith(u8, path, "..\\")) { - is_dirfd_different = true; - var buffer: bun.WPathBuffer = undefined; - const dirfd_path_len = std.os.windows.kernel32.GetFinalPathNameByHandleW(args.dirfd.cast(), &buffer, buffer.len, 0); - const dirfd_path = buffer[0..dirfd_path_len]; - const parent_path = bun.Dirname.dirname(u16, dirfd_path).?; - if (std.mem.startsWith(u16, parent_path, &bun.windows.nt_maxpath_prefix)) @constCast(parent_path)[1] = '?'; - const newdirfd = switch (bun.sys.openDirAtWindows(bun.invalid_fd, parent_path, .{ .no_follow = true })) { - .result => |fd| fd, - .err => |err| { - return .{ .err = err.withPath(path) }; - }, - }; - path = path[3..]; - dirfd = newdirfd; - } - } - defer if (is_dirfd_different) { - var d = dirfd.asDir(); - d.close(); - }; - if (Environment.isWindows) { - // windows openat does not support path traversal, fix it here. 
- // use pathbuf2 here since without it 'panic: @memcpy arguments alias' triggers - if (std.mem.indexOf(u8, path, "\\.\\") != null or std.mem.indexOf(u8, path, "\\..\\") != null) { - const fixed_path = bun.path.normalizeStringWindows(path, &pathbuf2, false, false); - pathbuf2[fixed_path.len] = 0; - path = pathbuf2[0..fixed_path.len :0]; - } - } + const path = args.file.path.sliceZWithForceCopy(pathbuf, true); const open_result = Syscall.openat( - dirfd, + args.dirfd, path, @intFromEnum(args.flag) | os.O.NOCTTY, args.mode, @@ -5780,7 +5734,9 @@ pub const NodeFS = struct { } } else { // https://github.com/oven-sh/bun/issues/2931 - if ((@intFromEnum(args.flag) & std.os.O.APPEND) == 0) { + // https://github.com/oven-sh/bun/issues/10222 + // only truncate if we're not appending and writing to a path + if ((@intFromEnum(args.flag) & std.os.O.APPEND) == 0 and args.file != .fd) { _ = ftruncateSync(.{ .fd = fd, .len = @as(JSC.WebCore.Blob.SizeType, @truncate(written)) }); } } diff --git a/src/bun.js/node/types.zig b/src/bun.js/node/types.zig index 8752a92a966c3e..894e99a5ff4674 100644 --- a/src/bun.js/node/types.zig +++ b/src/bun.js/node/types.zig @@ -248,7 +248,9 @@ pub fn Maybe(comptime ReturnTypeT: type, comptime ErrorTypeT: type) type { pub inline fn errnoSysFd(rc: anytype, syscall: Syscall.Tag, fd: bun.FileDescriptor) ?@This() { if (comptime Environment.isWindows) { - if (rc != 0) return null; + if (comptime @TypeOf(rc) == std.os.windows.NTSTATUS) {} else { + if (rc != 0) return null; + } } return switch (Syscall.getErrno(rc)) { .SUCCESS => null, @@ -268,7 +270,9 @@ pub fn Maybe(comptime ReturnTypeT: type, comptime ErrorTypeT: type) type { @compileError("Do not pass WString path to errnoSysP, it needs the path encoded as utf8"); } if (comptime Environment.isWindows) { - if (rc != 0) return null; + if (comptime @TypeOf(rc) == std.os.windows.NTSTATUS) {} else { + if (rc != 0) return null; + } } return switch (Syscall.getErrno(rc)) { .SUCCESS => null, @@ -926,12 +930,9 @@ 
pub const Valid = struct { 0...bun.MAX_PATH_BYTES => return true, else => { // TODO: should this be an EINVAL? - JSC.throwInvalidArguments( - comptime std.fmt.comptimePrint("Invalid path string: path is too long (max: {d})", .{bun.MAX_PATH_BYTES}), - .{}, - ctx, - exception, - ); + var system_error = bun.sys.Error.fromCode(.NAMETOOLONG, .open).withPath(zig_str.slice()).toSystemError(); + system_error.syscall = bun.String.dead; + exception.* = system_error.toErrorInstance(ctx).asObjectRef(); return false; }, } @@ -944,12 +945,9 @@ pub const Valid = struct { 0...bun.MAX_PATH_BYTES => return true, else => { // TODO: should this be an EINVAL? - JSC.throwInvalidArguments( - comptime std.fmt.comptimePrint("Invalid path string: path is too long (max: {d})", .{bun.MAX_PATH_BYTES}), - .{}, - ctx, - exception, - ); + var system_error = bun.sys.Error.fromCode(.NAMETOOLONG, .open).toSystemError(); + system_error.syscall = bun.String.dead; + exception.* = system_error.toErrorInstance(ctx).asObjectRef(); return false; }, } @@ -970,14 +968,9 @@ pub const Valid = struct { }, else => { - - // TODO: should this be an EINVAL? 
- JSC.throwInvalidArguments( - comptime std.fmt.comptimePrint("Invalid path buffer: path is too long (max: {d})", .{bun.MAX_PATH_BYTES}), - .{}, - ctx, - exception, - ); + var system_error = bun.sys.Error.fromCode(.NAMETOOLONG, .open).toSystemError(); + system_error.syscall = bun.String.dead; + exception.* = system_error.toErrorInstance(ctx).asObjectRef(); return false; }, 1...bun.MAX_PATH_BYTES => return true, diff --git a/src/bun.js/unbounded_queue.zig b/src/bun.js/unbounded_queue.zig index 85287a899cc86f..5fafbd48d4afc2 100644 --- a/src/bun.js/unbounded_queue.zig +++ b/src/bun.js/unbounded_queue.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const bun = @import("root").bun; const os = std.os; const mem = std.mem; @@ -7,7 +8,7 @@ const atomic = std.atomic; const builtin = std.builtin; const testing = std.testing; -const assert = @import("root").bun.assert; +const assert = bun.assert; const mpsc = @This(); diff --git a/src/bun.js/webcore/streams.zig b/src/bun.js/webcore/streams.zig index 11cfadf62cbdb5..0ef0e1c315b3b6 100644 --- a/src/bun.js/webcore/streams.zig +++ b/src/bun.js/webcore/streams.zig @@ -1964,6 +1964,7 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { } fn handleWrote(this: *@This(), amount1: usize) void { + defer log("handleWrote: {d} offset: {d}, {d}", .{ amount1, this.offset, this.buffer.len }); const amount = @as(Blob.SizeType, @truncate(amount1)); this.offset += amount; this.wrote += amount; @@ -1996,8 +1997,11 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { if (this.requested_end and !this.res.state().isHttpWriteCalled()) { this.handleFirstWriteIfNecessary(); const success = this.res.tryEnd(buf, this.end_len, false); - this.has_backpressure = !success; - if (this.has_backpressure) { + if (success) { + this.has_backpressure = false; + this.handleWrote(this.end_len); + } else { + this.has_backpressure = true; this.res.onWritable(*@This(), onWritable, this); } return success; @@ -2018,7 +2022,7 @@ pub fn HTTPServerWritable(comptime 
ssl: bool) type { } else { this.has_backpressure = !this.res.write(buf); } - + this.handleWrote(buf.len); return true; } @@ -2064,7 +2068,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { // if we were unable to send it, retry return false; } - this.handleWrote(@as(Blob.SizeType, @truncate(chunk.len))); total_written = chunk.len; if (this.requested_end) { @@ -2150,7 +2153,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { const success = this.send(slice); if (success) { - this.handleWrote(@as(Blob.SizeType, @truncate(slice.len))); return .{ .result = JSValue.jsNumber(slice.len) }; } @@ -2178,7 +2180,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { assert(slice.len > 0); const success = this.send(slice); if (success) { - this.handleWrote(@as(Blob.SizeType, @truncate(slice.len))); return .{ .result = JSC.JSPromise.resolvedPromiseValue(globalThis, JSValue.jsNumber(slice.len)) }; } } @@ -2221,7 +2222,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { // - large-ish chunk // - no backpressure if (this.send(bytes)) { - this.handleWrote(len); return .{ .owned = len }; } @@ -2236,7 +2236,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { }; const slice = this.readableSlice(); if (this.send(slice)) { - this.handleWrote(slice.len); return .{ .owned = len }; } } else { @@ -2274,7 +2273,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { // - large-ish chunk // - no backpressure if (this.send(bytes)) { - this.handleWrote(bytes.len); return .{ .owned = len }; } do_send = false; @@ -2286,7 +2284,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { if (do_send) { if (this.send(this.readableSlice())) { - this.handleWrote(bytes.len); return .{ .owned = len }; } } @@ -2299,7 +2296,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { }; const readable = this.readableSlice(); if (this.send(readable)) { - this.handleWrote(readable.len); return .{ .owned = len }; } } else { @@ -2336,7 +2332,6 @@ pub fn HTTPServerWritable(comptime 
ssl: bool) type { const readable = this.readableSlice(); if (readable.len >= this.highWaterMark or this.hasBackpressure()) { if (this.send(readable)) { - this.handleWrote(readable.len); return .{ .owned = @as(Blob.SizeType, @intCast(written)) }; } } @@ -2464,8 +2459,6 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { this.auto_flusher.registered = true; return true; } - - this.handleWrote(readable.len); this.auto_flusher.registered = false; return false; } diff --git a/src/bun.zig b/src/bun.zig index 21dd48e958abd4..62cbcce34cf164 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -28,6 +28,7 @@ else pub const huge_allocator_threshold: comptime_int = @import("./memory_allocator.zig").huge_threshold; pub const callmod_inline: std.builtin.CallModifier = if (builtin.mode == .Debug) .auto else .always_inline; +pub const callconv_inline: std.builtin.CallingConvention = if (builtin.mode == .Debug) .Unspecified else .Inline; /// We cannot use a threadlocal memory allocator for FileSystem-related things /// FileSystem is a singleton. 
@@ -3107,7 +3108,7 @@ pub inline fn debugAssert(cheap_value_only_plz: bool) void { } } -pub inline fn assert(value: bool) void { +pub fn assert(value: bool) callconv(callconv_inline) void { if (comptime !Environment.allow_assert) { return; } diff --git a/src/bun_js.zig b/src/bun_js.zig index 73ce474679bb58..f23b8dfcdda660 100644 --- a/src/bun_js.zig +++ b/src/bun_js.zig @@ -42,8 +42,7 @@ pub const Run = struct { arena: Arena, any_unhandled: bool = false, - pub fn bootStandalone(ctx_: Command.Context, entry_path: string, graph: bun.StandaloneModuleGraph) !void { - var ctx = ctx_; + pub fn bootStandalone(ctx: Command.Context, entry_path: string, graph: bun.StandaloneModuleGraph) !void { JSC.markBinding(@src()); bun.JSC.initialize(); @@ -55,7 +54,7 @@ pub const Run = struct { var arena = try Arena.init(); if (!ctx.debug.loaded_bunfig) { - try bun.CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", &ctx, .RunCommand); + try bun.CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", ctx, .RunCommand); } run = .{ @@ -123,7 +122,7 @@ pub const Run = struct { vm.global.vm().holdAPILock(&run, callback); } - fn bootBunShell(ctx: *const Command.Context, entry_path: []const u8) !bun.shell.ExitCode { + fn bootBunShell(ctx: Command.Context, entry_path: []const u8) !bun.shell.ExitCode { @setCold(true); // this is a hack: make dummy bundler so we can use its `.runEnvLoader()` function to populate environment variables probably should split out the functionality @@ -139,16 +138,15 @@ pub const Run = struct { return bun.shell.Interpreter.initAndRunFromFile(ctx, mini, entry_path); } - pub fn boot(ctx_: Command.Context, entry_path: string) !void { - var ctx = ctx_; + pub fn boot(ctx: Command.Context, entry_path: string) !void { JSC.markBinding(@src()); if (!ctx.debug.loaded_bunfig) { - try bun.CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", &ctx, .RunCommand); + try bun.CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", ctx, 
.RunCommand); } if (strings.endsWithComptime(entry_path, ".sh")) { - const exit_code = try bootBunShell(&ctx, entry_path); + const exit_code = try bootBunShell(ctx, entry_path); Global.exitWide(exit_code); return; } diff --git a/src/bunfig.zig b/src/bunfig.zig index 755b33eef09c25..c38b202ea87bb6 100644 --- a/src/bunfig.zig +++ b/src/bunfig.zig @@ -49,7 +49,7 @@ pub const Bunfig = struct { log: *logger.Log, allocator: std.mem.Allocator, bunfig: *Api.TransformOptions, - ctx: *Command.Context, + ctx: Command.Context, fn addError(this: *Parser, loc: logger.Loc, comptime text: string) !void { this.log.addError(this.source, loc, text) catch unreachable; @@ -777,7 +777,7 @@ pub const Bunfig = struct { } }; - pub fn parse(allocator: std.mem.Allocator, source: logger.Source, ctx: *Command.Context, comptime cmd: Command.Tag) !void { + pub fn parse(allocator: std.mem.Allocator, source: logger.Source, ctx: Command.Context, comptime cmd: Command.Tag) !void { const log_count = ctx.log.errors + ctx.log.warnings; const expr = if (strings.eqlComptime(source.path.name.ext[1..], "toml")) TOML.parse(&source, ctx.log, allocator) catch |err| { diff --git a/src/cli.zig b/src/cli.zig index ecb553e624864c..ef4986a9119332 100644 --- a/src/cli.zig +++ b/src/cli.zig @@ -261,7 +261,7 @@ pub const Arguments = struct { Global.exit(0); } - pub fn loadConfigPath(allocator: std.mem.Allocator, auto_loaded: bool, config_path: [:0]const u8, ctx: *Command.Context, comptime cmd: Command.Tag) !void { + pub fn loadConfigPath(allocator: std.mem.Allocator, auto_loaded: bool, config_path: [:0]const u8, ctx: Command.Context, comptime cmd: Command.Tag) !void { var config_file = switch (bun.sys.openA(config_path, std.os.O.RDONLY, 0)) { .result => |fd| fd.asFile(), .err => |err| { @@ -306,7 +306,7 @@ pub const Arguments = struct { return null; } - pub fn loadConfig(allocator: std.mem.Allocator, user_config_path_: ?string, ctx: *Command.Context, comptime cmd: Command.Tag) !void { + pub fn loadConfig(allocator: 
std.mem.Allocator, user_config_path_: ?string, ctx: Command.Context, comptime cmd: Command.Tag) !void { var config_buf: [bun.MAX_PATH_BYTES]u8 = undefined; if (comptime cmd.readGlobalConfig()) { if (!ctx.has_loaded_global_config) { @@ -368,12 +368,12 @@ pub const Arguments = struct { comptime cmd: Command.Tag, allocator: std.mem.Allocator, args: clap.Args(clap.Help, cmd.params()), - ctx: *Command.Context, + ctx: Command.Context, ) !void { return try loadConfig(allocator, args.option("--config"), ctx, comptime cmd); } - pub fn parse(allocator: std.mem.Allocator, ctx: *Command.Context, comptime cmd: Command.Tag) !Api.TransformOptions { + pub fn parse(allocator: std.mem.Allocator, ctx: Command.Context, comptime cmd: Command.Tag) !Api.TransformOptions { var diag = clap.Diagnostic{}; const params_to_parse = comptime cmd.params(); @@ -1136,7 +1136,18 @@ pub const Command = struct { } = .{}, }; - pub const Context = struct { + var global_cli_ctx: Context = undefined; + + var context_data: ContextData = ContextData{ + .args = std.mem.zeroes(Api.TransformOptions), + .log = undefined, + .start_time = 0, + .allocator = undefined, + }; + + pub const init = ContextData.create; + + pub const ContextData = struct { start_time: i128, args: Api.TransformOptions, log: *logger.Log, @@ -1173,34 +1184,28 @@ pub const Command = struct { minify_identifiers: bool = false, }; - const _ctx = Command.Context{ - .args = std.mem.zeroes(Api.TransformOptions), - .log = undefined, - .start_time = 0, - .allocator = undefined, - }; - pub fn create(allocator: std.mem.Allocator, log: *logger.Log, comptime command: Command.Tag) anyerror!Context { Cli.cmd = command; - var ctx = _ctx; - ctx.log = log; - ctx.start_time = start_time; - ctx.allocator = allocator; + global_cli_ctx = &context_data; + global_cli_ctx.log = log; + global_cli_ctx.start_time = start_time; + global_cli_ctx.allocator = allocator; if (comptime Command.Tag.uses_global_options.get(command)) { - ctx.args = try 
Arguments.parse(allocator, &ctx, command); + global_cli_ctx.args = try Arguments.parse(allocator, global_cli_ctx, command); } if (comptime Environment.isWindows) { - if (ctx.debug.hot_reload == .watch and !bun.isWatcherChild()) { + if (global_cli_ctx.debug.hot_reload == .watch and !bun.isWatcherChild()) { // this is noreturn bun.becomeWatcherManager(allocator); } } - return ctx; + return global_cli_ctx; } }; + pub const Context = *ContextData; // std.process.args allocates! const ArgsIterator = struct { @@ -1371,12 +1376,14 @@ pub const Command = struct { // bun build --compile entry point if (try bun.StandaloneModuleGraph.fromExecutable(bun.default_allocator)) |graph| { - var ctx = Command.Context{ + context_data = .{ .args = std.mem.zeroes(Api.TransformOptions), .log = log, .start_time = start_time, .allocator = bun.default_allocator, }; + global_cli_ctx = &context_data; + var ctx = global_cli_ctx; ctx.args.target = Api.Target.bun; if (bun.argv().len > 1) { @@ -1404,7 +1411,7 @@ pub const Command = struct { .InitCommand => return try InitCommand.exec(allocator, bun.argv()), .BuildCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .BuildCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .BuildCommand); + const ctx = try Command.init(allocator, log, .BuildCommand); try BuildCommand.exec(ctx); }, .InstallCompletionsCommand => { @@ -1414,28 +1421,28 @@ pub const Command = struct { }, .InstallCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .InstallCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .InstallCommand); + const ctx = try Command.init(allocator, log, .InstallCommand); try InstallCommand.exec(ctx); return; }, .AddCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .AddCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .AddCommand); + const ctx = try Command.init(allocator, 
log, .AddCommand); try AddCommand.exec(ctx); return; }, .UpdateCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .UpdateCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .UpdateCommand); + const ctx = try Command.init(allocator, log, .UpdateCommand); try UpdateCommand.exec(ctx); return; }, .BunxCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .BunxCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .BunxCommand); + const ctx = try Command.init(allocator, log, .BunxCommand); try BunxCommand.exec(ctx, bun.argv()[if (is_bunx_exe) 0 else 1..]); return; @@ -1443,7 +1450,7 @@ pub const Command = struct { .ReplCommand => { // TODO: Put this in native code. if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .BunxCommand) unreachable; - var ctx = try Command.Context.create(allocator, log, .BunxCommand); + var ctx = try Command.init(allocator, log, .BunxCommand); ctx.debug.run_in_bun = true; // force the same version of bun used. 
fixes bun-debug for example var args = bun.argv()[0..]; args[1] = "bun-repl"; @@ -1452,42 +1459,42 @@ pub const Command = struct { }, .RemoveCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .RemoveCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .RemoveCommand); + const ctx = try Command.init(allocator, log, .RemoveCommand); try RemoveCommand.exec(ctx); return; }, .LinkCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .LinkCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .LinkCommand); + const ctx = try Command.init(allocator, log, .LinkCommand); try LinkCommand.exec(ctx); return; }, .UnlinkCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .UnlinkCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .UnlinkCommand); + const ctx = try Command.init(allocator, log, .UnlinkCommand); try UnlinkCommand.exec(ctx); return; }, .PackageManagerCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .PackageManagerCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .PackageManagerCommand); + const ctx = try Command.init(allocator, log, .PackageManagerCommand); try PackageManagerCommand.exec(ctx); return; }, .TestCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .TestCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .TestCommand); + const ctx = try Command.init(allocator, log, .TestCommand); try TestCommand.exec(ctx); return; }, .GetCompletionsCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .GetCompletionsCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .GetCompletionsCommand); + const ctx = try Command.init(allocator, log, .GetCompletionsCommand); var filter = ctx.positionals; for (filter, 0..) 
|item, i| { @@ -1583,7 +1590,7 @@ pub const Command = struct { }); // Create command wraps bunx - const ctx = try Command.Context.create(allocator, log, .CreateCommand); + const ctx = try Command.init(allocator, log, .CreateCommand); var args = try std.process.argsAlloc(allocator); @@ -1699,7 +1706,7 @@ pub const Command = struct { }, .RunCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .RunCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .RunCommand); + const ctx = try Command.init(allocator, log, .RunCommand); if (ctx.filters.len > 0) { FilterRun.runScriptsWithFilter(ctx) catch |err| { @@ -1718,20 +1725,20 @@ pub const Command = struct { }, .RunAsNodeCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .RunAsNodeCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .RunAsNodeCommand); + const ctx = try Command.init(allocator, log, .RunAsNodeCommand); bun.assert(pretend_to_be_node); try RunCommand.execAsIfNode(ctx); }, .UpgradeCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .UpgradeCommand) unreachable; - const ctx = try Command.Context.create(allocator, log, .UpgradeCommand); + const ctx = try Command.init(allocator, log, .UpgradeCommand); try UpgradeCommand.exec(ctx); return; }, .AutoCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .AutoCommand) unreachable; - var ctx = Command.Context.create(allocator, log, .AutoCommand) catch |e| { + const ctx = Command.init(allocator, log, .AutoCommand) catch |e| { switch (e) { error.MissingEntryPoint => { HelpCommand.execWithReason(allocator, .explicit); @@ -1805,7 +1812,7 @@ pub const Command = struct { } if (!ctx.debug.loaded_bunfig) { - try bun.CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", &ctx, .RunCommand); + try bun.CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", ctx, .RunCommand); } if 
(ctx.preloads.len > 0) @@ -1820,7 +1827,7 @@ pub const Command = struct { if (default_loader) |loader| { if (loader.canBeRunByBun()) { was_js_like = true; - if (maybeOpenWithBunJS(&ctx)) { + if (maybeOpenWithBunJS(ctx)) { return; } did_check = true; @@ -1828,7 +1835,7 @@ pub const Command = struct { } if (force_using_bun and !did_check) { - if (maybeOpenWithBunJS(&ctx)) { + if (maybeOpenWithBunJS(ctx)) { return; } } @@ -1873,16 +1880,16 @@ pub const Command = struct { try HelpCommand.exec(allocator); }, .ExecCommand => { - var ctx = try Command.Context.create(allocator, log, .RunCommand); + const ctx = try Command.init(allocator, log, .RunCommand); if (ctx.positionals.len > 1) { - try ExecCommand.exec(&ctx); + try ExecCommand.exec(ctx); } else Tag.printHelp(.ExecCommand, true); }, } } - fn maybeOpenWithBunJS(ctx: *Command.Context) bool { + fn maybeOpenWithBunJS(ctx: Command.Context) bool { if (ctx.args.entry_points.len == 0) return false; @@ -1959,7 +1966,7 @@ pub const Command = struct { } BunJS.Run.boot( - ctx.*, + ctx, absolute_script_path.?, ) catch |err| { if (Output.enable_ansi_colors) { diff --git a/src/cli/build_command.zig b/src/cli/build_command.zig index 751f8df13f1657..d1c058b1a5e87e 100644 --- a/src/cli/build_command.zig +++ b/src/cli/build_command.zig @@ -36,10 +36,9 @@ var estimated_input_lines_of_code_: usize = undefined; pub const BuildCommand = struct { pub fn exec( - ctx_: Command.Context, + ctx: Command.Context, ) !void { Global.configureAllocator(.{ .long_running = true }); - var ctx = ctx_; const allocator = ctx.allocator; var log = ctx.log; estimated_input_lines_of_code_ = 0; @@ -63,8 +62,8 @@ pub const BuildCommand = struct { } var outfile = ctx.bundler_options.outfile; - this_bundler.options.public_path = ctx_.bundler_options.public_path; - this_bundler.resolver.opts.public_path = ctx_.bundler_options.public_path; + this_bundler.options.public_path = ctx.bundler_options.public_path; + this_bundler.resolver.opts.public_path = 
ctx.bundler_options.public_path; this_bundler.options.entry_naming = ctx.bundler_options.entry_naming; this_bundler.options.chunk_naming = ctx.bundler_options.chunk_naming; diff --git a/src/cli/bunx_command.zig b/src/cli/bunx_command.zig index d86431ab48c28f..79d5de7c42c420 100644 --- a/src/cli/bunx_command.zig +++ b/src/cli/bunx_command.zig @@ -212,8 +212,7 @@ pub const BunxCommand = struct { Global.exit(1); } - pub fn exec(ctx_: bun.CLI.Command.Context, argv: [][:0]const u8) !void { - var ctx = ctx_; + pub fn exec(ctx: bun.CLI.Command.Context, argv: [][:0]const u8) !void { // Don't log stuff ctx.debug.silent = true; @@ -331,8 +330,7 @@ pub const BunxCommand = struct { else => ":", }; - const has_banned_char = std.mem.indexOfAny(u8, update_request.name, banned_path_chars) != null or - std.mem.indexOfAny(u8, display_version, banned_path_chars) != null; + const has_banned_char = bun.strings.indexAnyComptime(update_request.name, banned_path_chars) != null or bun.strings.indexAnyComptime(display_version, banned_path_chars) != null; break :brk try if (has_banned_char) // This branch gets hit usually when a URL is requested as the package diff --git a/src/cli/create_command.zig b/src/cli/create_command.zig index 9eed58bf420dda..0e5fffff14971b 100644 --- a/src/cli/create_command.zig +++ b/src/cli/create_command.zig @@ -378,7 +378,7 @@ pub const CreateCommand = struct { progress.refresh(); var pluckers: [1]Archive.Plucker = if (!create_options.skip_package_json) - [1]Archive.Plucker{try Archive.Plucker.init("package.json", 2048, ctx.allocator)} + [1]Archive.Plucker{try Archive.Plucker.init(comptime strings.literal(bun.OSPathChar, "package.json"), 2048, ctx.allocator)} else [1]Archive.Plucker{undefined}; diff --git a/src/cli/exec_command.zig b/src/cli/exec_command.zig index 2ed2a11148d237..d15e254873b6ea 100644 --- a/src/cli/exec_command.zig +++ b/src/cli/exec_command.zig @@ -13,7 +13,7 @@ const open = @import("../open.zig"); const Command = bun.CLI.Command; pub const 
ExecCommand = struct { - pub fn exec(ctx: *Command.Context) !void { + pub fn exec(ctx: Command.Context) !void { const script = ctx.positionals[1]; // this is a hack: make dummy bundler so we can use its `.runEnvLoader()` function to populate environment variables probably should split out the functionality var bundle = try bun.Bundler.init( diff --git a/src/cli/package_manager_command.zig b/src/cli/package_manager_command.zig index 3cbe12fe445c34..68848bf83371e6 100644 --- a/src/cli/package_manager_command.zig +++ b/src/cli/package_manager_command.zig @@ -137,7 +137,7 @@ pub const PackageManagerCommand = struct { const subcommand = getSubcommand(&pm.options.positionals); if (pm.options.global) { - try pm.setupGlobalDir(&ctx); + try pm.setupGlobalDir(ctx); } if (strings.eqlComptime(subcommand, "bin")) { diff --git a/src/cli/run_command.zig b/src/cli/run_command.zig index 2c3a8d854b284a..9adb0ccd28c8d5 100644 --- a/src/cli/run_command.zig +++ b/src/cli/run_command.zig @@ -266,7 +266,7 @@ pub const RunCommand = struct { const log = Output.scoped(.RUN, false); fn runPackageScriptForeground( - ctx: *Command.Context, + ctx: Command.Context, allocator: std.mem.Allocator, original_script: string, name: string, @@ -1294,12 +1294,11 @@ pub const RunCommand = struct { } pub fn exec( - ctx_: Command.Context, + ctx: Command.Context, comptime bin_dirs_only: bool, comptime log_errors: bool, comptime did_try_open_with_bun_js: bool, ) !bool { - var ctx = ctx_; // Step 1. 
Figure out what we're trying to run var positionals = ctx.positionals; if (positionals.len > 0 and strings.eqlComptime(positionals[0], "run") or strings.eqlComptime(positionals[0], "r")) { @@ -1384,7 +1383,7 @@ pub const RunCommand = struct { // once we know it's a file, check if they have any preloads if (ext.len > 0 and !has_loader) { if (!ctx.debug.loaded_bunfig) { - try CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", &ctx, .RunCommand); + try CLI.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", ctx, .RunCommand); } if (ctx.preloads.len == 0) @@ -1464,7 +1463,7 @@ pub const RunCommand = struct { if (scripts.get(temp_script_buffer[1..])) |prescript| { if (!try runPackageScriptForeground( - &ctx, + ctx, ctx.allocator, prescript, temp_script_buffer[1..], @@ -1479,7 +1478,7 @@ pub const RunCommand = struct { } if (!try runPackageScriptForeground( - &ctx, + ctx, ctx.allocator, script_content, script_name_to_search, @@ -1494,7 +1493,7 @@ pub const RunCommand = struct { if (scripts.get(temp_script_buffer)) |postscript| { if (!try runPackageScriptForeground( - &ctx, + ctx, ctx.allocator, postscript, temp_script_buffer, @@ -1682,10 +1681,9 @@ pub const BunXFastPath = struct { var environment_buffer: bun.WPathBuffer = undefined; /// If this returns, it implies the fast path cannot be taken - fn tryLaunch(ctx_const: Command.Context, path_to_use: [:0]u16, env: *DotEnv.Loader, passthrough: []const []const u8) void { + fn tryLaunch(ctx: Command.Context, path_to_use: [:0]u16, env: *DotEnv.Loader, passthrough: []const []const u8) void { if (!bun.FeatureFlags.windows_bunx_fast_path) return; - var ctx = ctx_const; bun.assert(bun.isSliceInBufferT(u16, path_to_use, &BunXFastPath.direct_launch_buffer)); var command_line = BunXFastPath.direct_launch_buffer[path_to_use.len..]; @@ -1718,7 +1716,7 @@ pub const BunXFastPath = struct { .arguments = command_line[0..i], .force_use_bun = ctx.debug.run_in_bun, .direct_launch_with_bun_js = &directLaunchCallback, - 
.cli_context = &ctx, + .cli_context = ctx, .environment = env.map.writeWindowsEnvBlock(&environment_buffer) catch return, }; @@ -1734,12 +1732,12 @@ pub const BunXFastPath = struct { debug("did not start via shim", .{}); } - fn directLaunchCallback(wpath: []u16, ctx: *const Command.Context) void { + fn directLaunchCallback(wpath: []u16, ctx: Command.Context) void { const utf8 = bun.strings.convertUTF16toUTF8InBuffer( bun.reinterpretSlice(u8, &direct_launch_buffer), wpath, ) catch return; - Run.boot(ctx.*, utf8) catch |err| { + Run.boot(ctx, utf8) catch |err| { ctx.log.printForLogLevel(Output.errorWriter()) catch {}; Output.err(err, "Failed to run bin \"{s}\"", .{std.fs.path.basename(utf8)}); Global.exit(1); diff --git a/src/cli/upgrade_command.zig b/src/cli/upgrade_command.zig index 0dfaa308485271..dcc90d7d60d265 100644 --- a/src/cli/upgrade_command.zig +++ b/src/cli/upgrade_command.zig @@ -154,7 +154,8 @@ pub const UpgradeCheckerThread = struct { fn run(env_loader: *DotEnv.Loader) void { _run(env_loader) catch |err| { if (Environment.isDebug) { - std.debug.print("\n[UpgradeChecker] ERROR: {s}\n", .{@errorName(err)}); + Output.prettyError("\n[UpgradeChecker] ERROR: {s}\n", .{@errorName(err)}); + Output.flush(); } }; } diff --git a/src/deps/diffz/DiffMatchPatch.zig b/src/deps/diffz/DiffMatchPatch.zig index 4300282adbe4de..405060238b2ce0 100644 --- a/src/deps/diffz/DiffMatchPatch.zig +++ b/src/deps/diffz/DiffMatchPatch.zig @@ -1398,7 +1398,7 @@ fn diffCommonOverlap(text1_in: []const u8, text2_in: []const u8) usize { } // pub fn main() void { -// var arena = @import("root").bun.ArenaAllocator.init(std.heap.page_allocator); +// var arena = bun.ArenaAllocator.init(std.heap.page_allocator); // defer arena.deinit(); // var bruh = default.diff(arena.allocator(), "Hello World.", "Goodbye World.", true); @@ -1406,7 +1406,7 @@ fn diffCommonOverlap(text1_in: []const u8, text2_in: []const u8) usize { // } // test { -// var arena = 
@import("root").bun.ArenaAllocator.init(testing.allocator); +// var arena = bun.ArenaAllocator.init(testing.allocator); // defer arena.deinit(); // var bruh = try default.diff(arena.allocator(), "Hello World.", "Goodbye World.", true); @@ -1455,7 +1455,7 @@ test diffCommonOverlap { } test diffHalfMatch { - var arena = @import("root").bun.ArenaAllocator.init(testing.allocator); + var arena = bun.ArenaAllocator.init(testing.allocator); defer arena.deinit(); var one_timeout = DiffMatchPatch{}; @@ -1549,7 +1549,7 @@ test diffHalfMatch { } test diffLinesToChars { - var arena = @import("root").bun.ArenaAllocator.init(testing.allocator); + var arena = bun.ArenaAllocator.init(testing.allocator); defer arena.deinit(); // Convert lines down to characters. @@ -1611,7 +1611,7 @@ test diffLinesToChars { } test diffCharsToLines { - var arena = @import("root").bun.ArenaAllocator.init(testing.allocator); + var arena = bun.ArenaAllocator.init(testing.allocator); defer arena.deinit(); try testing.expect((Diff.init(.equal, "a")).eql(Diff.init(.equal, "a"))); @@ -1640,7 +1640,7 @@ test diffCharsToLines { } test diffCleanupMerge { - var arena = @import("root").bun.ArenaAllocator.init(testing.allocator); + var arena = bun.ArenaAllocator.init(testing.allocator); defer arena.deinit(); // Cleanup a messy diff. @@ -1828,7 +1828,7 @@ test diffCleanupMerge { } test diffCleanupSemanticLossless { - var arena = @import("root").bun.ArenaAllocator.init(testing.allocator); + var arena = bun.ArenaAllocator.init(testing.allocator); defer arena.deinit(); var diffs = DiffList{}; @@ -1953,7 +1953,7 @@ fn rebuildtexts(allocator: std.mem.Allocator, diffs: DiffList) ![2][]const u8 { } test diffBisect { - var arena = @import("root").bun.ArenaAllocator.init(talloc); + var arena = bun.ArenaAllocator.init(talloc); defer arena.deinit(); // Normal. 
@@ -1987,7 +1987,7 @@ test diffBisect { const talloc = testing.allocator; test diff { - var arena = @import("root").bun.ArenaAllocator.init(talloc); + var arena = bun.ArenaAllocator.init(talloc); defer arena.deinit(); // Perform a trivial diff. @@ -2094,7 +2094,7 @@ test diff { } test diffCleanupSemantic { - var arena = @import("root").bun.ArenaAllocator.init(talloc); + var arena = bun.ArenaAllocator.init(talloc); defer arena.deinit(); // Cleanup semantically trivial equalities. diff --git a/src/deps/picohttp.zig b/src/deps/picohttp.zig index e46ded225d0fa4..e1f8cdaf39a85a 100644 --- a/src/deps/picohttp.zig +++ b/src/deps/picohttp.zig @@ -199,30 +199,6 @@ pub const Response = struct { } }; -test "pico_http: parse response" { - const RES = "HTTP/1.1 200 OK\r\n" ++ - "Date: Mon, 22 Mar 2021 08:15:54 GMT\r\n" ++ - "Content-Type: text/html; charset=utf-8\r\n" ++ - "Content-Length: 9593\r\n" ++ - "Connection: keep-alive\r\n" ++ - "Server: gunicorn/19.9.0\r\n" ++ - "Access-Control-Allow-Origin: *\r\n" ++ - "Access-Control-Allow-Credentials: true\r\n" ++ - "\r\n"; - - var headers: [32]Header = undefined; - - const res = try Response.parse(RES, &headers); - - std.debug.print("Minor Version: {}\n", .{res.minor_version}); - std.debug.print("Status Code: {}\n", .{res.status_code}); - std.debug.print("Status: {s}\n", .{res.status}); - - for (res.headers) |header| { - std.debug.print("{}\n", .{header}); - } -} - pub const Headers = struct { headers: []const Header, @@ -253,22 +229,4 @@ pub const Headers = struct { } }; -test "pico_http: parse headers" { - const HEADERS = "Date: Mon, 22 Mar 2021 08:15:54 GMT\r\n" ++ - "Content-Type: text/html; charset=utf-8\r\n" ++ - "Content-Length: 9593\r\n" ++ - "Connection: keep-alive\r\n" ++ - "Server: gunicorn/19.9.0\r\n" ++ - "Access-Control-Allow-Origin: *\r\n" ++ - "Access-Control-Allow-Credentials: true\r\n" ++ - "\r\n"; - - var headers: [32]Header = undefined; - - const result = try Headers.parse(HEADERS, &headers); - for 
(result.headers) |header| { - std.debug.print("{}\n", .{header}); - } -} - pub usingnamespace c; diff --git a/src/deps/uws.zig b/src/deps/uws.zig index 386934c50d2dd8..b5a169ccd840f9 100644 --- a/src/deps/uws.zig +++ b/src/deps/uws.zig @@ -893,6 +893,47 @@ pub const SocketContext = opaque { us_socket_context_free(@as(i32, 0), this); } + pub fn cleanCallbacks(ctx: *SocketContext, is_ssl: bool) void { + const ssl_int: i32 = @intFromBool(is_ssl); + // replace callbacks with dummy ones + const DummyCallbacks = struct { + fn open(socket: *Socket, _: i32, _: [*c]u8, _: i32) callconv(.C) ?*Socket { + return socket; + } + fn close(socket: *Socket, _: i32, _: ?*anyopaque) callconv(.C) ?*Socket { + return socket; + } + fn data(socket: *Socket, _: [*c]u8, _: i32) callconv(.C) ?*Socket { + return socket; + } + fn writable(socket: *Socket) callconv(.C) ?*Socket { + return socket; + } + fn timeout(socket: *Socket) callconv(.C) ?*Socket { + return socket; + } + fn connect_error(socket: *Socket, _: i32) callconv(.C) ?*Socket { + return socket; + } + fn end(socket: *Socket) callconv(.C) ?*Socket { + return socket; + } + fn handshake(_: *Socket, _: i32, _: us_bun_verify_error_t, _: ?*anyopaque) callconv(.C) void {} + fn long_timeout(socket: *Socket) callconv(.C) ?*Socket { + return socket; + } + }; + us_socket_context_on_open(ssl_int, ctx, DummyCallbacks.open); + us_socket_context_on_close(ssl_int, ctx, DummyCallbacks.close); + us_socket_context_on_data(ssl_int, ctx, DummyCallbacks.data); + us_socket_context_on_writable(ssl_int, ctx, DummyCallbacks.writable); + us_socket_context_on_timeout(ssl_int, ctx, DummyCallbacks.timeout); + us_socket_context_on_connect_error(ssl_int, ctx, DummyCallbacks.connect_error); + us_socket_context_on_end(ssl_int, ctx, DummyCallbacks.end); + us_socket_context_on_handshake(ssl_int, ctx, DummyCallbacks.handshake, null); + us_socket_context_on_long_timeout(ssl_int, ctx, DummyCallbacks.long_timeout); + } + fn getLoop(this: *SocketContext, ssl: bool) ?*Loop 
{ if (ssl) { return us_socket_context_loop(@as(i32, 1), this); @@ -902,6 +943,8 @@ pub const SocketContext = opaque { /// closes and deinit the SocketContexts pub fn deinit(this: *SocketContext, ssl: bool) void { + // we clean the callbacks to avoid UAF because we are deiniting + this.cleanCallbacks(ssl); this.close(ssl); //always deinit in next iteration if (ssl) { @@ -1132,6 +1175,8 @@ pub const us_bun_socket_context_options_t = extern struct { secure_options: u32 = 0, reject_unauthorized: i32 = 0, request_cert: i32 = 0, + client_renegotiation_limit: u32 = 3, + client_renegotiation_window: u32 = 600, }; pub const us_bun_verify_error_t = extern struct { diff --git a/src/deps/zig b/src/deps/zig index 7fe33d94eaeb1a..593a407f121a28 160000 --- a/src/deps/zig +++ b/src/deps/zig @@ -1 +1 @@ -Subproject commit 7fe33d94eaeb1af7705e9c5f43a3b243aa895436 +Subproject commit 593a407f121a2870e9c645da33c11db5e4331920 diff --git a/src/deps/zig-clap/clap.zig b/src/deps/zig-clap/clap.zig index 1ce0c628ecf2b0..cea034c1a7853b 100644 --- a/src/deps/zig-clap/clap.zig +++ b/src/deps/zig-clap/clap.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const bun = @import("root").bun; const debug = std.debug; const heap = std.heap; @@ -251,7 +252,7 @@ fn testDiag(diag: Diagnostic, err: anyerror, expected: []const u8) void { pub fn Args(comptime Id: type, comptime params: []const Param(Id)) type { return struct { - arena: @import("root").bun.ArenaAllocator, + arena: bun.ArenaAllocator, clap: ComptimeClap(Id, params), exe_arg: ?[]const u8, @@ -484,7 +485,7 @@ pub fn simpleHelp( if (desc_text.len == 0) continue; // create a string with spaces_len spaces - const default_allocator = @import("root").bun.default_allocator; + const default_allocator = bun.default_allocator; const flags_len = if (param.names.long) |l| l.len else 0; const num_spaces_after = max_spacing - flags_len; diff --git a/src/deps/zig-clap/clap/args.zig b/src/deps/zig-clap/clap/args.zig index b7dd99d761788c..d96b91ec426e50 100644 
--- a/src/deps/zig-clap/clap/args.zig +++ b/src/deps/zig-clap/clap/args.zig @@ -49,7 +49,7 @@ const bun = @import("root").bun; pub const OsIterator = struct { const Error = process.ArgIterator.InitError; - arena: @import("root").bun.ArenaAllocator, + arena: bun.ArenaAllocator, remain: [][:0]const u8, /// The executable path (this is the first argument passed to the program) @@ -59,7 +59,7 @@ pub const OsIterator = struct { pub fn init(allocator: mem.Allocator) OsIterator { var res = OsIterator{ - .arena = @import("root").bun.ArenaAllocator.init(allocator), + .arena = bun.ArenaAllocator.init(allocator), .exe_arg = undefined, .remain = bun.argv(), }; @@ -90,12 +90,12 @@ pub const ShellIterator = struct { QuoteNotClosed, } || mem.Allocator.Error; - arena: @import("root").bun.ArenaAllocator, + arena: bun.ArenaAllocator, str: []const u8, pub fn init(allocator: mem.Allocator, str: []const u8) ShellIterator { return .{ - .arena = @import("root").bun.ArenaAllocator.init(allocator), + .arena = bun.ArenaAllocator.init(allocator), .str = str, }; } diff --git a/src/deps/zig-clap/clap/streaming.zig b/src/deps/zig-clap/clap/streaming.zig index 77094278113887..e861325a7ee5ed 100644 --- a/src/deps/zig-clap/clap/streaming.zig +++ b/src/deps/zig-clap/clap/streaming.zig @@ -1,7 +1,8 @@ const builtin = @import("builtin"); const clap = @import("../clap.zig"); const std = @import("std"); -const Output = @import("root").bun.Output; +const bun = @import("root").bun; +const Output = bun.Output; const args = clap.args; const debug = std.debug; diff --git a/src/fmt.zig b/src/fmt.zig index 259729247c4f88..8d6dae069fbe2f 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -99,7 +99,6 @@ pub inline fn utf16(slice_: []const u16) FormatUTF16 { pub const FormatUTF16 = struct { buf: []const u16, - escape_backslashes: bool = false, path_fmt_opts: ?PathFormatOptions = null, pub fn format(self: @This(), comptime _: []const u8, _: anytype, writer: anytype) !void { if (self.path_fmt_opts) |opts| { diff --git 
a/src/http.zig b/src/http.zig index 0fa3b00bb7e756..b8723a6fe0b855 100644 --- a/src/http.zig +++ b/src/http.zig @@ -259,6 +259,12 @@ const ProxyTunnel = struct { ssl.configureHTTPClient(hostname); BoringSSL.SSL_CTX_set_verify(ssl_ctx, BoringSSL.SSL_VERIFY_NONE, null); BoringSSL.SSL_set_verify(ssl, BoringSSL.SSL_VERIFY_NONE, null); + // TODO: change this to ssl_renegotiate_explicit for optimization + // if we allow renegotiation, we need to set the mode here + // https://github.com/oven-sh/bun/issues/6197 + // https://github.com/oven-sh/bun/issues/5363 + // renegotiation is only valid for <= TLS1_2_VERSION + BoringSSL.SSL_set_renegotiate_mode(ssl, BoringSSL.ssl_renegotiate_freely); return ProxyTunnel{ .ssl = ssl, .ssl_ctx = ssl_ctx, .in_bio = in_bio, .out_bio = out_bio, .read_buffer = bun.default_allocator.alloc(u8, 16 * 1024) catch unreachable, .partial_data = null }; } unreachable; @@ -466,10 +472,10 @@ fn NewHTTPContext(comptime ssl: bool) type { return client.firstCall(comptime ssl, socket); } else { // if authorized it self is false, this means that the connection was rejected - return client.onConnectError( - comptime ssl, - socket, - ); + socket.ext(**anyopaque).?.* = bun.cast(**anyopaque, ActiveSocket.init(&dead_socket).ptr()); + if (client.state.stage != .done and client.state.stage != .fail) + client.fail(error.ConnectionRefused); + return; } } @@ -1062,11 +1068,9 @@ pub fn onTimeout( pub fn onConnectError( client: *HTTPClient, comptime is_ssl: bool, - socket: NewHTTPContext(is_ssl).HTTPSocket, + _: NewHTTPContext(is_ssl).HTTPSocket, ) void { - _ = socket; log("onConnectError {s}\n", .{client.url.href}); - if (client.state.stage != .done and client.state.stage != .fail) client.fail(error.ConnectionRefused); } diff --git a/src/install/install.zig b/src/install/install.zig index 65dc60cc14d66f..90da7aae7bde0d 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -874,7 +874,6 @@ pub const ExtractData = struct { pub const PackageInstall = 
struct { cache_dir: std.fs.Dir, - destination_dir: std.fs.Dir, cache_dir_subpath: stringZ = "", destination_dir_subpath: stringZ = "", destination_dir_subpath_buf: []u8, @@ -886,7 +885,7 @@ pub const PackageInstall = struct { package_name: string, package_version: string, file_count: u32 = 0, - node_modules: *const NodeModulesFolder, + node_modules: *const PackageManager.NodeModulesFolder, const debug = Output.scoped(.install, true); @@ -963,7 +962,12 @@ pub const PackageInstall = struct { // 1. verify that .bun-tag exists (was it installed from bun?) // 2. check .bun-tag against the resolved version - fn verifyGitResolution(this: *PackageInstall, repo: *const Repository, buf: []const u8) bool { + fn verifyGitResolution( + this: *PackageInstall, + repo: *const Repository, + buf: []const u8, + root_node_modules_dir: std.fs.Dir, + ) bool { bun.copy(u8, this.destination_dir_subpath_buf[this.destination_dir_subpath.len..], std.fs.path.sep_str ++ ".bun-tag"); this.destination_dir_subpath_buf[this.destination_dir_subpath.len + std.fs.path.sep_str.len + ".bun-tag".len] = 0; const bun_tag_path: [:0]u8 = this.destination_dir_subpath_buf[0 .. 
this.destination_dir_subpath.len + std.fs.path.sep_str.len + ".bun-tag".len :0]; @@ -971,8 +975,13 @@ pub const PackageInstall = struct { var git_tag_stack_fallback = std.heap.stackFallback(2048, bun.default_allocator); const allocator = git_tag_stack_fallback.get(); + var destination_dir = this.node_modules.openDir(root_node_modules_dir) catch return false; + defer { + if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); + } + const bun_tag_file = File.readFrom( - this.destination_dir, + destination_dir, bun_tag_path, allocator, ).unwrap() catch return false; @@ -985,15 +994,16 @@ pub const PackageInstall = struct { this: *PackageInstall, resolution: *const Resolution, buf: []const u8, + root_node_modules_dir: std.fs.Dir, ) bool { return switch (resolution.tag) { - .git => this.verifyGitResolution(&resolution.value.git, buf), - .github => this.verifyGitResolution(&resolution.value.github, buf), - else => this.verifyPackageJSONNameAndVersion(), + .git => this.verifyGitResolution(&resolution.value.git, buf, root_node_modules_dir), + .github => this.verifyGitResolution(&resolution.value.github, buf, root_node_modules_dir), + else => this.verifyPackageJSONNameAndVersion(root_node_modules_dir), }; } - fn verifyPackageJSONNameAndVersion(this: *PackageInstall) bool { + fn verifyPackageJSONNameAndVersion(this: *PackageInstall, root_node_modules_dir: std.fs.Dir) bool { const allocator = this.allocator; var total: usize = 0; var read: usize = 0; @@ -1018,7 +1028,12 @@ pub const PackageInstall = struct { const package_json_path: [:0]u8 = this.destination_dir_subpath_buf[0 .. 
this.destination_dir_subpath.len + std.fs.path.sep_str.len + "package.json".len :0]; defer this.destination_dir_subpath_buf[this.destination_dir_subpath.len] = 0; - var package_json_file = File.openat(this.destination_dir, package_json_path, std.os.O.RDONLY, 0).unwrap() catch return false; + var destination_dir = this.node_modules.openDir(root_node_modules_dir) catch return false; + defer { + if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); + } + + var package_json_file = File.openat(destination_dir, package_json_path, std.os.O.RDONLY, 0).unwrap() catch return false; defer package_json_file.close(); // Heuristic: most package.jsons will be less than 2048 bytes. @@ -1151,7 +1166,7 @@ pub const PackageInstall = struct { else Method.hardlink; - fn installWithClonefileEachDir(this: *PackageInstall) !Result { + fn installWithClonefileEachDir(this: *PackageInstall, destination_dir: std.fs.Dir) !Result { var cached_package_dir = bun.openDir(this.cache_dir, this.cache_dir_subpath) catch |err| return Result{ .fail = .{ .err = err, .step = .opening_cache_dir }, }; @@ -1212,7 +1227,7 @@ pub const PackageInstall = struct { } }; - var subdir = this.destination_dir.makeOpenPath(bun.span(this.destination_dir_subpath), .{}) catch |err| return Result{ + var subdir = destination_dir.makeOpenPath(bun.span(this.destination_dir_subpath), .{}) catch |err| return Result{ .fail = .{ .err = err, .step = .opening_dest_dir }, }; @@ -1231,14 +1246,14 @@ pub const PackageInstall = struct { } // https://www.unix.com/man-page/mojave/2/fclonefileat/ - fn installWithClonefile(this: *PackageInstall) !Result { + fn installWithClonefile(this: *PackageInstall, destination_dir: std.fs.Dir) !Result { if (comptime !Environment.isMac) @compileError("clonefileat() is macOS only."); if (this.destination_dir_subpath[0] == '@') { if (strings.indexOfCharZ(this.destination_dir_subpath, std.fs.path.sep)) |slash| { this.destination_dir_subpath_buf[slash] = 0; const subdir = 
this.destination_dir_subpath_buf[0..slash :0]; - this.destination_dir.makeDirZ(subdir) catch {}; + destination_dir.makeDirZ(subdir) catch {}; this.destination_dir_subpath_buf[slash] = std.fs.path.sep; } } @@ -1246,7 +1261,7 @@ pub const PackageInstall = struct { return switch (C.clonefileat( this.cache_dir.fd, this.cache_dir_subpath, - this.destination_dir.fd, + destination_dir.fd, this.destination_dir_subpath, 0, )) { @@ -1259,7 +1274,7 @@ pub const PackageInstall = struct { // But, this can happen if this package contains a node_modules folder // We want to continue installing as many packages as we can, so we shouldn't block while downloading // We use the slow path in this case - .EXIST => try this.installWithClonefileEachDir(), + .EXIST => try this.installWithClonefileEachDir(destination_dir), .ACCES => return error.AccessDenied, else => error.Unexpected, }, @@ -1286,8 +1301,8 @@ pub const PackageInstall = struct { threadlocal var node_fs_for_package_installer: bun.JSC.Node.NodeFS = .{}; - fn initInstallDir(this: *PackageInstall, state: *InstallDirState) Result { - const destbase = this.destination_dir; + fn initInstallDir(this: *PackageInstall, state: *InstallDirState, destination_dir: std.fs.Dir) Result { + const destbase = destination_dir; const destpath = this.destination_dir_subpath; state.cached_package_dir = bun.openDir(this.cache_dir, this.cache_dir_subpath) catch |err| return Result{ @@ -1298,10 +1313,7 @@ pub const PackageInstall = struct { this.allocator, &[_]bun.OSPathSlice{}, &[_]bun.OSPathSlice{}, - ) catch |err| { - state.cached_package_dir.close(); - return Result.fail(err, .opening_cache_dir); - }; + ) catch bun.outOfMemory(); if (!Environment.isWindows) { state.subdir = destbase.makeOpenPath(bun.span(destpath), .{ @@ -1364,9 +1376,9 @@ pub const PackageInstall = struct { return Result.success(); } - fn installWithCopyfile(this: *PackageInstall) Result { + fn installWithCopyfile(this: *PackageInstall, destination_dir: std.fs.Dir) Result { var 
state = InstallDirState{}; - const res = this.initInstallDir(&state); + const res = this.initInstallDir(&state, destination_dir); if (res.isFail()) return res; defer state.deinit(); @@ -1642,9 +1654,9 @@ pub const PackageInstall = struct { } }; - fn installWithHardlink(this: *PackageInstall) !Result { + fn installWithHardlink(this: *PackageInstall, dest_dir: std.fs.Dir) !Result { var state = InstallDirState{}; - const res = this.initInstallDir(&state); + const res = this.initInstallDir(&state, dest_dir); if (res.isFail()) return res; defer state.deinit(); @@ -1745,9 +1757,9 @@ pub const PackageInstall = struct { }; } - fn installWithSymlink(this: *PackageInstall) !Result { + fn installWithSymlink(this: *PackageInstall, dest_dir: std.fs.Dir) !Result { var state = InstallDirState{}; - const res = this.initInstallDir(&state); + const res = this.initInstallDir(&state, dest_dir); if (res.isFail()) return res; defer state.deinit(); @@ -1887,29 +1899,14 @@ pub const PackageInstall = struct { }; } - pub fn uninstall(this: *PackageInstall) void { - this.destination_dir.deleteTree(bun.span(this.destination_dir_subpath)) catch {}; + pub fn uninstall(this: *PackageInstall, destination_dir: std.fs.Dir) void { + destination_dir.deleteTree(bun.span(this.destination_dir_subpath)) catch {}; } - pub fn uninstallBeforeInstall(this: *PackageInstall) void { - // TODO(dylan-conway): depth first package installation to allow lifecycle scripts to start earlier - // - // if (this.install_order == .depth_first) { - // var subpath_dir = this.destination_dir.open(this.destination_dir_subpath, .{}) catch return; - // defer subpath_dir.close(); - // var iter = subpath_dir.iterateAssumeFirstIteration(); - // while (iter.next() catch null) |entry| { - // // skip node_modules because installation is depth first - // if (entry.kind != .directory or !strings.eqlComptime(entry.name, "node_modules")) { - // this.destination_dir.deleteTree(entry.name) catch {}; - // } - // } - // } else { - // 
this.destination_dir.deleteTree(bun.span(this.destination_dir_subpath)) catch {}; - // } + pub fn uninstallBeforeInstall(this: *PackageInstall, destination_dir: std.fs.Dir) void { var rand_path_buf: [48]u8 = undefined; const temp_path = std.fmt.bufPrintZ(&rand_path_buf, ".old-{}", .{std.fmt.fmtSliceHexUpper(std.mem.asBytes(&bun.fastRandom()))}) catch unreachable; - switch (bun.sys.renameat(bun.toFD(this.destination_dir), this.destination_dir_subpath, bun.toFD(this.destination_dir), temp_path)) { + switch (bun.sys.renameat(bun.toFD(destination_dir), this.destination_dir_subpath, bun.toFD(destination_dir), temp_path)) { .err => { // if it fails, that means the directory doesn't exist or was inaccessible }, @@ -1944,7 +1941,7 @@ pub const PackageInstall = struct { var unintall_task = @fieldParentPtr(@This(), "task", task); var debug_timer = bun.Output.DebugTimer.start(); defer { - _ = PackageManager.instance.pending_tasks.fetchSub(1, .Monotonic); + _ = PackageManager.instance.decrementPendingTasks(); PackageManager.instance.wake(); } @@ -1986,8 +1983,7 @@ pub const PackageInstall = struct { .absolute_path = bun.default_allocator.dupeZ(u8, bun.path.joinAbsString(FileSystem.instance.top_level_dir, &.{ this.node_modules.path.items, temp_path }, .auto)) catch bun.outOfMemory(), }); PackageManager.instance.thread_pool.schedule(bun.ThreadPool.Batch.from(&task.task)); - _ = PackageManager.instance.pending_tasks.fetchAdd(1, .Monotonic); - PackageManager.instance.total_tasks += 1; + _ = PackageManager.instance.incrementPendingTasks(1); }, } } @@ -2043,11 +2039,11 @@ pub const PackageInstall = struct { return false; } - pub fn installFromLink(this: *PackageInstall, skip_delete: bool) Result { + pub fn installFromLink(this: *PackageInstall, skip_delete: bool, destination_dir: std.fs.Dir) Result { const dest_path = this.destination_dir_subpath; // If this fails, we don't care. 
// we'll catch it the next error - if (!skip_delete and !strings.eqlComptime(dest_path, ".")) this.uninstallBeforeInstall(); + if (!skip_delete and !strings.eqlComptime(dest_path, ".")) this.uninstallBeforeInstall(destination_dir); const subdir = std.fs.path.dirname(dest_path); @@ -2066,7 +2062,7 @@ pub const PackageInstall = struct { // When we're linking on Windows, we want to avoid keeping the source directory handle open if (comptime Environment.isWindows) { var wbuf: bun.WPathBuffer = undefined; - const dest_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(this.destination_dir.fd, &wbuf, dest_buf.len, 0); + const dest_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(destination_dir.fd, &wbuf, dest_buf.len, 0); if (dest_path_length == 0) { const e = bun.windows.Win32Error.get(); const err = if (e.toSystemErrno()) |sys_err| bun.errnoToZigErr(sys_err) else error.Unexpected; @@ -2104,16 +2100,16 @@ pub const PackageInstall = struct { const dest_z = dest_buf[0..offset :0]; to_buf[to_path.len] = 0; - const to_path_z = to_buf[0..to_path.len :0]; + const target_z = to_buf[0..to_path.len :0]; // https://github.com/npm/cli/blob/162c82e845d410ede643466f9f8af78a312296cc/workspaces/arborist/lib/arborist/reify.js#L738 // https://github.com/npm/cli/commit/0e58e6f6b8f0cd62294642a502c17561aaf46553 - switch (bun.sys.symlinkOrJunctionOnWindows(to_path_z, dest_z)) { + switch (bun.sys.symlinkOrJunctionOnWindows(dest_z, target_z)) { .err => |err_| brk: { var err = err_; if (err.getErrno() == .EXIST) { - _ = bun.sys.unlink(to_path_z); - switch (bun.sys.symlinkOrJunctionOnWindows(to_path_z, dest_z)) { + _ = bun.sys.unlink(target_z); + switch (bun.sys.symlinkOrJunctionOnWindows(dest_z, target_z)) { .err => |e| err = e, .result => break :brk, } @@ -2130,13 +2126,13 @@ pub const PackageInstall = struct { } } else { var dest_dir = if (subdir) |dir| brk: { - break :brk bun.MakePath.makeOpenPath(this.destination_dir, dir, .{}) catch |err| return Result{ + break :brk 
bun.MakePath.makeOpenPath(destination_dir, dir, .{}) catch |err| return Result{ .fail = .{ .err = err, .step = .linking, }, }; - } else this.destination_dir; + } else destination_dir; defer { if (subdir != null) dest_dir.close(); } @@ -2169,16 +2165,30 @@ pub const PackageInstall = struct { }; } - pub fn install(this: *PackageInstall, skip_delete: bool) Result { + pub fn getInstallMethod(this: *const PackageInstall) Method { + return if (strings.eqlComptime(this.cache_dir_subpath, ".") or strings.hasPrefixComptime(this.cache_dir_subpath, "..")) + Method.symlink + else + supported_method; + } + + pub fn packageMissingFromCache(this: *PackageInstall, manager: *PackageManager, package_id: PackageID) bool { + return switch (manager.getPreinstallState(package_id)) { + .done => false, + else => brk: { + const exists = Syscall.directoryExistsAt(this.cache_dir.fd, this.cache_dir_subpath).unwrap() catch false; + if (exists) manager.setPreinstallState(package_id, manager.lockfile, .done); + break :brk !exists; + }, + }; + } + pub fn install(this: *PackageInstall, skip_delete: bool, destination_dir: std.fs.Dir) Result { // If this fails, we don't care. 
// we'll catch it the next error - if (!skip_delete and !strings.eqlComptime(this.destination_dir_subpath, ".")) this.uninstallBeforeInstall(); + if (!skip_delete and !strings.eqlComptime(this.destination_dir_subpath, ".")) this.uninstallBeforeInstall(destination_dir); - var supported_method_to_use = if (strings.eqlComptime(this.cache_dir_subpath, ".") or strings.hasPrefixComptime(this.cache_dir_subpath, "..")) - Method.symlink - else - supported_method; + var supported_method_to_use = this.getInstallMethod(); switch (supported_method_to_use) { .clonefile => { @@ -2186,7 +2196,7 @@ pub const PackageInstall = struct { // First, attempt to use clonefile // if that fails due to ENOTSUP, mark it as unsupported and then fall back to copyfile - if (this.installWithClonefile()) |result| { + if (this.installWithClonefile(destination_dir)) |result| { return result; } else |err| { switch (err) { @@ -2206,7 +2216,7 @@ pub const PackageInstall = struct { }, .clonefile_each_dir => { if (comptime Environment.isMac) { - if (this.installWithClonefileEachDir()) |result| { + if (this.installWithClonefileEachDir(destination_dir)) |result| { return result; } else |err| { switch (err) { @@ -2225,7 +2235,7 @@ pub const PackageInstall = struct { } }, .hardlink => { - if (this.installWithHardlink()) |result| { + if (this.installWithHardlink(destination_dir)) |result| { return result; } else |err| outer: { if (comptime !Environment.isWindows) { @@ -2250,7 +2260,7 @@ pub const PackageInstall = struct { if (comptime Environment.isWindows) { supported_method_to_use = .copyfile; } else { - if (this.installWithSymlink()) |result| { + if (this.installWithSymlink(destination_dir)) |result| { return result; } else |err| { switch (err) { @@ -2272,69 +2282,25 @@ pub const PackageInstall = struct { }; // TODO: linux io_uring - return this.installWithCopyfile(); + return this.installWithCopyfile(destination_dir); } }; -const NodeModulesFolder = struct { - fd: ?bun.FileDescriptor = null, +pub const 
Resolution = @import("./resolution.zig").Resolution; +const Progress = std.Progress; +const TaggedPointer = @import("../tagged_pointer.zig"); + +const DependencyInstallContext = struct { tree_id: Lockfile.Tree.Id = 0, path: std.ArrayList(u8) = std.ArrayList(u8).init(bun.default_allocator), - - pub fn deinit(this: *NodeModulesFolder) void { - if (this.fd) |fd| { - this.fd = null; - if (std.fs.cwd().fd == fd.cast()) { - return; - } - - _ = bun.sys.close(fd); - } - - this.path.clearAndFree(); - } - - pub fn dir(this: *NodeModulesFolder, root: std.fs.Dir) !std.fs.Dir { - if (this.fd) |fd| { - return fd.asDir(); - } - - if (root.fd == std.fs.cwd().fd) { - this.fd = bun.toFD(std.fs.cwd()); - return root; - } - - const out = brk: { - if (comptime Environment.isPosix) { - break :brk try root.makeOpenPath(this.path.items, .{ .iterate = true, .access_sub_paths = true }); - } - - try bun.MakePath.makePath(u8, root, this.path.items); - break :brk (try bun.sys.openDirAtWindowsA(bun.toFD(root), this.path.items, .{ - .can_rename_or_delete = false, - .create = true, - .read_only = false, - }).unwrap()).asDir(); - }; - this.fd = bun.toFD(out.fd); - return out; - } + dependency_id: DependencyID, }; -pub const Resolution = @import("./resolution.zig").Resolution; -const Progress = std.Progress; -const TaggedPointer = @import("../tagged_pointer.zig"); -const TaskCallbackContext = union(Tag) { +const TaskCallbackContext = union(enum) { dependency: DependencyID, - node_modules_folder: NodeModulesFolder, + dependency_install_context: DependencyInstallContext, root_dependency: DependencyID, root_request_id: PackageID, - pub const Tag = enum { - dependency, - node_modules_folder, - root_dependency, - root_request_id, - }; }; const TaskCallbackList = std.ArrayListUnmanaged(TaskCallbackContext); @@ -2808,17 +2774,6 @@ pub const PackageManager = struct { return this.global_link_dir_path; } - fn ensurePreinstallStateListCapacity(this: *PackageManager, count: usize) !void { - if 
(this.preinstall_state.items.len >= count) { - return; - } - - const offset = this.preinstall_state.items.len; - try this.preinstall_state.ensureTotalCapacity(this.allocator, count); - this.preinstall_state.expandToCapacity(); - @memset(this.preinstall_state.items[offset..], PreinstallState.unknown); - } - pub fn formatLaterVersionInCache( this: *PackageManager, name: []const u8, @@ -2860,19 +2815,30 @@ pub const PackageManager = struct { } } + fn ensurePreinstallStateListCapacity(this: *PackageManager, count: usize) !void { + if (this.preinstall_state.items.len >= count) { + return; + } + + const offset = this.preinstall_state.items.len; + try this.preinstall_state.ensureTotalCapacity(this.allocator, count); + this.preinstall_state.expandToCapacity(); + @memset(this.preinstall_state.items[offset..], PreinstallState.unknown); + } + pub fn setPreinstallState(this: *PackageManager, package_id: PackageID, lockfile: *Lockfile, value: PreinstallState) void { this.ensurePreinstallStateListCapacity(lockfile.packages.len) catch return; this.preinstall_state.items[package_id] = value; } - pub fn getPreinstallState(this: *PackageManager, package_id: PackageID, _: *Lockfile) PreinstallState { + pub fn getPreinstallState(this: *PackageManager, package_id: PackageID) PreinstallState { if (package_id >= this.preinstall_state.items.len) { return PreinstallState.unknown; } return this.preinstall_state.items[package_id]; } pub fn determinePreinstallState(manager: *PackageManager, this: Package, lockfile: *Lockfile) PreinstallState { - switch (manager.getPreinstallState(this.meta.id, lockfile)) { + switch (manager.getPreinstallState(this.meta.id)) { .unknown => { // Do not automatically start downloading packages which are disabled @@ -3338,10 +3304,7 @@ pub const PackageManager = struct { } pub fn isFolderInCache(this: *PackageManager, folder_path: stringZ) bool { - // TODO: is this slow? 
- var dir = this.getCacheDirectory().openDirZ(folder_path, .{}) catch return false; - dir.close(); - return true; + return bun.sys.directoryExistsAt(this.getCacheDirectory(), folder_path).unwrap() catch false; } pub fn pathForCachedNPMPath( @@ -4306,7 +4269,7 @@ pub const PackageManager = struct { } if (result.network_task) |network_task| { - if (this.getPreinstallState(result.package.meta.id, this.lockfile) == .extract) { + if (this.getPreinstallState(result.package.meta.id) == .extract) { this.setPreinstallState(result.package.meta.id, this.lockfile, .extracting); this.enqueueNetworkTask(network_task); } @@ -4802,8 +4765,7 @@ pub const PackageManager = struct { pub fn scheduleTasks(manager: *PackageManager) usize { const count = manager.task_batch.len + manager.network_resolve_batch.len + manager.network_tarball_batch.len; - _ = manager.pending_tasks.fetchAdd(@truncate(count), .Monotonic); - manager.total_tasks += @as(u32, @truncate(count)); + _ = manager.incrementPendingTasks(@truncate(count)); manager.thread_pool.schedule(manager.task_batch); manager.network_resolve_batch.push(manager.network_tarball_batch); HTTP.http_thread.schedule(manager.network_resolve_batch); @@ -5199,7 +5161,7 @@ pub const PackageManager = struct { var network_tasks_iter = network_tasks_batch.iterator(); while (network_tasks_iter.next()) |task| { if (comptime Environment.allow_assert) bun.assert(manager.pendingTaskCount() > 0); - _ = manager.pending_tasks.fetchSub(1, .Monotonic); + _ = manager.decrementPendingTasks(); // We cannot free the network task at the end of this scope. // It may continue to be referenced in a future task. 
@@ -5525,7 +5487,7 @@ pub const PackageManager = struct { while (resolve_tasks_iter.next()) |task| { if (comptime Environment.allow_assert) bun.assert(manager.pendingTaskCount() > 0); defer manager.preallocated_resolve_tasks.put(task); - _ = manager.pending_tasks.fetchSub(1, .Monotonic); + _ = manager.decrementPendingTasks(); if (task.log.msgs.items.len > 0) { switch (Output.enable_ansi_colors) { @@ -6976,13 +6938,11 @@ pub const PackageManager = struct { pub fn init(ctx: Command.Context, comptime subcommand: Subcommand) !*PackageManager { const cli = try CommandLineArguments.parse(ctx.allocator, subcommand); - - var _ctx = ctx; - return initWithCLI(&_ctx, cli, subcommand); + return initWithCLI(ctx, cli, subcommand); } fn initWithCLI( - ctx: *Command.Context, + ctx: Command.Context, cli: CommandLineArguments, comptime subcommand: Subcommand, ) !*PackageManager { @@ -7500,7 +7460,7 @@ pub const PackageManager = struct { } manager.global_dir = try Options.openGlobalDir(explicit_global_dir); - try manager.setupGlobalDir(&ctx); + try manager.setupGlobalDir(ctx); break :brk manager.global_dir.?.makeOpenPath("node_modules", .{}) catch |err| { if (manager.options.log_level != .silent) @@ -7689,7 +7649,7 @@ pub const PackageManager = struct { } manager.global_dir = try Options.openGlobalDir(explicit_global_dir); - try manager.setupGlobalDir(&ctx); + try manager.setupGlobalDir(ctx); break :brk manager.global_dir.?.makeOpenPath("node_modules", .{}) catch |err| { if (manager.options.log_level != .silent) @@ -8693,6 +8653,43 @@ pub const PackageManager = struct { } } + pub const NodeModulesFolder = struct { + tree_id: Lockfile.Tree.Id = 0, + path: std.ArrayList(u8) = std.ArrayList(u8).init(bun.default_allocator), + + pub fn deinit(this: *NodeModulesFolder) void { + this.path.clearAndFree(); + } + + pub fn openDir(this: *const NodeModulesFolder, root: std.fs.Dir) !std.fs.Dir { + if (comptime Environment.isPosix) { + return root.openDir(this.path.items, .{ .iterate = true, 
.access_sub_paths = true }); + } + + return (try bun.sys.openDirAtWindowsA(bun.toFD(root), this.path.items, .{ + .can_rename_or_delete = false, + .create = true, + .read_only = false, + }).unwrap()).asDir(); + } + + pub fn makeAndOpenDir(this: *NodeModulesFolder, root: std.fs.Dir) !std.fs.Dir { + const out = brk: { + if (comptime Environment.isPosix) { + break :brk try root.makeOpenPath(this.path.items, .{ .iterate = true, .access_sub_paths = true }); + } + + try bun.MakePath.makePath(u8, root, this.path.items); + break :brk (try bun.sys.openDirAtWindowsA(bun.toFD(root), this.path.items, .{ + .can_rename_or_delete = false, + .create = true, + .read_only = false, + }).unwrap()).asDir(); + }; + return out; + } + }; + pub const PackageInstaller = struct { manager: *PackageManager, lockfile: *Lockfile, @@ -8733,11 +8730,18 @@ pub const PackageManager = struct { tree_id: Lockfile.Tree.Id, }) = .{}, + pending_installs_to_tree_id: []std.ArrayListUnmanaged(DependencyInstallContext), + trusted_dependencies_from_update_requests: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void), /// Increments the number of installed packages for a tree id and runs available scripts /// if the tree is finished. 
- pub fn incrementTreeInstallCount(this: *PackageInstaller, tree_id: Lockfile.Tree.Id, comptime log_level: Options.LogLevel) void { + pub fn incrementTreeInstallCount( + this: *PackageInstaller, + tree_id: Lockfile.Tree.Id, + comptime should_install_packages: bool, + comptime log_level: Options.LogLevel, + ) void { if (comptime Environment.allow_assert) { bun.assert(tree_id != Lockfile.Tree.invalid_id); } @@ -8759,6 +8763,10 @@ pub const PackageManager = struct { if (!is_not_done) { this.completed_trees.set(tree_id); + if (comptime should_install_packages) { + const force = false; + this.installAvailablePackages(log_level, force); + } this.runAvailableScripts(log_level); } } @@ -8800,6 +8808,51 @@ pub const PackageManager = struct { } } + pub fn installAvailablePackages(this: *PackageInstaller, comptime log_level: Options.LogLevel, comptime force: bool) void { + const prev_node_modules = this.node_modules; + defer this.node_modules = prev_node_modules; + const prev_tree_id = this.current_tree_id; + defer this.current_tree_id = prev_tree_id; + + const lockfile = this.lockfile; + const resolutions = lockfile.buffers.resolutions.items; + + for (this.pending_installs_to_tree_id, 0..) |*pending_installs, i| { + if (force or this.canInstallPackageForTree(this.lockfile.buffers.trees.items, @intCast(i))) { + defer pending_installs.clearRetainingCapacity(); + + // If installing these packages completes the tree, we don't allow it + // to call `installAvailablePackages` recursively. 
Starting at id 0 and + // going up ensures we will reach any trees that will be able to install + // packages upon completing the current tree + for (pending_installs.items) |context| { + const package_id = resolutions[context.dependency_id]; + const name = lockfile.str(&this.names[package_id]); + const resolution = &this.resolutions[package_id]; + this.node_modules.tree_id = context.tree_id; + this.node_modules.path = context.path; + this.current_tree_id = context.tree_id; + + const needs_verify = false; + const is_pending_package_install = true; + this.installPackageWithNameAndResolution( + // This id might be different from the id used to enqueue the task. Important + // to use the correct one because the package might be aliased with a different + // name + context.dependency_id, + package_id, + log_level, + name, + resolution, + needs_verify, + is_pending_package_install, + ); + this.node_modules.deinit(); + } + } + } + } + pub fn completeRemainingScripts(this: *PackageInstaller, comptime log_level: Options.LogLevel) void { for (this.pending_lifecycle_scripts.items) |entry| { const package_name = entry.list.package_name; @@ -8861,19 +8914,34 @@ pub const PackageManager = struct { LifecycleScriptSubprocess.alive_count.load(.Monotonic) < this.manager.options.max_concurrent_lifecycle_scripts; } - pub fn printTreeDeps(this: *PackageInstaller) void { - for (this.tree_ids_to_trees_the_id_depends_on, 0..) 
|deps, j| { - std.debug.print("tree #{d:3}: ", .{j}); - for (0..this.lockfile.buffers.trees.items.len) |tree_id| { - std.debug.print("{d} ", .{@intFromBool(deps.isSet(tree_id))}); - } - std.debug.print("\n", .{}); + /// If all parents of the tree have finished installing their packages, the package can be installed + pub fn canInstallPackageForTree(this: *const PackageInstaller, trees: []Lockfile.Tree, package_tree_id: Lockfile.Tree.Id) bool { + var curr_tree_id = trees[package_tree_id].parent; + while (curr_tree_id != Lockfile.Tree.invalid_id) { + if (!this.completed_trees.isSet(curr_tree_id)) return false; + curr_tree_id = trees[curr_tree_id].parent; } + + return true; } + // pub fn printTreeDeps(this: *PackageInstaller) void { + // for (this.tree_ids_to_trees_the_id_depends_on, 0..) |deps, j| { + // std.debug.print("tree #{d:3}: ", .{j}); + // for (0..this.lockfile.buffers.trees.items.len) |tree_id| { + // std.debug.print("{d} ", .{@intFromBool(deps.isSet(tree_id))}); + // } + // std.debug.print("\n", .{}); + // } + // } + pub fn deinit(this: *PackageInstaller) void { const allocator = this.manager.allocator; this.pending_lifecycle_scripts.deinit(this.manager.allocator); + for (this.pending_installs_to_tree_id) |*pending_installs| { + pending_installs.deinit(this.manager.allocator); + } + this.manager.allocator.free(this.pending_installs_to_tree_id); this.completed_trees.deinit(allocator); allocator.free(this.tree_install_counts); this.tree_ids_to_trees_the_id_depends_on.deinit(allocator); @@ -8924,28 +8992,32 @@ pub const PackageManager = struct { } for (callbacks.items) |*cb| { - this.node_modules = cb.node_modules_folder; - var dir = this.node_modules.dir(this.root_node_modules_folder) catch |err| { - if (log_level != .silent) { - Output.err(err, "Failed to open node_modules folder for {s} in {s}", .{ name, bun.fmt.fmtPath(u8, this.node_modules.path.items, .{}) }); - } - this.summary.fail += 1; - continue; - }; - defer dir.close(); - this.node_modules.fd = 
null; - this.current_tree_id = cb.node_modules_folder.tree_id; - cb.node_modules_folder = .{}; + const context = cb.dependency_install_context; + const callback_package_id = this.lockfile.buffers.resolutions.items[context.dependency_id]; + const callback_resolution = &this.resolutions[callback_package_id]; + this.node_modules.tree_id = context.tree_id; + this.node_modules.path = context.path; + this.current_tree_id = context.tree_id; + const needs_verify = false; + const is_pending_package_install = false; this.installPackageWithNameAndResolution( - dependency_id, - package_id, + // This id might be different from the id used to enqueue the task. Important + // to use the correct one because the package might be aliased with a different + // name + context.dependency_id, + callback_package_id, log_level, name, - resolution, - dir, + callback_resolution, + needs_verify, + is_pending_package_install, ); this.node_modules.deinit(); } + } else { + if (comptime Environment.allow_assert) { + Output.panic("Ran callback to install enqueued packages, but there was no task associated with it. {d} {any}", .{ dependency_id, data.* }); + } } } @@ -9038,7 +9110,14 @@ pub const PackageManager = struct { comptime log_level: Options.LogLevel, name: string, resolution: *const Resolution, - destination_dir: std.fs.Dir, + + // false when coming from download. if the package was downloaded + // it was already determined to need an install + comptime needs_verify: bool, + + // we don't want to allow more package installs through + // pending packages if we're already draining them. 
+ comptime is_pending_package_install: bool, ) void { const buf = this.lockfile.buffers.string_bytes.items; @@ -9057,7 +9136,6 @@ pub const PackageManager = struct { .progress = this.progress, .cache_dir = undefined, .cache_dir_subpath = undefined, - .destination_dir = destination_dir, .destination_dir_subpath = destination_dir_subpath, .destination_dir_subpath_buf = &this.destination_dir_subpath_buf, .allocator = this.lockfile.allocator, @@ -9138,6 +9216,7 @@ pub const PackageManager = struct { Output.flush(); this.summary.fail += 1; + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; }; @@ -9168,21 +9247,128 @@ pub const PackageManager = struct { if (comptime Environment.allow_assert) { @panic("bad"); } - this.incrementTreeInstallCount(this.current_tree_id, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; }, } - const needs_install = this.force_install or this.skip_verify_installed_version_number or !installer.verify(resolution, buf); + const needs_install = this.force_install or this.skip_verify_installed_version_number or !needs_verify or !installer.verify( + resolution, + buf, + this.root_node_modules_folder, + ); this.summary.skipped += @intFromBool(!needs_install); if (needs_install) { - const result: PackageInstall.Result = switch (resolution.tag) { - .symlink, .workspace => installer.installFromLink(this.skip_delete), - else => installer.install(this.skip_delete), + if (resolution.tag.canEnqueueInstallTask() and installer.packageMissingFromCache(this.manager, package_id)) { + if (comptime Environment.allow_assert) { + bun.assert(resolution.canEnqueueInstallTask()); + } + + const context: TaskCallbackContext = .{ + .dependency_install_context = .{ + .tree_id = this.current_tree_id, + .path = this.node_modules.path.clone() catch bun.outOfMemory(), + .dependency_id = dependency_id, + }, + }; + switch (resolution.tag) { + .git => { + 
this.manager.enqueueGitForCheckout( + dependency_id, + alias, + resolution, + context, + ); + }, + .github => { + const url = this.manager.allocGitHubURL(&resolution.value.github); + defer this.manager.allocator.free(url); + this.manager.enqueueTarballForDownload( + dependency_id, + package_id, + url, + context, + ); + }, + .local_tarball => { + this.manager.enqueueTarballForReading( + dependency_id, + alias, + resolution, + context, + ); + }, + .remote_tarball => { + this.manager.enqueueTarballForDownload( + dependency_id, + package_id, + resolution.value.remote_tarball.slice(buf), + context, + ); + }, + .npm => { + if (comptime Environment.isDebug) { + // Very old versions of Bun didn't store the tarball url when it didn't seem necessary + // This caused bugs. We can't assert on it because they could come from old lockfiles + if (resolution.value.npm.url.isEmpty()) { + Output.debugWarn("package {s}@{} missing tarball_url", .{ name, resolution.fmt(buf, .posix) }); + } + } + + this.manager.enqueuePackageForDownload( + name, + dependency_id, + package_id, + resolution.value.npm.version, + resolution.value.npm.url.slice(buf), + context, + ); + }, + else => { + if (comptime Environment.allow_assert) { + @panic("unreachable, handled above"); + } + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + this.summary.fail += 1; + }, + } + + return; + } + + if (!is_pending_package_install and !this.canInstallPackageForTree(this.lockfile.buffers.trees.items, this.current_tree_id)) { + this.pending_installs_to_tree_id[this.current_tree_id].append(this.manager.allocator, .{ + .dependency_id = dependency_id, + .tree_id = this.current_tree_id, + .path = this.node_modules.path.clone() catch bun.outOfMemory(), + }) catch bun.outOfMemory(); + return; + } + + // creating this directory now, right before installing package + var destination_dir = this.node_modules.makeAndOpenDir(this.root_node_modules_folder) catch |err| { + if (log_level != 
.silent) { + Output.err(err, "Failed to open node_modules folder for {s} in {s}", .{ + name, + bun.fmt.fmtPath(u8, this.node_modules.path.items, .{}), + }); + } + this.summary.fail += 1; + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + return; }; - switch (result) { + defer { + if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); + } + + const install_result = switch (resolution.tag) { + .symlink, .workspace => installer.installFromLink(this.skip_delete, destination_dir), + else => installer.install(this.skip_delete, destination_dir), + }; + + switch (install_result) { .success => { const is_duplicate = this.successfully_installed.isSet(package_id); this.summary.success += @as(u32, @intFromBool(!is_duplicate)); @@ -9228,7 +9414,7 @@ pub const PackageManager = struct { } if (this.manager.options.enable.fail_early) { - installer.uninstall(); + installer.uninstall(destination_dir); Global.crash(); } } @@ -9281,140 +9467,91 @@ pub const PackageManager = struct { } } - this.incrementTreeInstallCount(this.current_tree_id, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); }, .fail => |cause| { - if (cause.isPackageMissingFromCache()) { - const context: TaskCallbackContext = .{ - .node_modules_folder = .{ - .fd = null, - .tree_id = this.current_tree_id, - .path = this.node_modules.path.clone() catch bun.outOfMemory(), - }, + if (comptime Environment.allow_assert) { + bun.assert(!cause.isPackageMissingFromCache() or (resolution.tag != .symlink and resolution.tag != .workspace)); + } + + // even if the package failed to install, we still need to increment the install + // counter for this tree + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + + if (cause.err == error.DanglingSymlink) { + Output.prettyErrorln( + "error: {s} \"link:{s}\" not found (try running 'bun link' in the intended package's folder)", + 
.{ @errorName(cause.err), this.names[package_id].slice(buf) }, + ); + this.summary.fail += 1; + } else if (cause.err == error.AccessDenied) { + // there are two states this can happen + // - Access Denied because node_modules/ is unwritable + // - Access Denied because this specific package is unwritable + // in the case of the former, the logs are extremely noisy, so we + // will exit early, otherwise set a flag to not re-stat + const Singleton = struct { + var node_modules_is_ok = false; }; - switch (resolution.tag) { - .git => { - this.manager.enqueueGitForCheckout( - dependency_id, - alias, - resolution, - context, - ); - }, - .github => { - const url = this.manager.allocGitHubURL(&resolution.value.github); - defer this.manager.allocator.free(url); - this.manager.enqueueTarballForDownload( - dependency_id, - package_id, - url, - context, - ); - }, - .local_tarball => { - this.manager.enqueueTarballForReading( - dependency_id, - alias, - resolution, - context, - ); - }, - .remote_tarball => { - this.manager.enqueueTarballForDownload( - dependency_id, - package_id, - resolution.value.remote_tarball.slice(buf), - context, - ); - }, - .npm => { - if (comptime Environment.isDebug) { - // Very old versions of Bun didn't store the tarball url when it didn't seem necessary - // This caused bugs. 
We can't assert on it because they could come from old lockfiles - if (resolution.value.npm.url.isEmpty()) { - Output.debugWarn("package {s}@{} missing tarball_url", .{ name, resolution.fmt(buf, .posix) }); + if (!Singleton.node_modules_is_ok) { + if (!Environment.isWindows) { + const stat = bun.sys.fstat(bun.toFD(destination_dir)).unwrap() catch |err| { + Output.err("EACCES", "Permission denied while installing {s}", .{ + this.names[package_id].slice(buf), + }); + if (Environment.isDebug) { + Output.err(err, "Failed to stat node_modules", .{}); } - } - - this.manager.enqueuePackageForDownload( - name, - dependency_id, - package_id, - resolution.value.npm.version, - resolution.value.npm.url.slice(buf), - context, - ); - }, - else => { - Output.prettyErrorln( - "error: {s} installing {s} ({s})", - .{ @errorName(cause.err), this.names[package_id].slice(buf), result.fail.step.name() }, - ); - this.summary.fail += 1; - }, - } - } else { - // even if the package failed to install, we still need to increment the install - // counter for this tree - this.incrementTreeInstallCount(this.current_tree_id, log_level); - if (cause.err == error.DanglingSymlink) { - Output.prettyErrorln( - "error: {s} \"link:{s}\" not found (try running 'bun link' in the intended package's folder)", - .{ @errorName(cause.err), this.names[package_id].slice(buf) }, - ); - this.summary.fail += 1; - } else if (cause.err == error.AccessDenied) { - // there are two states this can happen - // - Access Denied because node_modules/ is unwritable - // - Access Denied because this specific package is unwritable - // in the case of the former, the logs are extremely noisy, so we - // will exit early, otherwise set a flag to not re-stat - const Singleton = struct { - var node_modules_is_ok = false; - }; - if (!Singleton.node_modules_is_ok) { - if (!Environment.isWindows) { - const stat = bun.sys.fstat(bun.toFD(destination_dir)).unwrap() catch |err| { - Output.err("EACCES", "Permission denied while installing 
{s}", .{ - this.names[package_id].slice(buf), - }); - if (Environment.isDebug) { - Output.err(err, "Failed to stat node_modules", .{}); - } - Global.exit(1); - }; + Global.exit(1); + }; - const is_writable = if (stat.uid == bun.C.getuid()) - stat.mode & bun.S.IWUSR > 0 - else if (stat.gid == bun.C.getgid()) - stat.mode & bun.S.IWGRP > 0 - else - stat.mode & bun.S.IWOTH > 0; + const is_writable = if (stat.uid == bun.C.getuid()) + stat.mode & bun.S.IWUSR > 0 + else if (stat.gid == bun.C.getgid()) + stat.mode & bun.S.IWGRP > 0 + else + stat.mode & bun.S.IWOTH > 0; - if (!is_writable) { - Output.err("EACCES", "Permission denied while writing packages into node_modules.", .{}); - Global.exit(1); - } + if (!is_writable) { + Output.err("EACCES", "Permission denied while writing packages into node_modules.", .{}); + Global.exit(1); } - Singleton.node_modules_is_ok = true; } + Singleton.node_modules_is_ok = true; + } - Output.err("EACCES", "Permission denied while installing {s}", .{ - this.names[package_id].slice(buf), - }); + Output.err("EACCES", "Permission denied while installing {s}", .{ + this.names[package_id].slice(buf), + }); - this.summary.fail += 1; - } else { - Output.prettyErrorln( - "error: {s} installing {s} ({s})", - .{ @errorName(cause.err), this.names[package_id].slice(buf), result.fail.step.name() }, - ); - this.summary.fail += 1; - } + this.summary.fail += 1; + } else { + Output.prettyErrorln( + "error: {s} installing {s} ({s})", + .{ @errorName(cause.err), this.names[package_id].slice(buf), install_result.fail.step.name() }, + ); + this.summary.fail += 1; } }, } } else { + defer this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + + var destination_dir = this.node_modules.makeAndOpenDir(this.root_node_modules_folder) catch |err| { + if (log_level != .silent) { + Output.err(err, "Failed to open node_modules folder for {s} in {s}", .{ + name, + bun.fmt.fmtPath(u8, this.node_modules.path.items, .{}), + }); + } + 
this.summary.fail += 1; + return; + }; + + defer { + if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); + } + const name_hash: TruncatedPackageNameHash = @truncate(this.lockfile.buffers.dependencies.items[dependency_id].name_hash); const is_trusted, const is_trusted_through_update_request, const add_to_lockfile = brk: { // trusted through a --trust dependency. need to enqueue scripts, write to package.json, and add to lockfile @@ -9520,24 +9657,33 @@ pub const PackageManager = struct { pub fn installPackage( this: *PackageInstaller, dependency_id: DependencyID, - destination_dir: std.fs.Dir, comptime log_level: Options.LogLevel, ) void { const package_id = this.lockfile.buffers.resolutions.items[dependency_id]; const meta = &this.metas[package_id]; + const is_pending_package_install = false; if (meta.isDisabled()) { if (comptime log_level.showProgress()) { this.node.completeOne(); } - this.incrementTreeInstallCount(this.current_tree_id, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; } const name = this.lockfile.str(&this.names[package_id]); const resolution = &this.resolutions[package_id]; - this.installPackageWithNameAndResolution(dependency_id, package_id, log_level, name, resolution, destination_dir); + const needs_verify = true; + this.installPackageWithNameAndResolution( + dependency_id, + package_id, + log_level, + name, + resolution, + needs_verify, + is_pending_package_install, + ); } }; @@ -9785,15 +9931,6 @@ pub const PackageManager = struct { if (options.enable.force_install) { skip_verify_installed_version_number = true; skip_delete = false; - - // TODO(dylan-conway): depth first installation - // var node_modules_iter = node_modules_folder.iterateAssumeFirstIteration(); - // defer node_modules_iter.reset(); - // while (try node_modules_iter.next()) |entry| { - // if (entry.kind != .directory or !strings.eqlComptime(entry.name, ".cache")) { - // 
node_modules_folder.deleteTree(entry.name) catch {}; - // } - // } } var summary = PackageInstall.Summary{}; @@ -9809,8 +9946,9 @@ // to make mistakes harder var parts = this.lockfile.packages.slice(); + const trees = this.lockfile.buffers.trees.items; + const completed_trees, const tree_ids_to_trees_the_id_depends_on, const tree_install_counts = trees: { - const trees = this.lockfile.buffers.trees.items; const completed_trees = try Bitset.initEmpty(this.allocator, trees.len); var tree_ids_to_trees_the_id_depends_on = try Bitset.List.initEmpty(this.allocator, trees.len, trees.len); @@ -9857,6 +9995,17 @@ }; }; + // Each tree (other than the root tree) can accumulate packages it cannot install until + // each of its parent trees have installed their packages. We keep arrays of these pending + // packages for each tree, and drain them when a tree is completed (each of its immediate + // dependencies are installed). + // + // Trees are drained breadth first because if the current tree is completed from + // the remaining pending installs, then any child tree has a higher chance of + // being able to install its dependencies + const pending_installs_to_tree_id = this.allocator.alloc(std.ArrayListUnmanaged(DependencyInstallContext), trees.len) catch bun.outOfMemory(); + @memset(pending_installs_to_tree_id, .{}); + const trusted_dependencies_from_update_requests: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void) = trusted_deps: { // find all deps originating from --trust packages from cli @@ -9897,7 +10046,6 @@ .lockfile = this.lockfile, .node = &install_node, .node_modules = .{ - .fd = bun.toFD(node_modules_folder.fd), .path = std.ArrayList(u8).fromOwnedSlice( this.allocator, try this.allocator.dupe( @@ -9923,6 +10071,7 @@ .completed_trees = completed_trees, .tree_install_counts = tree_install_counts,
.trusted_dependencies_from_update_requests = trusted_dependencies_from_update_requests, + .pending_installs_to_tree_id = pending_installs_to_tree_id, }; }; @@ -9939,12 +10088,6 @@ pub const PackageManager = struct { var remaining = node_modules.dependencies; installer.current_tree_id = node_modules.tree_id; - var destination_dir = try installer.node_modules.dir(node_modules_folder); - defer { - installer.node_modules.fd = null; - destination_dir.close(); - } - if (comptime Environment.allow_assert) { bun.assert(node_modules.dependencies.len == this.lockfile.buffers.trees.items[installer.current_tree_id].dependencies.len); } @@ -9957,7 +10100,7 @@ pub const PackageManager = struct { while (remaining.len > unroll_count) { comptime var i: usize = 0; inline while (i < unroll_count) : (i += 1) { - installer.installPackage(remaining[i], destination_dir, comptime log_level); + installer.installPackage(remaining[i], comptime log_level); } remaining = remaining[unroll_count..]; @@ -9983,7 +10126,7 @@ pub const PackageManager = struct { } for (remaining) |dependency_id| { - installer.installPackage(dependency_id, destination_dir, log_level); + installer.installPackage(dependency_id, log_level); } try this.runTasks( @@ -10054,6 +10197,14 @@ pub const PackageManager = struct { this.tickLifecycleScripts(); } + for (installer.pending_installs_to_tree_id) |pending_installs| { + if (comptime Environment.allow_assert) { + bun.assert(pending_installs.items.len == 0); + } + const force = true; + installer.installAvailablePackages(log_level, force); + } + this.finished_installing.store(true, .Monotonic); if (comptime log_level.showProgress()) { scripts_node.activate(); @@ -10085,7 +10236,16 @@ pub const PackageManager = struct { return manager.pending_tasks.load(.Monotonic); } - pub fn setupGlobalDir(manager: *PackageManager, ctx: *const Command.Context) !void { + pub inline fn incrementPendingTasks(manager: *PackageManager, count: u32) u32 { + manager.total_tasks += count; + return 
manager.pending_tasks.fetchAdd(count, .Monotonic); + } + + pub inline fn decrementPendingTasks(manager: *PackageManager) u32 { + return manager.pending_tasks.fetchSub(1, .Monotonic); + } + + pub fn setupGlobalDir(manager: *PackageManager, ctx: Command.Context) !void { manager.options.global_bin_dir = try Options.openGlobalBinDir(ctx.install); var out_buffer: [bun.MAX_PATH_BYTES]u8 = undefined; const result = try bun.getFdPath(manager.options.global_bin_dir.fd, &out_buffer); @@ -10610,7 +10770,7 @@ pub const PackageManager = struct { } if (manager.options.global) { - try manager.setupGlobalDir(&ctx); + try manager.setupGlobalDir(ctx); } const packages_len_before_install = manager.lockfile.packages.len; diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index e794f35d046854..e03c400b43b8de 100644 --- a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -738,6 +738,13 @@ pub fn cleanWithLogger( // We will only shrink the number of packages here. // never grow + // preinstall_state is used during installPackages. 
the indexes(package ids) need + // to be remapped + var preinstall_state = PackageManager.instance.preinstall_state; + var old_preinstall_state = preinstall_state.clone(old.allocator) catch bun.outOfMemory(); + defer old_preinstall_state.deinit(old.allocator); + @memset(preinstall_state.items, .unknown); + if (updates.len > 0) { try old.preprocessUpdateRequests(updates, exact_versions); } @@ -775,6 +782,7 @@ pub fn cleanWithLogger( .mapping = package_id_mapping, .clone_queue = clone_queue_, .log = log, + .old_preinstall_state = old_preinstall_state, }; // try clone_queue.ensureUnusedCapacity(root.dependencies.len); @@ -925,6 +933,7 @@ const Cloner = struct { trees: Tree.List = Tree.List{}, trees_count: u32 = 1, log: *logger.Log, + old_preinstall_state: std.ArrayListUnmanaged(Install.PreinstallState), pub fn flush(this: *Cloner) anyerror!void { const max_package_id = this.old.packages.len; @@ -1450,7 +1459,7 @@ pub const Printer = struct { needs_comma = false; } const version_name = dependency_version.literal.slice(string_buf); - const needs_quote = always_needs_quote or std.mem.indexOfAny(u8, version_name, " |\t-/!") != null or strings.hasPrefixComptime(version_name, "npm:"); + const needs_quote = always_needs_quote or bun.strings.indexAnyComptime(version_name, " |\t-/!") != null or strings.hasPrefixComptime(version_name, "npm:"); if (needs_quote) { try writer.writeByte('"'); @@ -2841,6 +2850,10 @@ pub const Package = extern struct { package_id_mapping[this.meta.id] = new_package.meta.id; + if (PackageManager.instance.preinstall_state.items.len > 0) { + PackageManager.instance.preinstall_state.items[new_package.meta.id] = cloner.old_preinstall_state.items[this.meta.id]; + } + for (old_dependencies, dependencies) |old_dep, *new_dep| { new_dep.* = try old_dep.clone( old_string_buf, diff --git a/src/install/resolution.zig b/src/install/resolution.zig index f50cf2c6f0ac05..555b5951f45479 100644 --- a/src/install/resolution.zig +++ b/src/install/resolution.zig @@ -28,6 
+28,10 @@ pub const Resolution = extern struct { return this.tag.isGit(); } + pub fn canEnqueueInstallTask(this: *const Resolution) bool { + return this.tag.canEnqueueInstallTask(); + } + pub fn order( lhs: *const Resolution, rhs: *const Resolution, @@ -337,5 +341,9 @@ pub const Resolution = extern struct { pub fn isGit(this: Tag) bool { return this == .git or this == .github or this == .gitlab; } + + pub fn canEnqueueInstallTask(this: Tag) bool { + return this == .npm or this == .local_tarball or this == .remote_tarball or this == .git or this == .github; + } }; }; diff --git a/src/install/windows-shim/bun_shim_impl.zig b/src/install/windows-shim/bun_shim_impl.zig index bfa434533aa7ea..92950482acfbcd 100644 --- a/src/install/windows-shim/bun_shim_impl.zig +++ b/src/install/windows-shim/bun_shim_impl.zig @@ -909,9 +909,9 @@ pub const FromBunRunContext = struct { /// Was --bun passed? force_use_bun: bool, /// A pointer to a function that can launch `Run.boot` - direct_launch_with_bun_js: *const fn (wpath: []u16, args: *const CommandContext) void, + direct_launch_with_bun_js: *const fn (wpath: []u16, args: CommandContext) void, /// Command.Context - cli_context: *const CommandContext, + cli_context: CommandContext, /// Passed directly to CreateProcessW's lpEnvironment with CREATE_UNICODE_ENVIRONMENT environment: ?[*]const u16, }; diff --git a/src/io/fifo.zig b/src/io/fifo.zig index 64e47d8fc7b814..fbb9a91d73d000 100644 --- a/src/io/fifo.zig +++ b/src/io/fifo.zig @@ -1,5 +1,6 @@ const std = @import("std"); -const assert = @import("root").bun.assert; +const bun = @import("root").bun; +const assert = bun.assert; /// An intrusive first in/first out linked list. 
/// The element type T must have a field called "next" of type ?*T diff --git a/src/io/heap.zig b/src/io/heap.zig index 95ccfdb6e5eed8..1100b596854b69 100644 --- a/src/io/heap.zig +++ b/src/io/heap.zig @@ -1,5 +1,6 @@ const std = @import("std"); -const assert = @import("root").bun.assert; +const bun = @import("root").bun; +const assert = bun.assert; /// An intrusive heap implementation backed by a pairing heap[1] implementation. /// diff --git a/src/io/time.zig b/src/io/time.zig index c3e7dd3e6f1719..917f6bcb625469 100644 --- a/src/io/time.zig +++ b/src/io/time.zig @@ -1,5 +1,6 @@ const std = @import("std"); -const assert = @import("root").bun.assert; +const bun = @import("root").bun; +const assert = bun.assert; const is_darwin = @import("builtin").target.isDarwin(); pub const Time = struct { diff --git a/src/js/bun/sqlite.ts b/src/js/bun/sqlite.ts index a01b55c6b8164a..6744c569fea41b 100644 --- a/src/js/bun/sqlite.ts +++ b/src/js/bun/sqlite.ts @@ -31,6 +31,48 @@ const constants = { SQLITE_PREPARE_PERSISTENT: 0x01, SQLITE_PREPARE_NORMALIZE: 0x02, SQLITE_PREPARE_NO_VTAB: 0x04, + + SQLITE_FCNTL_LOCKSTATE: 1, + SQLITE_FCNTL_GET_LOCKPROXYFILE: 2, + SQLITE_FCNTL_SET_LOCKPROXYFILE: 3, + SQLITE_FCNTL_LAST_ERRNO: 4, + SQLITE_FCNTL_SIZE_HINT: 5, + SQLITE_FCNTL_CHUNK_SIZE: 6, + SQLITE_FCNTL_FILE_POINTER: 7, + SQLITE_FCNTL_SYNC_OMITTED: 8, + SQLITE_FCNTL_WIN32_AV_RETRY: 9, + SQLITE_FCNTL_PERSIST_WAL: 10, + SQLITE_FCNTL_OVERWRITE: 11, + SQLITE_FCNTL_VFSNAME: 12, + SQLITE_FCNTL_POWERSAFE_OVERWRITE: 13, + SQLITE_FCNTL_PRAGMA: 14, + SQLITE_FCNTL_BUSYHANDLER: 15, + SQLITE_FCNTL_TEMPFILENAME: 16, + SQLITE_FCNTL_MMAP_SIZE: 18, + SQLITE_FCNTL_TRACE: 19, + SQLITE_FCNTL_HAS_MOVED: 20, + SQLITE_FCNTL_SYNC: 21, + SQLITE_FCNTL_COMMIT_PHASETWO: 22, + SQLITE_FCNTL_WIN32_SET_HANDLE: 23, + SQLITE_FCNTL_WAL_BLOCK: 24, + SQLITE_FCNTL_ZIPVFS: 25, + SQLITE_FCNTL_RBU: 26, + SQLITE_FCNTL_VFS_POINTER: 27, + SQLITE_FCNTL_JOURNAL_POINTER: 28, + SQLITE_FCNTL_WIN32_GET_HANDLE: 29, + SQLITE_FCNTL_PDB: 
30, + SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: 31, + SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: 32, + SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: 33, + SQLITE_FCNTL_LOCK_TIMEOUT: 34, + SQLITE_FCNTL_DATA_VERSION: 35, + SQLITE_FCNTL_SIZE_LIMIT: 36, + SQLITE_FCNTL_CKPT_DONE: 37, + SQLITE_FCNTL_RESERVE_BYTES: 38, + SQLITE_FCNTL_CKPT_START: 39, + SQLITE_FCNTL_EXTERNAL_READER: 40, + SQLITE_FCNTL_CKSM_FILE: 41, + SQLITE_FCNTL_RESET_CACHE: 42, }; var SQL; @@ -161,6 +203,12 @@ class Statement { this.isFinalized = true; return this.#raw.finalize(...args); } + + [Symbol.dispose]() { + if (!this.isFinalized) { + this.finalize(); + } + } } var cachedCount = Symbol.for("Bun.Database.cache.count"); @@ -213,7 +261,7 @@ class Database { SQL = $cpp("JSSQLStatement.cpp", "createJSSQLStatementConstructor"); } - this.#handle = SQL.open(anonymous ? ":memory:" : filename, flags); + this.#handle = SQL.open(anonymous ? ":memory:" : filename, flags, this); this.filename = filename; } @@ -222,7 +270,7 @@ class Database { #cachedQueriesLengths = []; #cachedQueriesValues = []; filename; - + #hasClosed = false; get handle() { return this.#handle; } @@ -255,6 +303,12 @@ class Database { return new Database(serialized, isReadOnly ? 
constants.SQLITE_OPEN_READONLY : 0); } + [Symbol.dispose]() { + if (!this.#hasClosed) { + this.close(true); + } + } + static setCustomSQLite(path) { if (!SQL) { SQL = $cpp("JSSQLStatement.cpp", "createJSSQLStatementConstructor"); @@ -263,13 +317,24 @@ class Database { return SQL.setCustomSQLite(path); } - close() { + fileControl(cmd, arg) { + const handle = this.#handle; + + if (arguments.length <= 2) { + return SQL.fcntl(handle, null, arguments[0], arguments[1]); + } + + return SQL.fcntl(handle, ...arguments); + } + + close(throwOnError = false) { this.clearQueryCache(); - return SQL.close(this.#handle); + this.#hasClosed = true; + return SQL.close(this.#handle, throwOnError); } clearQueryCache() { for (let item of this.#cachedQueriesValues) { - item.finalize(); + item?.finalize?.(); } this.#cachedQueriesKeys.length = 0; this.#cachedQueriesValues.length = 0; diff --git a/src/js/node/child_process.js b/src/js/node/child_process.js index 96f4b255f1074c..c3ddb32a1f46be 100644 --- a/src/js/node/child_process.js +++ b/src/js/node/child_process.js @@ -84,10 +84,6 @@ var ReadableFromWeb; // Section 1. Exported child_process functions //------------------------------------------------------------------------------ -// TODO: Implement these props when Windows is supported -// * windowsVerbatimArguments?: boolean; -// * windowsHide?: boolean; - // Copyright Joyent, Inc. and other Node contributors. 
// // Permission is hereby granted, free of charge, to any person obtaining a @@ -134,6 +130,8 @@ function spawnTimeoutFunction(child, timeoutHolder) { * gid?: number; * serialization?: string; * shell?: boolean | string; + * windowsHide?: boolean; + * windowsVerbatimArguments?: boolean; * signal?: AbortSignal; * timeout?: number; * killSignal?: string | number; @@ -243,10 +241,14 @@ function execFile(file, args, options, callback) { const child = spawn(file, args, { cwd: options.cwd, env: options.env, - // gid: options.gid, + timeout: options.timeout, + killSignal: options.killSignal, + uid: options.uid, + gid: options.gid, + windowsHide: options.windowsHide, + windowsVerbatimArguments: options.windowsVerbatimArguments, shell: options.shell, signal: options.signal, - // uid: options.uid, }); let encoding; @@ -437,6 +439,7 @@ function execFile(file, args, options, callback) { * uid?: number; * gid?: number; * windowsHide?: boolean; + * windowsVerbatimArguments?: boolean; * }} [options] * @param {( * error?: Error, @@ -496,6 +499,8 @@ Object.defineProperty(exec, Symbol.for("nodejs.util.promisify.custom"), { * maxBuffer?: number; * encoding?: string; * shell?: boolean | string; + * windowsHide?: boolean; + * windowsVerbatimArguments?: boolean; * }} [options] * @returns {{ * pid: number; @@ -551,6 +556,8 @@ function spawnSync(file, args, options) { env: options.env || undefined, cwd: options.cwd || undefined, stdio: bunStdio, + windowsVerbatimArguments: options.windowsVerbatimArguments, + windowsHide: options.windowsHide, }); const result = { @@ -866,12 +873,19 @@ function normalizeSpawnArguments(file, args, options) { cwd = getValidatedPath(cwd, "options.cwd"); } - // TODO: Gid check - // TODO: Uid check - var detached = false; - const { detached: detachedOption } = options; - if (detachedOption != null) { - detached = !!detachedOption; + // Validate detached, if present. 
+ if (options.detached != null) { + validateBoolean(options.detached, "options.detached"); + } + + // Validate the uid, if present. + if (options.uid != null && !isInt32(options.uid)) { + throw new ERR_INVALID_ARG_TYPE("options.uid", "int32", options.uid); + } + + // Validate the gid, if present. + if (options.gid != null && !isInt32(options.gid)) { + throw new ERR_INVALID_ARG_TYPE("options.gid", "int32", options.gid); } // Validate the shell, if present. @@ -885,6 +899,11 @@ function normalizeSpawnArguments(file, args, options) { validateArgumentNullCheck(options.argv0, "options.argv0"); } + // Validate windowsHide, if present. + if (options.windowsHide != null) { + validateBoolean(options.windowsHide, "options.windowsHide"); + } + let { windowsVerbatimArguments } = options; if (windowsVerbatimArguments != null) { validateBoolean(windowsVerbatimArguments, "options.windowsVerbatimArguments"); @@ -900,7 +919,7 @@ function normalizeSpawnArguments(file, args, options) { else file = process.env.comspec || "cmd.exe"; // '/d /s /c' is used only for cmd.exe. if (/^(?:.*\\)?cmd(?:\.exe)?$/i.exec(file) !== null) { - args = ["/d", "/s", "/c", command]; + args = ["/d", "/s", "/c", `"${command}"`]; windowsVerbatimArguments = true; } else { args = ["-c", command]; @@ -929,7 +948,18 @@ function normalizeSpawnArguments(file, args, options) { // TODO: Windows env support here... - return { ...options, detached, file, args, cwd, envPairs }; + return { + // Make a shallow copy so we don't clobber the user's options object. + __proto__: null, + ...options, + args, + cwd, + detached: !!options.detached, + envPairs, + file, + windowsHide: !!options.windowsHide, + windowsVerbatimArguments: !!windowsVerbatimArguments, + }; } function checkExecSyncError(ret, args, cmd) { @@ -1205,6 +1235,8 @@ class ChildProcess extends EventEmitter { ipc: ipc ? 
this.#emitIpcMessage.bind(this) : undefined, serialization, argv0, + windowsHide: !!options.windowsHide, + windowsVerbatimArguments: !!options.windowsVerbatimArguments, }); this.pid = this.#handle.pid; diff --git a/src/js/node/fs.js b/src/js/node/fs.js index c1fa424fd52052..7f24fd8b9cedf7 100644 --- a/src/js/node/fs.js +++ b/src/js/node/fs.js @@ -138,8 +138,16 @@ var access = function access(...args) { appendFile = function appendFile(...args) { callbackify(fs.appendFile, args); }, - close = function close(...args) { - callbackify(fs.close, args); + close = function close(fd, callback) { + if ($isCallable(callback)) { + fs.close(fd).then(() => callback(), callback); + } else if (callback == undefined) { + fs.close(fd).then(() => {}); + } else { + const err = new TypeError("Callback must be a function"); + err.code = "ERR_INVALID_ARG_TYPE"; + throw err; + } }, rm = function rm(...args) { callbackify(fs.rm, args); diff --git a/src/js/node/http.ts b/src/js/node/http.ts index 3924eeee0be0ac..d64fa888265f37 100644 --- a/src/js/node/http.ts +++ b/src/js/node/http.ts @@ -1388,6 +1388,7 @@ class ClientRequest extends OutgoingMessage { #timeoutTimer?: Timer = undefined; #options; #finished; + #tls; get path() { return this.#path; @@ -1462,6 +1463,7 @@ class ClientRequest extends OutgoingMessage { timeout: false, // Disable auto gzip/deflate decompress: false, + tls: this.#tls, }; if (!!$debug) { @@ -1681,6 +1683,7 @@ class ClientRequest extends OutgoingMessage { this.#reusedSocket = false; this.#host = host; this.#protocol = protocol; + this.#tls = options.tls; const timeout = options.timeout; if (timeout !== undefined && timeout !== 0) { diff --git a/src/js/node/os.ts b/src/js/node/os.ts index 2d05c301c3b3eb..87682a72a4c7de 100644 --- a/src/js/node/os.ts +++ b/src/js/node/os.ts @@ -122,4 +122,26 @@ function bound(obj) { }; } -export default bound($zig("node_os.zig", "OS.create")); +const out = bound($zig("node_os.zig", "OS.create")); + +symbolToStringify(out, "arch"); 
+symbolToStringify(out, "availableParallelism"); +symbolToStringify(out, "endianness"); +symbolToStringify(out, "freemem"); +symbolToStringify(out, "homedir"); +symbolToStringify(out, "hostname"); +symbolToStringify(out, "platform"); +symbolToStringify(out, "release"); +symbolToStringify(out, "tmpdir"); +symbolToStringify(out, "totalmem"); +symbolToStringify(out, "type"); +symbolToStringify(out, "uptime"); +symbolToStringify(out, "version"); +symbolToStringify(out, "machine"); +function symbolToStringify(obj, key) { + obj[key][Symbol.toPrimitive] = function (hint) { + return obj[key](); + }; +} + +export default out; diff --git a/src/js/node/tls.js b/src/js/node/tls.js index 35572a26fce4f6..4fb925015d5f41 100644 --- a/src/js/node/tls.js +++ b/src/js/node/tls.js @@ -399,18 +399,50 @@ const TLSSocket = (function (InternalTLSSocket) { return !!this.#session; } - renegotiate() { + renegotiate(options, callback) { if (this.#renegotiationDisabled) { + // if renegotiation is disabled should emit error event in nextTick for nodejs compatibility const error = new Error("ERR_TLS_RENEGOTIATION_DISABLED: TLS session renegotiation disabled for this socket"); error.name = "ERR_TLS_RENEGOTIATION_DISABLED"; - throw error; + typeof callback === "function" && process.nextTick(callback, error); + return false; } - throw Error("Not implented in Bun yet"); + const socket = this[bunSocketInternal]; + // if the socket is detached we can't renegotiate, nodejs do a noop too (we should not return false or true here) + if (!socket) return; + + if (options) { + let requestCert = !!this._requestCert; + let rejectUnauthorized = !!this._rejectUnauthorized; + + if (options.requestCert !== undefined) requestCert = !!options.requestCert; + if (options.rejectUnauthorized !== undefined) rejectUnauthorized = !!options.rejectUnauthorized; + + if (requestCert !== this._requestCert || rejectUnauthorized !== this._rejectUnauthorized) { + socket.setVerifyMode(requestCert, rejectUnauthorized); + 
this._requestCert = requestCert; + this._rejectUnauthorized = rejectUnauthorized; + } + } + try { + socket.renegotiate(); + // if renegotiate is successful should emit secure event when done + typeof callback === "function" && this.once("secure", () => callback(null)); + return true; + } catch (err) { + // if renegotiate fails should emit error event in nextTick for nodejs compatibility + typeof callback === "function" && process.nextTick(callback, err); + return false; + } } + disableRenegotiation() { this.#renegotiationDisabled = true; + // disable renegotiation on the socket + return this[bunSocketInternal]?.disableRenegotiation(); } + getTLSTicket() { return this[bunSocketInternal]?.getTLSTicket(); } @@ -485,7 +517,8 @@ const TLSSocket = (function (InternalTLSSocket) { } }, ); - +let CLIENT_RENEG_LIMIT = 3, + CLIENT_RENEG_WINDOW = 600; class Server extends NetServer { key; cert; @@ -592,6 +625,8 @@ class Server extends NetServer { rejectUnauthorized: this._rejectUnauthorized, requestCert: isClient ? 
true : this._requestCert, ALPNProtocols: this.ALPNProtocols, + clientRenegotiationLimit: CLIENT_RENEG_LIMIT, + clientRenegotiationWindow: CLIENT_RENEG_WINDOW, }, SocketClass, ]; @@ -601,9 +636,7 @@ class Server extends NetServer { function createServer(options, connectionListener) { return new Server(options, connectionListener); } -const CLIENT_RENEG_LIMIT = 3, - CLIENT_RENEG_WINDOW = 600, - DEFAULT_ECDH_CURVE = "auto", +const DEFAULT_ECDH_CURVE = "auto", // https://github.com/Jarred-Sumner/uSockets/blob/fafc241e8664243fc0c51d69684d5d02b9805134/src/crypto/openssl.c#L519-L523 DEFAULT_CIPHERS = "DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", diff --git a/src/libarchive/libarchive.zig b/src/libarchive/libarchive.zig index c6ddb0c738bce2..a1c565c44c9043 100644 --- a/src/libarchive/libarchive.zig +++ b/src/libarchive/libarchive.zig @@ -376,10 +376,10 @@ pub const Archive = struct { filename_hash: u64 = 0, found: bool = false, fd: FileDescriptorType = .zero, - pub fn init(filepath: string, estimated_size: usize, allocator: std.mem.Allocator) !Plucker { + pub fn init(filepath: bun.OSPathSlice, estimated_size: usize, allocator: std.mem.Allocator) !Plucker { return Plucker{ .contents = try MutableString.init(allocator, estimated_size), - .filename_hash = bun.hash(filepath), + .filename_hash = bun.hash(std.mem.sliceAsBytes(filepath)), .fd = .zero, .found = false, }; diff --git a/src/resolver/resolve_path.zig b/src/resolver/resolve_path.zig index 18c3916005d650..fe8b753b8abb6a 100644 --- a/src/resolver/resolve_path.zig +++ b/src/resolver/resolve_path.zig @@ -569,11 +569,10 @@ fn windowsVolumeNameLenT(comptime T: type, path: []const T) struct { usize, usiz } } } else { - // TODO(dylan-conway): use strings.indexOfAny instead of std - if (std.mem.indexOfAny(T, path[3..], comptime strings.literal(T, "/\\"))) |idx| { + if (bun.strings.indexAnyComptimeT(T, path[3..], comptime strings.literal(T, "/\\"))) |idx| { // 
TODO: handle input "//abc//def" should be picked up as a unc path if (path.len > idx + 4 and !Platform.windows.isSeparatorT(T, path[idx + 4])) { - if (std.mem.indexOfAny(T, path[idx + 4 ..], comptime strings.literal(T, "/\\"))) |idx2| { + if (bun.strings.indexAnyComptimeT(T, path[idx + 4 ..], comptime strings.literal(T, "/\\"))) |idx2| { return .{ idx + idx2 + 4, idx + 3 }; } else { return .{ path.len, idx + 3 }; @@ -623,15 +622,9 @@ pub fn windowsFilesystemRootT(comptime T: type, path: []const T) []const T { !Platform.windows.isSeparatorT(T, path[2]) and path[2] != '.') { - if (comptime T == u8) { - if (strings.indexOfAny(path[3..], "/\\")) |idx| { - // TODO: handle input "//abc//def" should be picked up as a unc path - return path[0 .. idx + 4]; - } - } else { - if (std.mem.indexOfAny(T, path[3..], "/\\")) |idx| { - return path[0 .. idx + 4]; - } + if (bun.strings.indexAnyComptimeT(T, path[3..], "/\\")) |idx| { + // TODO: handle input "//abc//def" should be picked up as a unc path + return path[0 .. 
idx + 4]; } } if (isSepAnyT(T, path[0])) return path[0..1]; diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig index 92afb977cec59a..1d87d1e2026c38 100644 --- a/src/resolver/resolver.zig +++ b/src/resolver/resolver.zig @@ -1935,7 +1935,7 @@ pub const Resolver = struct { const dir_path_for_resolution = manager.pathForResolution(resolved_package_id, resolution, bufs(.path_in_global_disk_cache)) catch |err| { // if it's missing, we need to install it if (err == error.FileNotFound) { - switch (manager.getPreinstallState(resolved_package_id, manager.lockfile)) { + switch (manager.getPreinstallState(resolved_package_id)) { .done => { var path = Fs.Path.init(import_path); path.is_disabled = true; diff --git a/src/shell/interpreter.zig b/src/shell/interpreter.zig index a1bb6f05ccde27..6f185daa848546 100644 --- a/src/shell/interpreter.zig +++ b/src/shell/interpreter.zig @@ -605,7 +605,7 @@ pub const EnvMap = struct { /// This interpreter works by basically turning the AST into a state machine so /// that execution can be suspended and resumed to support async. pub const Interpreter = struct { - command_ctx: *const bun.CLI.Command.Context, + command_ctx: bun.CLI.Command.Context, event_loop: JSC.EventLoopHandle, /// This is the arena used to allocate the input shell script's AST nodes, /// tokens, and a string pool used to store all strings. @@ -1082,7 +1082,7 @@ pub const Interpreter = struct { /// If all initialization allocations succeed, the arena will be copied /// into the interpreter struct, so it is not a stale reference and safe to call `arena.deinit()` on error. 
pub fn init( - ctx: *const bun.CLI.Command.Context, + ctx: bun.CLI.Command.Context, event_loop: JSC.EventLoopHandle, allocator: Allocator, arena: *bun.ArenaAllocator, @@ -1183,7 +1183,7 @@ pub const Interpreter = struct { return .{ .result = interpreter }; } - pub fn initAndRunFromFile(ctx: *const bun.CLI.Command.Context, mini: *JSC.MiniEventLoop, path: []const u8) !bun.shell.ExitCode { + pub fn initAndRunFromFile(ctx: bun.CLI.Command.Context, mini: *JSC.MiniEventLoop, path: []const u8) !bun.shell.ExitCode { var arena = bun.ArenaAllocator.init(bun.default_allocator); const src = src: { var file = try std.fs.cwd().openFile(path, .{}); @@ -1257,7 +1257,7 @@ pub const Interpreter = struct { return code; } - pub fn initAndRunFromSource(ctx: *bun.CLI.Command.Context, mini: *JSC.MiniEventLoop, path_for_errors: []const u8, src: []const u8) !ExitCode { + pub fn initAndRunFromSource(ctx: bun.CLI.Command.Context, mini: *JSC.MiniEventLoop, path_for_errors: []const u8, src: []const u8) !ExitCode { bun.Analytics.Features.standalone_shell += 1; var arena = bun.ArenaAllocator.init(bun.default_allocator); defer arena.deinit(); @@ -2910,10 +2910,7 @@ pub const Interpreter = struct { parent: ParentPtr, io: IO, ) *Binary { - var binary = interpreter.allocator.create(Binary) catch |err| { - std.debug.print("Ruh roh: {any}\n", .{err}); - @panic("Ruh roh"); - }; + var binary = interpreter.allocator.create(Binary) catch |err| std.debug.panic("Ruh roh: {any}\n", .{err}); binary.node = node; binary.base = .{ .kind = .binary, .interpreter = interpreter, .shell = shell_state }; binary.parent = parent; @@ -4350,10 +4347,7 @@ pub const Interpreter = struct { parent: ParentPtr, io: IO, ) *Cmd { - var cmd = interpreter.allocator.create(Cmd) catch |err| { - std.debug.print("Ruh roh: {any}\n", .{err}); - @panic("Ruh roh"); - }; + var cmd = interpreter.allocator.create(Cmd) catch |err| std.debug.panic("Ruh roh: {any}\n", .{err}); cmd.* = .{ .base = .{ .kind = .cmd, .interpreter = interpreter, 
.shell = shell_state }, .node = node, @@ -11539,10 +11533,7 @@ pub fn StatePtrUnion(comptime TypesValue: anytype) type { } fn unknownTag(tag: Ptr.TagInt) void { - if (comptime bun.Environment.allow_assert) { - std.debug.print("Bad tag: {d}\n", .{tag}); - @panic("Bad tag"); - } + if (bun.Environment.allow_assert) std.debug.panic("Bad tag: {d}\n", .{tag}); } fn tagInt(this: @This()) Ptr.TagInt { diff --git a/src/shell/shell.zig b/src/shell/shell.zig index 09110e17bde07d..fbb62f1a17cdc7 100644 --- a/src/shell/shell.zig +++ b/src/shell/shell.zig @@ -2105,19 +2105,19 @@ pub const Token = union(TokenTag) { }; } - pub fn debug(self: Token, buf: []const u8) void { - switch (self) { - .Var => |txt| { - std.debug.print("(var) {s}\n", .{buf[txt.start..txt.end]}); - }, - .Text => |txt| { - std.debug.print("(txt) {s}\n", .{buf[txt.start..txt.end]}); - }, - else => { - std.debug.print("{s}\n", .{@tagName(self)}); - }, - } - } + // pub fn debug(self: Token, buf: []const u8) void { + // switch (self) { + // .Var => |txt| { + // std.debug.print("(var) {s}\n", .{buf[txt.start..txt.end]}); + // }, + // .Text => |txt| { + // std.debug.print("(txt) {s}\n", .{buf[txt.start..txt.end]}); + // }, + // else => { + // std.debug.print("{s}\n", .{@tagName(self)}); + // }, + // } + // } }; pub const LexerAscii = NewLexer(.ascii); @@ -3254,13 +3254,13 @@ pub fn NewLexer(comptime encoding: StringEncoding) type { return self.chars.read_char(); } - fn debug_tokens(self: *const @This()) void { - std.debug.print("Tokens: \n", .{}); - for (self.tokens.items, 0..) |tok, i| { - std.debug.print("{d}: ", .{i}); - tok.debug(self.strpool.items[0..self.strpool.items.len]); - } - } + // fn debug_tokens(self: *const @This()) void { + // std.debug.print("Tokens: \n", .{}); + // for (self.tokens.items, 0..) 
|tok, i| { + // std.debug.print("{d}: ", .{i}); + // tok.debug(self.strpool.items[0..self.strpool.items.len]); + // } + // } }; } diff --git a/src/string_immutable.zig b/src/string_immutable.zig index 4f5e9ed361a2ca..aee4513b461dcd 100644 --- a/src/string_immutable.zig +++ b/src/string_immutable.zig @@ -225,6 +225,15 @@ pub inline fn indexAnyComptime(target: string, comptime chars: string) ?usize { return null; } +pub inline fn indexAnyComptimeT(comptime T: type, target: []const T, comptime chars: []const T) ?usize { + for (target, 0..) |parent, i| { + inline for (chars) |char| { + if (char == parent) return i; + } + } + return null; +} + pub inline fn indexEqualAny(in: anytype, target: string) ?usize { for (in, 0..) |str, i| if (eqlLong(str, target, true)) return i; return null; diff --git a/src/sys.zig b/src/sys.zig index 33c34857552a73..457e309aae579e 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -1733,11 +1733,11 @@ pub const WindowsSymlinkOptions = packed struct { pub var has_failed_to_create_symlink = false; }; -pub fn symlinkOrJunctionOnWindows(sym: [:0]const u8, target: [:0]const u8) Maybe(void) { +pub fn symlinkOrJunctionOnWindows(dest: [:0]const u8, target: [:0]const u8) Maybe(void) { if (!WindowsSymlinkOptions.has_failed_to_create_symlink) { var sym16: bun.WPathBuffer = undefined; var target16: bun.WPathBuffer = undefined; - const sym_path = bun.strings.toNTPath(&sym16, sym); + const sym_path = bun.strings.toNTPath(&sym16, dest); const target_path = bun.strings.toNTPath(&target16, target); switch (symlinkW(sym_path, target_path, .{ .directory = true })) { .result => { @@ -1751,17 +1751,17 @@ pub fn symlinkOrJunctionOnWindows(sym: [:0]const u8, target: [:0]const u8) Maybe } } - return sys_uv.symlinkUV(sym, target, bun.windows.libuv.UV_FS_SYMLINK_JUNCTION); + return sys_uv.symlinkUV(target, dest, bun.windows.libuv.UV_FS_SYMLINK_JUNCTION); } -pub fn symlinkW(sym: [:0]const u16, target: [:0]const u16, options: WindowsSymlinkOptions) Maybe(void) { +pub fn 
symlinkW(dest: [:0]const u16, target: [:0]const u16, options: WindowsSymlinkOptions) Maybe(void) { while (true) { const flags = options.flags(); - if (windows.kernel32.CreateSymbolicLinkW(sym, target, flags) == 0) { + if (windows.kernel32.CreateSymbolicLinkW(dest, target, flags) == 0) { const errno = bun.windows.Win32Error.get(); log("CreateSymbolicLinkW({}, {}, {any}) = {s}", .{ - bun.fmt.fmtPath(u16, sym, .{}), + bun.fmt.fmtPath(u16, dest, .{}), bun.fmt.fmtPath(u16, target, .{}), flags, @tagName(errno), @@ -1788,7 +1788,7 @@ pub fn symlinkW(sym: [:0]const u16, target: [:0]const u16, options: WindowsSymli } log("CreateSymbolicLinkW({}, {}, {any}) = 0", .{ - bun.fmt.fmtPath(u16, sym, .{}), + bun.fmt.fmtPath(u16, dest, .{}), bun.fmt.fmtPath(u16, target, .{}), flags, }); @@ -2118,9 +2118,76 @@ pub fn exists(path: []const u8) bool { @compileError("TODO: existsOSPath"); } +pub fn directoryExistsAt(dir_: anytype, subpath: anytype) JSC.Maybe(bool) { + const has_sentinel = std.meta.sentinel(@TypeOf(subpath)) != null; + const dir_fd = bun.toFD(dir_); + if (comptime Environment.isWindows) { + var wbuf: bun.WPathBuffer = undefined; + const path = bun.strings.toNTPath(&wbuf, subpath); + const path_len_bytes: u16 = @truncate(path.len * 2); + var nt_name = w.UNICODE_STRING{ + .Length = path_len_bytes, + .MaximumLength = path_len_bytes, + .Buffer = @constCast(path.ptr), + }; + var attr = w.OBJECT_ATTRIBUTES{ + .Length = @sizeOf(w.OBJECT_ATTRIBUTES), + .RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(path)) + null + else if (dir_fd == bun.invalid_fd) + std.fs.cwd().fd + else + dir_fd.cast(), + .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. 
+ .ObjectName = &nt_name, + .SecurityDescriptor = null, + .SecurityQualityOfService = null, + }; + var basic_info: w.FILE_BASIC_INFORMATION = undefined; + const rc = kernel32.NtQueryAttributesFile(&attr, &basic_info); + if (JSC.Maybe(bool).errnoSysP(rc, .access, subpath)) |err| { + syslog("NtQueryAttributesFile({}, {}, O_DIRECTORY | O_RDONLY, 0) = {}", .{ dir_fd, bun.fmt.fmtOSPath(path, .{}), err }); + return err; + } + + const is_dir = basic_info.FileAttributes != kernel32.INVALID_FILE_ATTRIBUTES and + basic_info.FileAttributes & kernel32.FILE_ATTRIBUTE_DIRECTORY != 0; + syslog("NtQueryAttributesFile({}, {}, O_DIRECTORY | O_RDONLY, 0) = {d}", .{ dir_fd, bun.fmt.fmtOSPath(path, .{}), @intFromBool(is_dir) }); + + return .{ + .result = is_dir, + }; + } + + if (comptime !has_sentinel) { + const path = std.os.toPosixPath(subpath) catch return JSC.Maybe(bool){ .err = Error.fromCode(.NAMETOOLONG, .access) }; + return directoryExistsAt(dir_fd, path); + } + + if (comptime Environment.isLinux) { + // avoid loading the libc symbol for this to reduce chances of GLIBC minimum version requirements + const rc = linux.faccessat(dir_fd.cast(), subpath, linux.F_OK, 0); + syslog("faccessat({}, {}, O_DIRECTORY | O_RDONLY, 0) = {d}", .{ dir_fd, bun.fmt.fmtOSPath(subpath, .{}), if (rc == 0) 0 else @intFromEnum(linux.getErrno(rc)) }); + if (rc == 0) { + return JSC.Maybe(bool){ .result = true }; + } + + return JSC.Maybe(bool){ .result = false }; + } + + // on other platforms use faccessat from libc + const rc = std.c.faccessat(dir_fd.cast(), subpath, std.os.F_OK, 0); + syslog("faccessat({}, {}, O_DIRECTORY | O_RDONLY, 0) = {d}", .{ dir_fd, bun.fmt.fmtOSPath(subpath, .{}), if (rc == 0) 0 else @intFromEnum(std.c.getErrno(rc)) }); + if (rc == 0) { + return JSC.Maybe(bool){ .result = true }; + } + + return JSC.Maybe(bool){ .result = false }; +} + pub fn existsAt(fd: bun.FileDescriptor, subpath: []const u8) bool { if (comptime Environment.isPosix) { - return system.faccessat(bun.toFD(fd), 
&(std.os.toPosixPath(subpath) catch return false), 0, 0) == 0; + return system.faccessat(fd.cast(), &(std.os.toPosixPath(subpath) catch return false), 0, 0) == 0; } if (comptime Environment.isWindows) { diff --git a/src/which.zig b/src/which.zig index 3295877f2fd7f9..5aaaa8cd0f97e8 100644 --- a/src/which.zig +++ b/src/which.zig @@ -79,7 +79,7 @@ pub fn endsWithExtension(str: []const u8) bool { const file_ext = str[str.len - 3 ..]; inline for (win_extensions) |ext| { comptime bun.assert(ext.len == 3); - if (bun.strings.eqlComptimeCheckLenWithType(u8, file_ext, ext, false)) return true; + if (bun.strings.eqlCaseInsensitiveASCIIICheckLength(file_ext, ext)) return true; } return false; } diff --git a/test/cli/install/bun-create.test.ts b/test/cli/install/bun-create.test.ts index 49162855f8c909..a8515c1d5d744c 100644 --- a/test/cli/install/bun-create.test.ts +++ b/test/cli/install/bun-create.test.ts @@ -1,7 +1,7 @@ import { spawn, spawnSync } from "bun"; import { afterEach, beforeEach, expect, it, describe } from "bun:test"; import { bunExe, bunEnv as env } from "harness"; -import { mkdtemp, realpath, rm, mkdir, stat } from "fs/promises"; +import { mkdtemp, realpath, rm, mkdir, stat, exists } from "fs/promises"; import { tmpdir } from "os"; import { join } from "path"; @@ -100,3 +100,23 @@ it("should create template from local folder", async () => { const dirStat = await stat(`${x_dir}/${testTemplate}`); expect(dirStat.isDirectory()).toBe(true); }); + +for (const repo of ["https://github.com/dylan-conway/create-test", "github.com/dylan-conway/create-test"]) { + it(`should create and install github template from ${repo}`, async () => { + const { stderr, stdout, exited } = spawn({ + cmd: [bunExe(), "create", repo], + cwd: x_dir, + stdout: "pipe", + stderr: "pipe", + env, + }); + + const err = await Bun.readableStreamToText(stderr); + expect(err).not.toContain("error:"); + const out = await Bun.readableStreamToText(stdout); + expect(out).toContain("Success! 
dylan-conway/create-test loaded into create-test"); + expect(await exists(join(x_dir, "create-test", "node_modules", "jquery"))).toBe(true); + + expect(await exited).toBe(0); + }); +} diff --git a/test/js/bun/net/socket.test.ts b/test/js/bun/net/socket.test.ts index af131410fa6800..c46a46b43950e3 100644 --- a/test/js/bun/net/socket.test.ts +++ b/test/js/bun/net/socket.test.ts @@ -1,7 +1,7 @@ import { expect, it } from "bun:test"; import { bunEnv, bunExe, expectMaxObjectTypeCount, isWindows } from "harness"; import { connect, fileURLToPath, SocketHandler, spawn } from "bun"; - +import type { Socket } from "bun"; it("should coerce '0' to 0", async () => { const listener = Bun.listen({ // @ts-expect-error @@ -271,3 +271,88 @@ it("socket.timeout works", async () => { it("should allow large amounts of data to be sent and received", async () => { expect([fileURLToPath(new URL("./socket-huge-fixture.js", import.meta.url))]).toRun(); }, 60_000); + +it("it should not crash when getting a ReferenceError on client socket open", async () => { + const server = Bun.serve({ + port: 8080, + hostname: "localhost", + fetch() { + return new Response("Hello World"); + }, + }); + try { + const { resolve, reject, promise } = Promise.withResolvers(); + let client: Socket | null = null; + const timeout = setTimeout(() => { + client?.end(); + reject(new Error("Timeout")); + }, 1000); + client = await Bun.connect({ + port: server.port, + hostname: server.hostname, + socket: { + open(socket) { + // ReferenceError: Can't find variable: bytes + // @ts-expect-error + socket.write(bytes); + }, + error(socket, error) { + clearTimeout(timeout); + resolve(error); + }, + close(socket) { + // we need the close handler + resolve({ message: "Closed" }); + }, + data(socket, data) {}, + }, + }); + + const result: any = await promise; + expect(result?.message).toBe("Can't find variable: bytes"); + } finally { + server.stop(true); + } +}); + +it("it should not crash when returning a Error on client socket 
open", async () => { + const server = Bun.serve({ + port: 8080, + hostname: "localhost", + fetch() { + return new Response("Hello World"); + }, + }); + try { + const { resolve, reject, promise } = Promise.withResolvers(); + let client: Socket | null = null; + const timeout = setTimeout(() => { + client?.end(); + reject(new Error("Timeout")); + }, 1000); + client = await Bun.connect({ + port: server.port, + hostname: server.hostname, + socket: { + //@ts-ignore + open(socket) { + return new Error("CustomError"); + }, + error(socket, error) { + clearTimeout(timeout); + resolve(error); + }, + close(socket) { + // we need the close handler + resolve({ message: "Closed" }); + }, + data(socket, data) {}, + }, + }); + + const result: any = await promise; + expect(result?.message).toBe("CustomError"); + } finally { + server.stop(true); + } +}); diff --git a/test/js/bun/sqlite/sqlite.test.js b/test/js/bun/sqlite/sqlite.test.js index 4578acb730e8a0..60a9d01b1cdeed 100644 --- a/test/js/bun/sqlite/sqlite.test.js +++ b/test/js/bun/sqlite/sqlite.test.js @@ -1,8 +1,8 @@ import { expect, it, describe } from "bun:test"; import { Database, constants, SQLiteError } from "bun:sqlite"; -import { existsSync, fstat, realpathSync, rmSync, writeFileSync } from "fs"; +import { existsSync, fstat, readdirSync, realpathSync, rmSync, writeFileSync } from "fs"; import { spawnSync } from "bun"; -import { bunExe } from "harness"; +import { bunExe, isWindows, tempDirWithFiles } from "harness"; import { tmpdir } from "os"; import path from "path"; @@ -777,3 +777,180 @@ it.skipIf( expect(db.prepare("SELECT SQRT(0.25)").all()).toEqual([{ "SQRT(0.25)": 0.5 }]); expect(db.prepare("SELECT TAN(0.25)").all()).toEqual([{ "TAN(0.25)": 0.25534192122103627 }]); }); + +it("issue#6597", () => { + // better-sqlite3 returns the last value of duplicate fields + const db = new Database(":memory:"); + db.run("CREATE TABLE Users (Id INTEGER PRIMARY KEY, Name VARCHAR(255), CreatedAt TIMESTAMP)"); + db.run( + "CREATE 
TABLE Cars (Id INTEGER PRIMARY KEY, Driver INTEGER, CreatedAt TIMESTAMP, FOREIGN KEY (Driver) REFERENCES Users(Id))", + ); + db.run('INSERT INTO Users (Id, Name, CreatedAt) VALUES (1, "Alice", "2022-01-01");'); + db.run('INSERT INTO Cars (Id, Driver, CreatedAt) VALUES (2, 1, "2023-01-01");'); + const result = db.prepare("SELECT * FROM Cars JOIN Users ON Driver=Users.Id").get(); + expect(result).toStrictEqual({ + Id: 1, + Driver: 1, + CreatedAt: "2022-01-01", + Name: "Alice", + }); + db.close(); +}); + +it("issue#6597 with many columns", () => { + // better-sqlite3 returns the last value of duplicate fields + const db = new Database(":memory:"); + const count = 100; + const columns = Array.from({ length: count }, (_, i) => `col${i}`); + const values_foo = Array.from({ length: count }, (_, i) => `'foo${i}'`); + const values_bar = Array.from({ length: count }, (_, i) => `'bar${i}'`); + values_bar[0] = values_foo[0]; + db.run(`CREATE TABLE foo (${columns.join(",")})`); + db.run(`CREATE TABLE bar (${columns.join(",")})`); + db.run(`INSERT INTO foo (${columns.join(",")}) VALUES (${values_foo.join(",")})`); + db.run(`INSERT INTO bar (${columns.join(",")}) VALUES (${values_bar.join(",")})`); + const result = db.prepare("SELECT * FROM foo JOIN bar ON foo.col0 = bar.col0").get(); + expect(result.col0).toBe("foo0"); + for (let i = 1; i < count; i++) { + expect(result[`col${i}`]).toBe(`bar${i}`); + } + db.close(); +}); + +it("issue#7147", () => { + const db = new Database(":memory:"); + db.exec("CREATE TABLE foos (foo_id INTEGER NOT NULL PRIMARY KEY, foo_a TEXT, foo_b TEXT)"); + db.exec( + "CREATE TABLE bars (bar_id INTEGER NOT NULL PRIMARY KEY, foo_id INTEGER NOT NULL, bar_a INTEGER, bar_b INTEGER, FOREIGN KEY (foo_id) REFERENCES foos (foo_id))", + ); + db.exec("INSERT INTO foos VALUES (1, 'foo_1', 'foo_2')"); + db.exec("INSERT INTO bars VALUES (1, 1, 'bar_1', 'bar_2')"); + db.exec("INSERT INTO bars VALUES (2, 1, 'baz_3', 'baz_4')"); + const query = db.query("SELECT f.*, b.* 
FROM foos f JOIN bars b ON b.foo_id = f.foo_id"); + const result = query.all(); + expect(result).toStrictEqual([ + { + foo_id: 1, + foo_a: "foo_1", + foo_b: "foo_2", + bar_id: 1, + bar_a: "bar_1", + bar_b: "bar_2", + }, + { + foo_id: 1, + foo_a: "foo_1", + foo_b: "foo_2", + bar_id: 2, + bar_a: "baz_3", + bar_b: "baz_4", + }, + ]); + db.close(); +}); + +it("should close with WAL enabled", () => { + const dir = tempDirWithFiles("sqlite-wal-test", { "empty.txt": "" }); + const file = path.join(dir, "my.db"); + const db = new Database(file); + db.exec("PRAGMA journal_mode = WAL"); + db.fileControl(constants.SQLITE_FCNTL_PERSIST_WAL, 0); + db.exec("CREATE TABLE foo (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)"); + db.exec("INSERT INTO foo (name) VALUES ('foo')"); + expect(db.query("SELECT * FROM foo").all()).toEqual([{ id: 1, name: "foo" }]); + db.exec("PRAGMA wal_checkpoint(truncate)"); + db.close(); + expect(readdirSync(dir).sort()).toEqual(["empty.txt", "my.db"]); +}); + +it("close(true) should throw an error if the database is in use", () => { + const db = new Database(":memory:"); + db.exec("CREATE TABLE foo (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)"); + db.exec("INSERT INTO foo (name) VALUES ('foo')"); + const prepared = db.prepare("SELECT * FROM foo"); + expect(() => db.close(true)).toThrow("database is locked"); + prepared.finalize(); + expect(() => db.close(true)).not.toThrow(); +}); + +it("close() should NOT throw an error if the database is in use", () => { + const db = new Database(":memory:"); + db.exec("CREATE TABLE foo (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)"); + db.exec("INSERT INTO foo (name) VALUES ('foo')"); + const prepared = db.prepare("SELECT * FROM foo"); + expect(() => db.close()).not.toThrow("database is locked"); +}); + +it("should dispose AND throw an error if the database is in use", () => { + expect(() => { + let prepared; + { + using db = new Database(":memory:"); + db.exec("CREATE TABLE foo (id INTEGER PRIMARY KEY 
AUTOINCREMENT, name TEXT)"); + db.exec("INSERT INTO foo (name) VALUES ('foo')"); + prepared = db.prepare("SELECT * FROM foo"); + } + }).toThrow("database is locked"); +}); + +it("should dispose", () => { + expect(() => { + { + using db = new Database(":memory:"); + db.exec("CREATE TABLE foo (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)"); + db.exec("INSERT INTO foo (name) VALUES ('foo')"); + } + }).not.toThrow(); +}); + +it("can continue to use existing statements after database has been GC'd", async () => { + let called = false; + const registry = new FinalizationRegistry(() => { + called = true; + }); + function leakTheStatement() { + const db = new Database(":memory:"); + db.exec("CREATE TABLE foo (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)"); + db.exec("INSERT INTO foo (name) VALUES ('foo')"); + const prepared = db.prepare("SELECT * FROM foo"); + registry.register(db); + return prepared; + } + + const stmt = leakTheStatement(); + Bun.gc(true); + await Bun.sleep(1); + Bun.gc(true); + expect(stmt.all()).toEqual([{ id: 1, name: "foo" }]); + stmt.finalize(); + expect(() => stmt.all()).toThrow(); + if (!isWindows) { + // on Windows, FinalizationRegistry is more flaky than on POSIX. 
+ expect(called).toBe(true); + } +}); + +it("statements should be disposable", () => { + { + using db = new Database("mydb.sqlite"); + using query = db.query("select 'Hello world' as message;"); + console.log(query.get()); // => { message: "Hello world" } + } +}); + +it("query should work if the cached statement was finalized", () => { + { + let prevQuery; + using db = new Database("mydb.sqlite"); + { + using query = db.query("select 'Hello world' as message;"); + prevQuery = query; + query.get(); + } + { + using query = db.query("select 'Hello world' as message;"); + expect(() => query.get()).not.toThrow(); + } + expect(() => prevQuery.get()).toThrow(); + } +}); diff --git a/test/js/bun/util/which.test.ts b/test/js/bun/util/which.test.ts index ffd02386a31fab..edb4670e32a2b8 100644 --- a/test/js/bun/util/which.test.ts +++ b/test/js/bun/util/which.test.ts @@ -33,6 +33,7 @@ if (isWindows) { test("which", () => { expect(which("cmd")).toBe("C:\\Windows\\system32\\cmd.exe"); expect(which("cmd.exe")).toBe("C:\\Windows\\system32\\cmd.exe"); + expect(which("cmd.EXE")).toBe("C:\\Windows\\system32\\cmd.EXE"); expect(which("cmd.bat")).toBe(null); const exe = basename(process.execPath); const dir = join(process.execPath, "../"); diff --git a/test/js/node/child_process/child-process-exec.test.ts b/test/js/node/child_process/child-process-exec.test.ts index bc25dfdcb3aaca..d06cd3f165f14b 100644 --- a/test/js/node/child_process/child-process-exec.test.ts +++ b/test/js/node/child_process/child-process-exec.test.ts @@ -1,4 +1,5 @@ import { test, expect, describe } from "bun:test"; +import { bunExe } from "harness"; import { exec } from "node:child_process"; // https://github.com/oven-sh/bun/issues/5319 @@ -89,3 +90,17 @@ describe("child_process.exec", () => { }); }); }); + +test("exec with verbatim arguments", async () => { + const { resolve, reject, promise } = Promise.withResolvers(); + + const fixture = require.resolve("./fixtures/child-process-echo-argv.js"); + const child = 
exec(`${bunExe()} ${fixture} tasklist /FI "IMAGENAME eq chrome.exe"`, (err, stdout, stderr) => { + if (err) return reject(err); + return resolve({ stdout, stderr }); + }); + expect(!!child).toBe(true); + + const { stdout } = await promise; + expect(stdout.trim().split("\n")).toEqual([`tasklist`, `/FI`, `IMAGENAME eq chrome.exe`]); +}); diff --git a/test/js/node/child_process/fixtures/child-process-echo-argv.js b/test/js/node/child_process/fixtures/child-process-echo-argv.js new file mode 100644 index 00000000000000..fc1a53f2ea6a18 --- /dev/null +++ b/test/js/node/child_process/fixtures/child-process-echo-argv.js @@ -0,0 +1,3 @@ +for (const item of process.argv.slice(2)) { + console.log(item); +} diff --git a/test/js/node/fs/fs.test.ts b/test/js/node/fs/fs.test.ts index 7b838229f7d276..25ba2fa42558aa 100644 --- a/test/js/node/fs/fs.test.ts +++ b/test/js/node/fs/fs.test.ts @@ -250,6 +250,17 @@ it("Dirent.name setter", () => { expect(dirent.name).toBe("hello"); }); +it("writeFileSync should correctly resolve ../..", () => { + const base = join(tmpdir(), `fs-test-${Math.random().toString(36).slice(2)}`); + const path = join(base, "foo", "bar"); + mkdirSync(path, { recursive: true }); + const cwd = process.cwd(); + process.chdir(path); + writeFileSync("../../test.txt", "hello"); + expect(readFileSync(join(base, "test.txt"), "utf8")).toBe("hello"); + process.chdir(cwd); +}); + it("writeFileSync in append should not truncate the file", () => { const path = join(tmpdir(), "writeFileSync-should-not-append-" + (Date.now() * 10000).toString(16)); var str = ""; @@ -2971,3 +2982,19 @@ describe.if(isWindows)("windows path handling", () => { }); } }); + +it("using writeFile on an fd does not truncate it", () => { + const filepath = join(tmpdir(), `file-${Math.random().toString(32).slice(2)}.txt`); + const fd = fs.openSync(filepath, "w+"); + fs.writeFileSync(fd, "x"); + fs.writeFileSync(fd, "x"); + fs.closeSync(fd); + const content = fs.readFileSync(filepath, "utf8"); + 
expect(content).toBe("xx"); +}); + +it("fs.close with one arg works", () => { + const filepath = join(tmpdir(), `file-${Math.random().toString(32).slice(2)}.txt`); + const fd = fs.openSync(filepath, "w+"); + fs.close(fd); +}); diff --git a/test/js/node/http/node-http.test.ts b/test/js/node/http/node-http.test.ts index f4199cd206cf78..17f7d9d546ed74 100644 --- a/test/js/node/http/node-http.test.ts +++ b/test/js/node/http/node-http.test.ts @@ -1746,3 +1746,62 @@ if (process.platform !== "win32") { expect([joinPath(import.meta.dir, "node-http-ref-fixture.js")]).toRun(); }); } + +it("#10177 response.write with non-ascii latin1 should not cause duplicated character or segfault", done => { + // x = ascii + // รก = latin1 supplementary character + // ๐Ÿ“™ = emoji + // ๐Ÿ‘๐Ÿฝ = its a grapheme of ๐Ÿ‘ ๐ŸŸค + // "\u{1F600}" = utf16 + const chars = ["x", "รก", "๐Ÿ“™", "๐Ÿ‘๐Ÿฝ", "\u{1F600}"]; + + // 128 = small than waterMark, 256 = waterMark, 1024 = large than waterMark + // 8Kb = small than cork buffer + // 16Kb = cork buffer + // 32Kb = large than cork buffer + const start_size = 128; + const increment_step = 1024; + const end_size = 32 * 1024; + let expected = ""; + + function finish(err) { + server.closeAllConnections(); + Bun.gc(true); + done(err); + } + const server = require("http") + .createServer((_, response) => { + response.write(expected); + response.write(""); + response.end(); + }) + .listen(0, "localhost", async (err, hostname, port) => { + expect(err).toBeFalsy(); + expect(port).toBeGreaterThan(0); + + for (const char of chars) { + for (let size = start_size; size <= end_size; size += increment_step) { + expected = char + "-".repeat(size) + "x"; + + try { + const url = `http://${hostname}:${port}`; + const count = 20; + const all = []; + const batchSize = 20; + while (all.length < count) { + const batch = Array.from({ length: batchSize }, () => fetch(url).then(a => a.text())); + + all.push(...(await Promise.all(batch))); + } + + for (const result of all) { 
+ expect(result).toBe(expected); + } + } catch (err) { + return finish(err); + } + } + } + finish(); + }); +}); diff --git a/test/js/node/os/os.test.js b/test/js/node/os/os.test.js index 88ff5b1d75b01c..ba0ded80b49d9b 100644 --- a/test/js/node/os/os.test.js +++ b/test/js/node/os/os.test.js @@ -1,4 +1,4 @@ -import { it, expect } from "bun:test"; +import { it, expect, describe } from "bun:test"; import * as os from "node:os"; import { realpathSync } from "fs"; import { isWindows } from "harness"; @@ -183,3 +183,33 @@ it("loadavg", () => { expect(loadavg.length).toBe(3); expect(loadavg.every(avg => typeof avg === "number")).toBeTrue(); }); + +// https://github.com/oven-sh/bun/issues/10259 +describe("toString works like node", () => { + const exportsWithStrings = [ + "arch", + "availableParallelism", + "endianness", + "freemem", + "homedir", + "hostname", + "platform", + "release", + "tmpdir", + "totalmem", + "type", + "uptime", + "version", + "machine", + ]; + for (const key of exportsWithStrings) { + // node implements Symbol.toPrimitive, not toString! 
+ it(`${key}.toString()`, () => { + expect(os[key].toString()).toStartWith("function"); + }); + + it(`${key} + ''`, () => { + expect(os[key] + "").toBe(os[key]() + ""); + }); + } +}); diff --git a/test/js/node/tls/renegotiation-feature.js b/test/js/node/tls/renegotiation-feature.js new file mode 100644 index 00000000000000..c1107601915e86 --- /dev/null +++ b/test/js/node/tls/renegotiation-feature.js @@ -0,0 +1,29 @@ +const server = require("https").createServer( + { + cert: "-----BEGIN CERTIFICATE-----\nMIIDrzCCApegAwIBAgIUHaenuNcUAu0tjDZGpc7fK4EX78gwDQYJKoZIhvcNAQEL\nBQAwaTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYwFAYDVQQHDA1TYW4gRnJh\nbmNpc2NvMQ0wCwYDVQQKDARPdmVuMREwDwYDVQQLDAhUZWFtIEJ1bjETMBEGA1UE\nAwwKc2VydmVyLWJ1bjAeFw0yMzA5MDYyMzI3MzRaFw0yNTA5MDUyMzI3MzRaMGkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNj\nbzENMAsGA1UECgwET3ZlbjERMA8GA1UECwwIVGVhbSBCdW4xEzARBgNVBAMMCnNl\ncnZlci1idW4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC+7odzr3yI\nYewRNRGIubF5hzT7Bym2dDab4yhaKf5drL+rcA0J15BM8QJ9iSmL1ovg7x35Q2MB\nKw3rl/Yyy3aJS8whZTUze522El72iZbdNbS+oH6GxB2gcZB6hmUehPjHIUH4icwP\ndwVUeR6fB7vkfDddLXe0Tb4qsO1EK8H0mr5PiQSXfj39Yc1QHY7/gZ/xeSrt/6yn\n0oH9HbjF2XLSL2j6cQPKEayartHN0SwzwLi0eWSzcziVPSQV7c6Lg9UuIHbKlgOF\nzDpcp1p1lRqv2yrT25im/dS6oy9XX+p7EfZxqeqpXX2fr5WKxgnzxI3sW93PG8FU\nIDHtnUsoHX3RAgMBAAGjTzBNMCwGA1UdEQQlMCOCCWxvY2FsaG9zdIcEfwAAAYcQ\nAAAAAAAAAAAAAAAAAAAAATAdBgNVHQ4EFgQUF3y/su4J/8ScpK+rM2LwTct6EQow\nDQYJKoZIhvcNAQELBQADggEBAGWGWp59Bmrk3Gt0bidFLEbvlOgGPWCT9ZrJUjgc\nhY44E+/t4gIBdoKOSwxo1tjtz7WsC2IYReLTXh1vTsgEitk0Bf4y7P40+pBwwZwK\naeIF9+PC6ZoAkXGFRoyEalaPVQDBg/DPOMRG9OH0lKfen9OGkZxmmjRLJzbyfAhU\noI/hExIjV8vehcvaJXmkfybJDYOYkN4BCNqPQHNf87ZNdFCb9Zgxwp/Ou+47J5k4\n5plQ+K7trfKXG3ABMbOJXNt1b0sH8jnpAsyHY4DLEQqxKYADbXsr3YX/yy6c0eOo\nX2bHGD1+zGsb7lGyNyoZrCZ0233glrEM4UxmvldBcWwOWfk=\n-----END CERTIFICATE-----\n", + key: "-----BEGIN PRIVATE 
KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+7odzr3yIYewR\nNRGIubF5hzT7Bym2dDab4yhaKf5drL+rcA0J15BM8QJ9iSmL1ovg7x35Q2MBKw3r\nl/Yyy3aJS8whZTUze522El72iZbdNbS+oH6GxB2gcZB6hmUehPjHIUH4icwPdwVU\neR6fB7vkfDddLXe0Tb4qsO1EK8H0mr5PiQSXfj39Yc1QHY7/gZ/xeSrt/6yn0oH9\nHbjF2XLSL2j6cQPKEayartHN0SwzwLi0eWSzcziVPSQV7c6Lg9UuIHbKlgOFzDpc\np1p1lRqv2yrT25im/dS6oy9XX+p7EfZxqeqpXX2fr5WKxgnzxI3sW93PG8FUIDHt\nnUsoHX3RAgMBAAECggEAAckMqkn+ER3c7YMsKRLc5bUE9ELe+ftUwfA6G+oXVorn\nE+uWCXGdNqI+TOZkQpurQBWn9IzTwv19QY+H740cxo0ozZVSPE4v4czIilv9XlVw\n3YCNa2uMxeqp76WMbz1xEhaFEgn6ASTVf3hxYJYKM0ljhPX8Vb8wWwlLONxr4w4X\nOnQAB5QE7i7LVRsQIpWKnGsALePeQjzhzUZDhz0UnTyGU6GfC+V+hN3RkC34A8oK\njR3/Wsjahev0Rpb+9Pbu3SgTrZTtQ+srlRrEsDG0wVqxkIk9ueSMOHlEtQ7zYZsk\nlX59Bb8LHNGQD5o+H1EDaC6OCsgzUAAJtDRZsPiZEQKBgQDs+YtVsc9RDMoC0x2y\nlVnP6IUDXt+2UXndZfJI3YS+wsfxiEkgK7G3AhjgB+C+DKEJzptVxP+212hHnXgr\n1gfW/x4g7OWBu4IxFmZ2J/Ojor+prhHJdCvD0VqnMzauzqLTe92aexiexXQGm+WW\nwRl3YZLmkft3rzs3ZPhc1G2X9QKBgQDOQq3rrxcvxSYaDZAb+6B/H7ZE4natMCiz\nLx/cWT8n+/CrJI2v3kDfdPl9yyXIOGrsqFgR3uhiUJnz+oeZFFHfYpslb8KvimHx\nKI+qcVDcprmYyXj2Lrf3fvj4pKorc+8TgOBDUpXIFhFDyM+0DmHLfq+7UqvjU9Hs\nkjER7baQ7QKBgQDTh508jU/FxWi9RL4Jnw9gaunwrEt9bxUc79dp+3J25V+c1k6Q\nDPDBr3mM4PtYKeXF30sBMKwiBf3rj0CpwI+W9ntqYIwtVbdNIfWsGtV8h9YWHG98\nJ9q5HLOS9EAnogPuS27walj7wL1k+NvjydJ1of+DGWQi3aQ6OkMIegap0QKBgBlR\nzCHLa5A8plG6an9U4z3Xubs5BZJ6//QHC+Uzu3IAFmob4Zy+Lr5/kITlpCyw6EdG\n3xDKiUJQXKW7kluzR92hMCRnVMHRvfYpoYEtydxcRxo/WS73SzQBjTSQmicdYzLE\ntkLtZ1+ZfeMRSpXy0gR198KKAnm0d2eQBqAJy0h9AoGBAM80zkd+LehBKq87Zoh7\ndtREVWslRD1C5HvFcAxYxBybcKzVpL89jIRGKB8SoZkF7edzhqvVzAMP0FFsEgCh\naClYGtO+uo+B91+5v2CCqowRJUGfbFOtCuSPR7+B3LDK8pkjK2SQ0mFPUfRA5z0z\nNVWtC0EYNBTRkqhYtqr3ZpUc\n-----END PRIVATE KEY-----\n", + rejectUnauthorized: false, + hostname: "localhost", + minVersion: "TLSv1.2", + // force maxVersion to be TLSv1.2 so that renegotiation is allowed + maxVersion: "TLSv1.2", + }, + (req, res) => { + const client = res.socket; + client.renegotiate({ requestCert: true, 
rejectUnauthorized: false }, err => { + if (err) { + res.writeHead(500, { "Content-Type": "text/plain" }); + res.end("Error"); + } else { + res.writeHead(200, { "Content-Type": "text/plain" }); + res.end("Hello World"); + } + }); + }, +); + +server.listen(0, () => { + const { port } = server.address(); + const url = `https://localhost:${port}`; + console.log(url); +}); diff --git a/test/js/node/tls/renegotiation.test.ts b/test/js/node/tls/renegotiation.test.ts new file mode 100644 index 00000000000000..14393034a0cb37 --- /dev/null +++ b/test/js/node/tls/renegotiation.test.ts @@ -0,0 +1,152 @@ +import { expect, it, beforeAll, afterAll } from "bun:test"; +import { join } from "path"; +import type { Subprocess } from "bun"; +import type { IncomingMessage } from "http"; +let url: URL; +let process: Subprocess<"ignore", "pipe", "ignore"> | null = null; +beforeAll(async () => { + process = Bun.spawn(["node", join(import.meta.dir, "renegotiation-feature.js")], { + stdout: "pipe", + stderr: "inherit", + stdin: "ignore", + }); + const { value } = await process.stdout.getReader().read(); + url = new URL(new TextDecoder().decode(value)); +}); + +afterAll(() => { + process?.kill(); +}); + +it("allow renegotiation in fetch", async () => { + const body = await fetch(url, { + verbose: true, + keepalive: false, + tls: { rejectUnauthorized: false }, + }).then(res => res.text()); + expect(body).toBe("Hello World"); +}); + +it("should fail if renegotiation fails using fetch", async () => { + try { + await fetch(url, { + verbose: true, + keepalive: false, + tls: { rejectUnauthorized: true }, + }).then(res => res.text()); + expect.unreachable(); + } catch (e: any) { + expect(e.code).toBe("DEPTH_ZERO_SELF_SIGNED_CERT"); + } +}); + +it("allow renegotiation in https module", async () => { + const { promise, resolve, reject } = Promise.withResolvers(); + const req = require("https").request( + { + hostname: url.hostname, + port: url.port, + path: url.pathname, + method: "GET", + keepalive: 
false, + tls: { rejectUnauthorized: false }, + }, + (res: IncomingMessage) => { + res.setEncoding("utf8"); + let data = ""; + + res.on("data", (chunk: string) => { + data += chunk; + }); + + res.on("error", reject); + res.on("end", () => resolve(data)); + }, + ); + req.on("error", reject); + req.end(); + + const body = await promise; + expect(body).toBe("Hello World"); +}); + +it("should fail if renegotiation fails using https", async () => { + const { promise, resolve, reject } = Promise.withResolvers(); + const req = require("https").request( + { + hostname: url.hostname, + port: url.port, + path: url.pathname, + method: "GET", + keepalive: false, + tls: { rejectUnauthorized: true }, + }, + (res: IncomingMessage) => { + res.setEncoding("utf8"); + let data = ""; + + res.on("data", (chunk: string) => { + data += chunk; + }); + + res.on("error", reject); + res.on("end", () => resolve(data)); + }, + ); + req.on("error", reject); + req.end(); + + try { + await promise; + expect.unreachable(); + } catch (e: any) { + expect(e.code).toBe("DEPTH_ZERO_SELF_SIGNED_CERT"); + } +}); +it("allow renegotiation in tls module", async () => { + const { promise, resolve, reject } = Promise.withResolvers(); + + const socket = require("tls").connect({ + rejectUnauthorized: false, + host: url.hostname, + port: url.port, + }); + let data = ""; + socket.on("data", (chunk: Buffer) => { + data += chunk.toString(); + if (data.indexOf("0\r\n\r\n") !== -1) { + const result = data.split("\r\n\r\n")[1].split("\r\n")[1]; + resolve(result); + } + }); + socket.on("error", reject); + socket.write("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n"); + const body = await promise; + expect(body).toBe("Hello World"); +}); + +it("should fail if renegotiation fails using tls module", async () => { + const { promise, resolve, reject } = Promise.withResolvers(); + + const socket = require("tls").connect({ + rejectUnauthorized: true, + host: url.hostname, + port: url.port, + }); + let data = ""; + 
socket.on("data", (chunk: Buffer) => { + data += chunk.toString(); + if (data.indexOf("0\r\n\r\n") !== -1) { + const result = data.split("\r\n\r\n")[1].split("\r\n")[1]; + resolve(result); + } + }); + socket.on("error", reject); + socket.write("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n"); + try { + await promise; + expect.unreachable(); + } catch (e: any) { + expect(e.code).toBe("DEPTH_ZERO_SELF_SIGNED_CERT"); + } +});