From d9790256a460decdce9f5a621cc2075e7162b670 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 20:47:01 +0200 Subject: [PATCH 01/18] Update nix flake --- flake.lock | 48 ++++++++++++------------------------------------ flake.nix | 5 ----- 2 files changed, 12 insertions(+), 41 deletions(-) diff --git a/flake.lock b/flake.lock index cbdab3e8a915..5f6882016797 100644 --- a/flake.lock +++ b/flake.lock @@ -7,11 +7,11 @@ ] }, "locked": { - "lastModified": 1715274763, - "narHash": "sha256-3Iv1PGHJn9sV3HO4FlOVaaztOxa9uGLfOmUWrH7v7+A=", + "lastModified": 1722960479, + "narHash": "sha256-NhCkJJQhD5GUib8zN9JrmYGMwt4lCRp6ZVNzIiYCl0Y=", "owner": "ipetkov", "repo": "crane", - "rev": "27025ab71bdca30e7ed0a16c88fd74c5970fc7f5", + "rev": "4c6c77920b8d44cd6660c1621dea6b3fc4b4c4f4", "type": "github" }, "original": { @@ -27,11 +27,11 @@ ] }, "locked": { - "lastModified": 1714641030, - "narHash": "sha256-yzcRNDoyVP7+SCNX0wmuDju1NUCt8Dz9+lyUXEI0dbI=", + "lastModified": 1722555600, + "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "e5d10a24b66c3ea8f150e47dfdb0416ab7c3390e", + "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", "type": "github" }, "original": { @@ -40,26 +40,6 @@ "type": "github" } }, - "flake-utils": { - "inputs": { - "systems": [ - "systems" - ] - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, "gitignore": { "inputs": { "nixpkgs": [ @@ -82,11 +62,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1715266358, - "narHash": "sha256-doPgfj+7FFe9rfzWo1siAV2mVCasW+Bh8I1cToAXEE4=", + "lastModified": 1722813957, + "narHash": "sha256-IAoYyYnED7P8zrBFMnmp7ydaJfwTnwcnqxUElC1I26Y=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f1010e0469db743d14519a1efd37e23f8513d714", + "rev": "cb9a96f23c491c081b38eab96d22fa958043c9fa", "type": "github" }, "original": { @@ -99,7 +79,6 @@ "inputs": { "crane": "crane", "flake-parts": "flake-parts", - "flake-utils": "flake-utils", "gitignore": "gitignore", "nixpkgs": "nixpkgs", "rust-overlay": "rust-overlay", @@ -108,19 +87,16 @@ }, "rust-overlay": { "inputs": { - "flake-utils": [ - "flake-utils" - ], "nixpkgs": [ "nixpkgs" ] }, "locked": { - "lastModified": 1715307487, - "narHash": "sha256-yuDAys3JuJmhQUQGMMsl3BDQNZUYZDw0eA71OVh9FeY=", + "lastModified": 1722910815, + "narHash": "sha256-v6Vk/xlABhw2QzOa6xh3Jx/IvmlbKbOazFM+bDFQlWU=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "ec7a7caf50877bc32988c82653d6b3e6952a8c3f", + "rev": "7df2ac544c203d21b63aac23bfaec7f9b919a733", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index a76ab2edbea8..bda6ec6a565a 100644 --- a/flake.nix +++ b/flake.nix @@ -4,10 +4,6 @@ url = "github:ipetkov/crane"; inputs.nixpkgs.follows = "nixpkgs"; }; - flake-utils = { - url = "github:numtide/flake-utils"; - inputs.systems.follows = "systems"; - }; flake-parts = { url = "github:hercules-ci/flake-parts"; inputs.nixpkgs-lib.follows = "nixpkgs"; @@ -19,7 +15,6 @@ rust-overlay = { url = "github:oxalica/rust-overlay"; inputs.nixpkgs.follows = "nixpkgs"; - inputs.flake-utils.follows = "flake-utils"; }; nixpkgs.url = "nixpkgs/nixos-unstable"; systems.url = "github:nix-systems/default"; From 5dde56f677d75ec34c808db6b639114e6c26158e Mon Sep 17 00:00:00 2001 
From: Alexey Orlenko Date: Tue, 6 Aug 2024 20:47:11 +0200 Subject: [PATCH 02/18] Update Rust to 1.80 --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 593e44aa569a..7e63cd506c5f 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.78.0" +channel = "1.80.0" components = ["clippy", "rustfmt", "rust-src"] targets = [ # WASM target for serverless and edge environments. From 824e0264447c1f7adfbd02faf3a4887924351a33 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 20:51:18 +0200 Subject: [PATCH 03/18] Fix clippy::derive_ord_xor_partial_ord --- psl/parser-database/src/walkers/relation_field.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/psl/parser-database/src/walkers/relation_field.rs b/psl/parser-database/src/walkers/relation_field.rs index 5e387480d0b7..7e44b2ca0df7 100644 --- a/psl/parser-database/src/walkers/relation_field.rs +++ b/psl/parser-database/src/walkers/relation_field.rs @@ -169,7 +169,7 @@ impl<'db> RelationFieldWalker<'db> { } /// The relation name. -#[derive(Debug, Clone, PartialOrd)] +#[derive(Debug, Clone)] pub enum RelationName<'db> { /// A relation name specified in the AST. Explicit(&'db str), @@ -201,6 +201,12 @@ impl<'db> Ord for RelationName<'db> { } } +impl<'db> PartialOrd for RelationName<'db> { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } + } + impl<'db> std::hash::Hash for RelationName<'db> { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { match self { From 4fbdd7991a949f1737f23d0eecb5da6ca60b9174 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 20:53:23 +0200 Subject: [PATCH 04/18] Fix clippy::needless_borrows_for_generic_args --- psl/psl-core/src/mcf.rs | 2 +- query-engine/query-structure/src/selection_result.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/psl/psl-core/src/mcf.rs b/psl/psl-core/src/mcf.rs index c03edeb66993..75cde3bb5f01 100644 --- a/psl/psl-core/src/mcf.rs +++ b/psl/psl-core/src/mcf.rs @@ -8,7 +8,7 @@ pub use source::*; use serde::Serialize; pub fn config_to_mcf_json_value(mcf: &crate::Configuration, files: &Files) -> serde_json::Value { - serde_json::to_value(&model_to_serializable(mcf, files)).expect("Failed to render JSON.") + serde_json::to_value(model_to_serializable(mcf, files)).expect("Failed to render JSON.") } #[derive(Debug, Serialize)] diff --git a/query-engine/query-structure/src/selection_result.rs b/query-engine/query-structure/src/selection_result.rs index 74b097506fb6..c3d0641ce294 100644 --- a/query-engine/query-structure/src/selection_result.rs +++ b/query-engine/query-structure/src/selection_result.rs @@ -12,7 +12,7 @@ impl std::fmt::Debug for SelectionResult { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_list() .entries( - &self + self .pairs .iter() .map(|pair| (format!("{}", pair.0), pair.1.clone())) From 789bada283a8f0d5a637e3316ba5c632bfef6f4e Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 20:57:45 +0200 Subject: [PATCH 05/18] Update time dependency to fix compilation error --- Cargo.lock | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 815e33f8d184..b147341c7617 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1031,9 +1031,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.7" +version = "0.3.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] [[package]] name = "derivative" @@ -2824,6 +2827,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" version = "0.1.45" @@ -3338,6 +3347,12 @@ dependencies = [ "uuid", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -4743,9 +4758,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.183" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ -4772,9 +4787,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", @@ -5508,12 +5523,14 @@ dependencies = [ [[package]] name = "time" -version = "0.3.25" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", + "powerfmt", "serde", "time-core", "time-macros", @@ -5521,16 +5538,17 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.11" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] From 050ac9438fbadd23e207b14bf1bc747e671e4d9c Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 21:02:36 +0200 Subject: [PATCH 06/18] Fix auto-fixable clippy issues --- prisma-fmt/tests/code_actions/test_api.rs | 5 +---- .../black-box-tests/tests/helpers/mod.rs | 5 +---- .../extractors/filters/mod.rs | 2 +- .../extractors/filters/relation.rs | 10 +++++----- .../query-structure/src/selection_result.rs | 3 +-- schema-engine/core/src/rpc.rs | 2 +- .../tests/errors/error_tests.rs | 20 +++++++++---------- .../tests/migrations/indexes.rs | 8 ++++---- 8 files changed, 24 insertions(+), 31 deletions(-) diff --git a/prisma-fmt/tests/code_actions/test_api.rs b/prisma-fmt/tests/code_actions/test_api.rs index b09f517be9c5..95021c73cef7 
100644 --- a/prisma-fmt/tests/code_actions/test_api.rs +++ b/prisma-fmt/tests/code_actions/test_api.rs @@ -90,10 +90,7 @@ pub(crate) fn test_scenario(scenario_name: &str) { .as_str() }; - let diagnostics = match parse_schema_diagnostics(&schema_files, initiating_file_name) { - Some(diagnostics) => diagnostics, - None => Vec::new(), - }; + let diagnostics = parse_schema_diagnostics(&schema_files, initiating_file_name).unwrap_or_default(); path.clear(); write!(path, "{SCENARIOS_PATH}/{scenario_name}/result.json").unwrap(); diff --git a/query-engine/black-box-tests/tests/helpers/mod.rs b/query-engine/black-box-tests/tests/helpers/mod.rs index 95dc53313b8d..efebea03ac3f 100644 --- a/query-engine/black-box-tests/tests/helpers/mod.rs +++ b/query-engine/black-box-tests/tests/helpers/mod.rs @@ -52,10 +52,7 @@ pub(crate) fn query_engine_cmd(dml: &str) -> (process::Command, String) { cmd.env_clear(); let port = generate_free_port(); - cmd.env("PRISMA_DML", dml) - .arg("--port") - .arg(&port.to_string()) - .arg("-g"); + cmd.env("PRISMA_DML", dml).arg("--port").arg(port.to_string()).arg("-g"); (cmd, format!("http://0.0.0.0:{}", port)) } diff --git a/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs b/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs index c87da451ff2b..9b2311deda27 100644 --- a/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs +++ b/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs @@ -309,7 +309,7 @@ fn extract_relation_filters( // Implicit is ParsedInputValue::Map(filter_map) => { - extract_filter(filter_map, &field.related_model()).map(|filter| vec![field.to_one_related(filter)]) + extract_filter(filter_map, field.related_model()).map(|filter| vec![field.to_one_related(filter)]) } x => Err(QueryGraphBuilderError::InputError(format!( diff --git a/query-engine/core/src/query_graph_builder/extractors/filters/relation.rs b/query-engine/core/src/query_graph_builder/extractors/filters/relation.rs index 47ec7ab9d193..3e497fc4d3a8 100644 --- a/query-engine/core/src/query_graph_builder/extractors/filters/relation.rs +++ b/query-engine/core/src/query_graph_builder/extractors/filters/relation.rs @@ -13,14 +13,14 @@ pub fn parse( match (filter_key, value) { // Relation list filters - (filters::SOME, Some(value)) => Ok(field.at_least_one_related(extract_filter(value, &field.related_model())?)), - (filters::NONE, Some(value)) => Ok(field.no_related(extract_filter(value, &field.related_model())?)), - (filters::EVERY, Some(value)) => Ok(field.every_related(extract_filter(value, &field.related_model())?)), + (filters::SOME, Some(value)) => Ok(field.at_least_one_related(extract_filter(value, field.related_model())?)), + (filters::NONE, Some(value)) => Ok(field.no_related(extract_filter(value, field.related_model())?)), + (filters::EVERY, Some(value)) => Ok(field.every_related(extract_filter(value, field.related_model())?)), // One-relation filters - (filters::IS, Some(value)) => Ok(field.to_one_related(extract_filter(value, &field.related_model())?)), + (filters::IS, Some(value)) => Ok(field.to_one_related(extract_filter(value, field.related_model())?)), (filters::IS, None) => Ok(field.one_relation_is_null()), - (filters::IS_NOT, Some(value)) => Ok(field.no_related(extract_filter(value, &field.related_model())?)), + (filters::IS_NOT, Some(value)) => Ok(field.no_related(extract_filter(value, field.related_model())?)), (filters::IS_NOT, None) => Ok(Filter::not(vec![field.one_relation_is_null()])), _ => 
Err(QueryGraphBuilderError::InputError(format!( diff --git a/query-engine/query-structure/src/selection_result.rs b/query-engine/query-structure/src/selection_result.rs index c3d0641ce294..b31a77aae3df 100644 --- a/query-engine/query-structure/src/selection_result.rs +++ b/query-engine/query-structure/src/selection_result.rs @@ -12,8 +12,7 @@ impl std::fmt::Debug for SelectionResult { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_list() .entries( - self - .pairs + self.pairs .iter() .map(|pair| (format!("{}", pair.0), pair.1.clone())) .collect_vec(), diff --git a/schema-engine/core/src/rpc.rs b/schema-engine/core/src/rpc.rs index 041961e8982f..b5467260f638 100644 --- a/schema-engine/core/src/rpc.rs +++ b/schema-engine/core/src/rpc.rs @@ -65,7 +65,7 @@ fn render(result: CoreResult) -> jsonrpc_core::Result JsonRpcError { - serde_json::to_value(&crate_error.to_user_facing()) + serde_json::to_value(crate_error.to_user_facing()) .map(|data| JsonRpcError { // We separate the JSON-RPC error code (defined by the JSON-RPC spec) from the // prisma error code, which is located in `data`. diff --git a/schema-engine/sql-migration-tests/tests/errors/error_tests.rs b/schema-engine/sql-migration-tests/tests/errors/error_tests.rs index 90d0f90c5250..ddcf1adb802a 100644 --- a/schema-engine/sql-migration-tests/tests/errors/error_tests.rs +++ b/schema-engine/sql-migration-tests/tests/errors/error_tests.rs @@ -49,7 +49,7 @@ fn authentication_failure_must_return_a_known_error_on_postgres(api: TestApi) { let user = db_url.username(); let host = db_url.host().unwrap().to_string(); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected = json!({ "is_panic": false, "message": format!("Authentication failed against database server at `{host}`, the provided database credentials for `postgres` are not valid.\n\nPlease make sure to provide valid database credentials for the database server at `{host}`."), @@ -83,7 +83,7 @@ fn authentication_failure_must_return_a_known_error_on_mysql(api: TestApi) { let user = url.username(); let host = url.host().unwrap().to_string(); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected = json!({ "is_panic": false, "message": format!("Authentication failed against database server at `{host}`, the provided database credentials for `{user}` are not valid.\n\nPlease make sure to provide valid database credentials for the database server at `{host}`."), @@ -118,7 +118,7 @@ fn authentication_failure_must_return_a_known_error_on_mssql(api: TestApi) { let error = tok(connection_error(dm)); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected = json!({ "is_panic": false, "message": format!("Authentication failed against database server at `{host}`, the provided database credentials for `{user}` are not valid.\n\nPlease make sure to provide valid database credentials for the database server at `{host}`."), @@ -156,7 +156,7 @@ fn unreachable_database_must_return_a_proper_error_on_mysql(api: TestApi) { let port = url.port().unwrap(); let host = url.host().unwrap().to_string(); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected = json!({ 
"is_panic": false, "message": format!("Can't reach database server at `{host}:{port}`\n\nPlease make sure your database server is running at `{host}:{port}`."), @@ -190,7 +190,7 @@ fn unreachable_database_must_return_a_proper_error_on_postgres(api: TestApi) { let host = url.host().unwrap().to_string(); let port = url.port().unwrap(); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected = json!({ "is_panic": false, "message": format!("Can't reach database server at `{host}:{port}`\n\nPlease make sure your database server is running at `{host}:{port}`."), @@ -222,7 +222,7 @@ fn database_does_not_exist_must_return_a_proper_error(api: TestApi) { let error = tok(connection_error(dm)); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected = json!({ "is_panic": false, "message": format!("Database `{database_name}` does not exist on the database server at `{database_host}:{database_port}`.", database_name = database_name, database_host = url.host().unwrap(), database_port = url.port().unwrap()), @@ -251,7 +251,7 @@ fn bad_datasource_url_and_provider_combinations_must_return_a_proper_error(api: let error = tok(connection_error(dm)); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let err_message: String = json_error["message"].as_str().unwrap().into(); @@ -293,7 +293,7 @@ fn connections_to_system_databases_must_be_rejected(api: TestApi) { let name = if name == &"" { "mysql" } else { name }; let error = tok(connection_error(dm)); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected = json!({ "is_panic": false, @@ -455,7 +455,7 @@ async fn connection_string_problems_give_a_nice_error() { .await .unwrap_err(); - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let details = match provider.0 { "sqlserver" => { @@ -509,7 +509,7 @@ async fn bad_connection_string_in_datamodel_returns_nice_error() { Err(e) => e, }; - let json_error = serde_json::to_value(&error.to_user_facing()).unwrap(); + let json_error = serde_json::to_value(error.to_user_facing()).unwrap(); let expected_json_error = json!({ "is_panic": false, diff --git a/schema-engine/sql-migration-tests/tests/migrations/indexes.rs b/schema-engine/sql-migration-tests/tests/migrations/indexes.rs index dded1559ba2a..77badfd061e4 100644 --- a/schema-engine/sql-migration-tests/tests/migrations/indexes.rs +++ b/schema-engine/sql-migration-tests/tests/migrations/indexes.rs @@ -946,7 +946,7 @@ fn adding_fulltext_index_to_an_existing_column(api: TestApi) { } "#}; - api.schema_push(&api.datamodel_with_provider(dm)).send().assert_green(); + api.schema_push(api.datamodel_with_provider(dm)).send().assert_green(); api.assert_schema() .assert_table("A", |table| table.assert_indexes_count(0)); @@ -961,7 +961,7 @@ fn adding_fulltext_index_to_an_existing_column(api: TestApi) { } "#}; - api.schema_push(&api.datamodel_with_provider(dm)).send().assert_green(); + api.schema_push(api.datamodel_with_provider(dm)).send().assert_green(); api.assert_schema().assert_table("A", |table| { table.assert_index_on_columns(&["a", "b"], |index| 
index.assert_is_fulltext()) @@ -980,7 +980,7 @@ fn changing_normal_index_to_a_fulltext_index(api: TestApi) { } "#}; - api.schema_push(&api.datamodel_with_provider(dm)).send().assert_green(); + api.schema_push(api.datamodel_with_provider(dm)).send().assert_green(); api.assert_schema().assert_table("A", |table| { table.assert_indexes_count(1); @@ -997,7 +997,7 @@ fn changing_normal_index_to_a_fulltext_index(api: TestApi) { } "#}; - api.schema_push(&api.datamodel_with_provider(dm)).send().assert_green(); + api.schema_push(api.datamodel_with_provider(dm)).send().assert_green(); api.assert_schema().assert_table("A", |table| { table.assert_indexes_count(1); From 5ee71a4b52a5002e643c8d1841810e0e934f75f2 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 23:13:07 +0200 Subject: [PATCH 07/18] Fix clippy::doc_lazy_continuation --- libs/prisma-value/src/raw_json.rs | 2 +- libs/telemetry/src/capturing/mod.rs | 42 +++++++++---------- prisma-fmt/src/lib.rs | 5 ++- quaint/src/pooled.rs | 2 +- query-engine/core/src/query_graph/mod.rs | 1 + .../extractors/filters/mod.rs | 6 +-- .../write/nested/connect_nested.rs | 2 +- .../write/nested/create_nested.rs | 2 +- .../src/napi/adapter_method.rs | 2 +- query-engine/request-handlers/src/handler.rs | 3 ++ .../mongodb-schema-connector/src/sampler.rs | 2 +- .../tests/migrations/tests.rs | 2 +- .../methods/markMigrationApplied.toml | 4 +- 13 files changed, 40 insertions(+), 35 deletions(-) diff --git a/libs/prisma-value/src/raw_json.rs b/libs/prisma-value/src/raw_json.rs index e0e3596b05d4..83db97658459 100644 --- a/libs/prisma-value/src/raw_json.rs +++ b/libs/prisma-value/src/raw_json.rs @@ -13,7 +13,7 @@ use serde_json::value::RawValue; /// directly because: /// 1. We need `Eq` implementation /// 2. `serde_json::value::RawValue::from_string` may error and we'd like to delay handling of that error to -/// serialization time +/// serialization time #[derive(Clone, Debug, PartialEq, Eq)] pub struct RawJson { value: String, diff --git a/libs/telemetry/src/capturing/mod.rs b/libs/telemetry/src/capturing/mod.rs index f11b57f65832..1895785bc98d 100644 --- a/libs/telemetry/src/capturing/mod.rs +++ b/libs/telemetry/src/capturing/mod.rs @@ -89,22 +89,22 @@ //! //! - The `server` itself //! - The global `TRACER`, which handles `log!` and `span!` and uses the global `PROCESSOR` to -//! process the data constituting a trace `Span`s and log `Event`s +//! process the data constituting a trace `Span`s and log `Event`s //! - The global `PROCESSOR`, which manages the `Storage` set of data structures, holding logs, -//! traces (and capture settings) per request. +//! traces (and capture settings) per request. //! //! Then, through the request lifecycle, different objects are created and dropped: //! //! - When a request comes in, its headers are processed and a [`Settings`] object is built, this -//! object determines, for the request, how logging and tracing are going to be captured: if only -//! traces, logs, or both, and which log levels are going to be captured. +//! object determines, for the request, how logging and tracing are going to be captured: if only +//! traces, logs, or both, and which log levels are going to be captured. //! - Based on the settings, a new `Capturer` is created; a capturer is nothing but an exporter -//! wrapped to start capturing / fetch the captures for this particular request. +//! wrapped to start capturing / fetch the captures for this particular request. //! 
- An asynchronous task is spawned to own the storage of telemetry data without needing to share -//! memory accross threads. Communication with this task is done through channels. The `Sender` -//! part of the channel is kept in a global, so it can be cloned and used by a) the Capturer -//! (to start capturing / fetch the captures) or by the tracer's SpanProcessor, to extract -//! tracing and logging information that's eventually displayed to the user. +//! memory accross threads. Communication with this task is done through channels. The `Sender` +//! part of the channel is kept in a global, so it can be cloned and used by a) the Capturer +//! (to start capturing / fetch the captures) or by the tracer's SpanProcessor, to extract +//! tracing and logging information that's eventually displayed to the user. //! //! Then the capturing process works in this way: //! @@ -112,28 +112,28 @@ //! - It grabs the HTTP headers and builds a `Capture` object **[2]**, which is configured with the settings //! denoted by the `X-capture-telemetry` //! - Now the server tells the `Capturer` to start capturing all the logs and traces occurring on -//! the request **[3]** (denoted by a `trace_id`) The `trace_id` is either carried on the `traceparent` -//! header or implicitly created on the first span of the request. +//! the request **[3]** (denoted by a `trace_id`) The `trace_id` is either carried on the `traceparent` +//! header or implicitly created on the first span of the request. //! - The `Capturer` sends a message to the task owning the storage to start capturing **[4]**. //! The tasks creates a new entry in the storage for the given trace_id. Spans without a -//! corresponding trace_id in the storage are ignored. +//! corresponding trace_id in the storage are ignored. //! - The server dispatches the request and _Somewhere_ else in the code, it is processed **[5]**. //! - There the code logs events and emits traces asynchronously, as part of the processing **[6]** //! - Traces and Logs arrive at the `TRACER`, and get hydrated as SpanData in the `PROCESSOR` -//! **[7]**. +//! **[7]**. //! - This SpanData is sent through a channel to the task running in parallel, **[8]**. -//! The task transforms the SpanData into `TraceSpans` and `LogEvents` depending on the capture -//! settings and stores those spans and events in the storage. +//! The task transforms the SpanData into `TraceSpans` and `LogEvents` depending on the capture +//! settings and stores those spans and events in the storage. //! - When the code that dispatches the request is done it returns a `PrismaResponse` to the -//! server **[9]**. +//! server **[9]**. //! - Then the server asks the `PROCESSOR` to fetch the captures **[10]** //! - Like before, the `PROCESSOR` sends a message to the task running in parallel, -//! to fetch the captures from the `Storage` **[11]**. At that time, although -//! that's not represented in the diagram, the captures are deleted from the storage, thus -//! freeing any memory used for capturing during the request +//! to fetch the captures from the `Storage` **[11]**. At that time, although +//! that's not represented in the diagram, the captures are deleted from the storage, thus +//! freeing any memory used for capturing during the request //! - Finally, the server sets the `logs` and `traces` extensions in the `PrismaResponse`**[12]**, -//! it serializes the extended response in json format and returns it as an HTTP Response -//! blob **[13]**. +//! 
it serializes the extended response in json format and returns it as an HTTP Response +//! blob **[13]**. //! #![allow(unused_imports, dead_code)] pub use self::capturer::Capturer; diff --git a/prisma-fmt/src/lib.rs b/prisma-fmt/src/lib.rs index b6b13c47838f..3ec5514313bd 100644 --- a/prisma-fmt/src/lib.rs +++ b/prisma-fmt/src/lib.rs @@ -136,14 +136,15 @@ pub fn hover(schema_files: String, params: &str) -> String { /// The two parameters are: /// - The [`SchemaFileInput`] to reformat, as a string. -/// - An LSP -/// [DocumentFormattingParams](https://github.com/microsoft/language-server-protocol/blob/gh-pages/_specifications/specification-3-16.md#textDocument_formatting) object, as JSON. +/// - An LSP [`DocumentFormattingParams`][1] object, as JSON. /// /// The function returns the formatted schema, as a string. /// If the schema or any of the provided parameters is invalid, the function returns the original schema. /// This function never panics. /// /// Of the DocumentFormattingParams, we only take into account tabSize, at the moment. +/// +/// [1]: https://github.com/microsoft/language-server-protocol/blob/gh-pages/_specifications/specification-3-16.md#textDocument_formatting pub fn format(datamodel: String, params: &str) -> String { let schema: SchemaFileInput = match serde_json::from_str(&datamodel) { Ok(params) => params, diff --git a/quaint/src/pooled.rs b/quaint/src/pooled.rs index 3e7e58c05e52..381f0c824149 100644 --- a/quaint/src/pooled.rs +++ b/quaint/src/pooled.rs @@ -274,7 +274,7 @@ impl Builder { /// pool. /// /// - Defaults to `false`, meaning connections are never tested on - /// `check_out`. + /// `check_out`. /// /// [`check_out`]: struct.Quaint.html#method.check_out pub fn test_on_check_out(&mut self, test_on_check_out: bool) { diff --git a/query-engine/core/src/query_graph/mod.rs b/query-engine/core/src/query_graph/mod.rs index 458be8280a3a..8459584a0c42 100644 --- a/query-engine/core/src/query_graph/mod.rs +++ b/query-engine/core/src/query_graph/mod.rs @@ -594,6 +594,7 @@ impl QueryGraph { /// - ... not an `if`-flow node themself /// - ... not already connected to the current `if`-flow node in any form (to prevent double edges) /// - ... not connected to another `if`-flow node with control flow edges (indirect sibling) + /// /// will be ordered below the currently processed `if`-flow node in execution predence. /// /// ```text diff --git a/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs b/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs index 9b2311deda27..803dd6100c43 100644 --- a/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs +++ b/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs @@ -193,11 +193,11 @@ where /// are merged together to optimize the generated SQL statements. /// This is done in three steps (below transformations are using pseudo-code): /// 1. We flatten the filter tree. -/// eg: `Filter(And([ScalarFilter, ScalarFilter], And([ScalarFilter])))` -> `Filter(And([ScalarFilter, ScalarFilter, ScalarFilter]))` +/// eg: `Filter(And([ScalarFilter, ScalarFilter], And([ScalarFilter])))` -> `Filter(And([ScalarFilter, ScalarFilter, ScalarFilter]))` /// 2. We index search filters by their query. -/// eg: `Filter(And([SearchFilter("query", [FieldA]), SearchFilter("query", [FieldB])]))` -> `{ "query": [FieldA, FieldB] }` +/// eg: `Filter(And([SearchFilter("query", [FieldA]), SearchFilter("query", [FieldB])]))` -> `{ "query": [FieldA, FieldB] }` /// 3. 
We reconstruct the filter tree and merge the search filters that have the same query along the way -/// eg: `Filter(And([SearchFilter("query", [FieldA]), SearchFilter("query", [FieldB])]))` -> `Filter(And([SearchFilter("query", [FieldA, FieldB])]))` +/// eg: `Filter(And([SearchFilter("query", [FieldA]), SearchFilter("query", [FieldB])]))` -> `Filter(And([SearchFilter("query", [FieldA, FieldB])]))` fn merge_search_filters(filter: Filter) -> Filter { // The filter tree _needs_ to be flattened for the merge to work properly let flattened = fold_filter(filter); diff --git a/query-engine/core/src/query_graph_builder/write/nested/connect_nested.rs b/query-engine/core/src/query_graph_builder/write/nested/connect_nested.rs index 81038c18a57e..a83bc6adfa85 100644 --- a/query-engine/core/src/query_graph_builder/write/nested/connect_nested.rs +++ b/query-engine/core/src/query_graph_builder/write/nested/connect_nested.rs @@ -327,7 +327,7 @@ fn handle_one_to_many( /// - Parent gets injected with a child on x, because that's what the connect is supposed to do. /// - The update runs, the relation is updated. /// - Now the check runs, because it's dependent on the parent's ID... but the check finds an existing child and fails... -/// ... because we just updated the relation. +/// ... because we just updated the relation. /// /// This is why we need to have an extra update at the end if it's inlined on the parent and a non-create. fn handle_one_to_one( diff --git a/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs b/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs index 72a299c472f6..7414018d818e 100644 --- a/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs +++ b/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs @@ -415,7 +415,7 @@ fn handle_one_to_many( /// - Parent gets injected with a child on x, because that's what the nested create is supposed to do. /// - The update runs, the relation is updated. /// - Now the check runs, because it's dependent on the parent's ID... but the check finds an existing child and fails... -/// ... because we just updated the relation. +/// ... because we just updated the relation. /// /// For these reasons, we need to have an extra update at the end if it's inlined on the parent and a non-create. fn handle_one_to_one( diff --git a/query-engine/driver-adapters/src/napi/adapter_method.rs b/query-engine/driver-adapters/src/napi/adapter_method.rs index ae92117c2713..dd7399d86fa9 100644 --- a/query-engine/driver-adapters/src/napi/adapter_method.rs +++ b/query-engine/driver-adapters/src/napi/adapter_method.rs @@ -13,7 +13,7 @@ use crate::AdapterResult; /// - Automatically unrefs the function so it won't hold off event loop /// - Awaits for returned Promise /// - Unpacks JS `Result` type into Rust `Result` type and converts the error -/// into `quaint::Error`. +/// into `quaint::Error`. /// - Catches panics and converts them to `quaint:Error` pub(crate) struct AdapterMethod<ArgType, ReturnType> where diff --git a/query-engine/request-handlers/src/handler.rs b/query-engine/request-handlers/src/handler.rs index 876488ee4ef8..123af6541c45 100644 --- a/query-engine/request-handlers/src/handler.rs +++ b/query-engine/request-handlers/src/handler.rs @@ -249,12 +249,15 @@ impl<'a> RequestHandler<'a> { } /// Compares two PrismaValues with special comparisons rules needed because user-inputted values are coerced differently than response values. 
+ /// /// We need this when comparing user-inputted values with query response values in the context of compacted queries. + /// /// Here are the cases covered: /// - DateTime/String: User-input: DateTime / Response: String /// - Int/BigInt: User-input: Int / Response: BigInt /// - (JSON protocol only) Custom types (eg: { "$type": "BigInt", value: "1" }): User-input: Scalar / Response: Object /// - (JSON protocol only) String/Enum: User-input: String / Response: Enum + /// /// This should likely _not_ be used outside of this specific context. fn compare_values(left: &ArgumentValue, right: &ArgumentValue) -> bool { match (left, right) { diff --git a/schema-engine/connectors/mongodb-schema-connector/src/sampler.rs b/schema-engine/connectors/mongodb-schema-connector/src/sampler.rs index ec2d0c55c6fa..50d574d08d8c 100644 --- a/schema-engine/connectors/mongodb-schema-connector/src/sampler.rs +++ b/schema-engine/connectors/mongodb-schema-connector/src/sampler.rs @@ -17,7 +17,7 @@ use std::borrow::Cow; /// maximum of SAMPLE_SIZE documents for their fields with the following rules: /// /// - If the same field differs in types between documents, takes the most -/// common type or if even, the latest type and adds a warning. +/// common type or if even, the latest type and adds a warning. /// - Missing fields count as null. /// - Indices are taken, but not if they are partial. pub(super) async fn sample( diff --git a/schema-engine/connectors/mongodb-schema-connector/tests/migrations/tests.rs b/schema-engine/connectors/mongodb-schema-connector/tests/migrations/tests.rs index c8ef4de9e870..8298f8d3b8c1 100644 --- a/schema-engine/connectors/mongodb-schema-connector/tests/migrations/tests.rs +++ b/schema-engine/connectors/mongodb-schema-connector/tests/migrations/tests.rs @@ -5,7 +5,7 @@ //! Each test scenario folder must contain two files: //! //! - `state.json` must contain the initial state of the database. See examples and `State` in -//! `test_api.rs` for details. +//! `test_api.rs` for details. //! - `schema.prisma` must be the Prisma schema. //! //! On the first run, a `result` file will also be created. It is a snapshot test, do not edit it diff --git a/schema-engine/json-rpc-api-build/methods/markMigrationApplied.toml b/schema-engine/json-rpc-api-build/methods/markMigrationApplied.toml index f76ee11f3094..ce4522dbb52d 100644 --- a/schema-engine/json-rpc-api-build/methods/markMigrationApplied.toml +++ b/schema-engine/json-rpc-api-build/methods/markMigrationApplied.toml @@ -4,9 +4,9 @@ description = """Mark a migration as applied in the migrations table. There are two possible outcomes: - The migration is already in the table, but in a failed state. In this case, we will mark it -as rolled back, then create a new entry. + as rolled back, then create a new entry. - The migration is not in the table. We will create a new entry in the migrations table. The -`started_at` and `finished_at` will be the same. + `started_at` and `finished_at` will be the same. - If it is already applied, we return a user-facing error. """ requestShape = "markMigrationAppliedInput" From dd8841d470cc0fd2811d98da5cb443c24847860b Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 23:14:59 +0200 Subject: [PATCH 08/18] Remove `cfg` for non-existent `bigdecimal` feature and thus enable test Remove `cfg` attribute for non-existent `bigdecimal` feature in quaint test. Besides fixing a compiler warning, this also enables this test, which was previously skipped. 
--- quaint/src/tests/query.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quaint/src/tests/query.rs b/quaint/src/tests/query.rs index 06bebe1a9601..4b38f1afcc63 100644 --- a/quaint/src/tests/query.rs +++ b/quaint/src/tests/query.rs @@ -736,7 +736,7 @@ async fn returning_update(api: &mut dyn TestApi) -> crate::Result<()> { Ok(()) } -#[cfg(all(feature = "mssql", feature = "bigdecimal"))] +#[cfg(feature = "mssql")] #[test_each_connector(tags("mssql"))] async fn returning_decimal_insert_with_type_defs(api: &mut dyn TestApi) -> crate::Result<()> { use bigdecimal::BigDecimal; From 04cf8ac3f3cfdd1a2774907130b9e02a639b81dd Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 23:19:04 +0200 Subject: [PATCH 09/18] Fix clippy::legacy_numeric_constants --- quaint/src/tests/query.rs | 6 ++---- quaint/src/tests/query/error.rs | 2 +- .../sql-query-connector/src/database/operations/update.rs | 1 - .../sql-query-connector/src/database/operations/write.rs | 1 - 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/quaint/src/tests/query.rs b/quaint/src/tests/query.rs index 4b38f1afcc63..6e83297a9a75 100644 --- a/quaint/src/tests/query.rs +++ b/quaint/src/tests/query.rs @@ -1388,15 +1388,13 @@ async fn unsigned_integers_are_handled(api: &mut dyn TestApi) -> crate::Result<( .create_temp_table("id int4 auto_increment primary key, big bigint unsigned") .await?; - let insert = Insert::multi_into(&table, ["big"]) - .values((2,)) - .values((std::i64::MAX,)); + let insert = Insert::multi_into(&table, ["big"]).values((2,)).values((i64::MAX,)); api.conn().insert(insert.into()).await?; let select = Select::from_table(&table).column("big").order_by("id"); let roundtripped = api.conn().select(select).await?; - let expected = &[2, std::i64::MAX]; + let expected = &[2, i64::MAX]; let actual: Vec<i64> = roundtripped .into_iter() .map(|row| row.at(0).unwrap().as_i64().unwrap()) diff --git a/quaint/src/tests/query/error.rs b/quaint/src/tests/query/error.rs index 69c57332b6d3..399866bd4a3b 100644 --- a/quaint/src/tests/query/error.rs +++ b/quaint/src/tests/query/error.rs @@ -162,7 +162,7 @@ async fn int_unsigned_negative_value_out_of_range(api: &mut dyn TestApi) -> crat // Value too big { - let insert = Insert::multi_into(&table, ["big"]).values((std::i64::MAX,)); + let insert = Insert::multi_into(&table, ["big"]).values((i64::MAX,)); let result = api.conn().insert(insert.into()).await; assert!(matches!(result.unwrap_err().kind(), ErrorKind::ValueOutOfRange { .. })); diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/update.rs b/query-engine/connectors/sql-query-connector/src/database/operations/update.rs index 54e04651d2f4..0dc8081f97d2 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/update.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/update.rs @@ -9,7 +9,6 @@ use crate::{Context, QueryExt, Queryable}; use connector_interface::*; use itertools::Itertools; use query_structure::*; -use std::usize; /// Performs an update with an explicit selection set. /// This function is called for connectors that supports the `UpdateReturning` capability. 
diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/write.rs b/query-engine/connectors/sql-query-connector/src/database/operations/write.rs index d66225daf8cb..137bff50ca58 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/write.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/write.rs @@ -18,7 +18,6 @@ use std::borrow::Cow; use std::{ collections::{HashMap, HashSet}, ops::Deref, - usize, }; use user_facing_errors::query_engine::DatabaseConstraint; From 48e90a1b4ba56af580c2558a961380c5af408f40 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 23:25:46 +0200 Subject: [PATCH 10/18] Remove dead code in introspection test API --- .../tests/introspection/test_api/mod.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs b/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs index 33b2e1ed8d48..235474b55d58 100644 --- a/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs +++ b/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs @@ -71,8 +71,6 @@ impl From for TestMultiResult { } pub struct TestApi { - pub connection_string: String, - pub database_name: String, pub db: Database, pub features: BitFlags<PreviewFeature>, pub connector: MongoDbSchemaConnector, @@ -122,8 +120,6 @@ where let connector = MongoDbSchemaConnector::new(params); let api = TestApi { - connection_string, - database_name, db: database.clone(), features: preview_features, connector, From 4ccdd059ea79954e1ba7a8b326435404a2711a61 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 6 Aug 2024 23:32:51 +0200 Subject: [PATCH 11/18] Suppress dead code warning in MigrationRecord --- query-engine/query-engine-c-abi/src/migrations.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/query-engine/query-engine-c-abi/src/migrations.rs b/query-engine/query-engine-c-abi/src/migrations.rs index 4cd374705ba5..93a80cde5fb7 100644 --- a/query-engine/query-engine-c-abi/src/migrations.rs +++ b/query-engine/query-engine-c-abi/src/migrations.rs @@ -44,14 +44,14 @@ impl From for MigrationDirectory { #[derive(Debug, Clone)] pub struct MigrationRecord { /// A unique, randomly generated identifier. - pub id: String, + pub _id: String, /// The timestamp at which the migration completed *successfully*. pub finished_at: Option<Timestamp>, /// The name of the migration, i.e. the name of migration directory /// containing the migration script. pub migration_name: String, /// The time the migration started being applied. 
- pub started_at: Timestamp, + pub _started_at: Timestamp, /// The time the migration failed pub failed_at: Option<Timestamp>, } @@ -142,9 +142,9 @@ pub fn list_migrations(database_filename: &Path) -> Result<Vec<MigrationRecord>> let failed_at: Option<Timestamp> = row.get(4).unwrap(); entries.push(MigrationRecord { - id, + _id: id, migration_name, - started_at, + _started_at: started_at, finished_at, failed_at, }); From 3e3479173a9251bcb6202f85e22cfaac923cdf6a Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Fri, 9 Aug 2024 17:31:47 +0200 Subject: [PATCH 12/18] Update flake for Rust 1.80.1 --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 5f6882016797..7c914585d8a4 100644 --- a/flake.lock +++ b/flake.lock @@ -92,11 +92,11 @@ ] }, "locked": { - "lastModified": 1722910815, - "narHash": "sha256-v6Vk/xlABhw2QzOa6xh3Jx/IvmlbKbOazFM+bDFQlWU=", + "lastModified": 1723170066, + "narHash": "sha256-SFkQfOA+8AIYJsPlQtxNP+z5jRLfz91z/aOrV94pPmw=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "7df2ac544c203d21b63aac23bfaec7f9b919a733", + "rev": "fecfe4d7c96fea2982c7907997b387a6b52c1093", "type": "github" }, "original": { From cbe65776890ca7c051eb80c4a42cf8dd166f2f89 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Fri, 9 Aug 2024 17:33:14 +0200 Subject: [PATCH 13/18] Update Rust to 1.80.1 --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7e63cd506c5f..e48263a13878 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.80.0" +channel = "1.80.1" components = ["clippy", "rustfmt", "rust-src"] targets = [ # WASM target for serverless and edge environments. From 0ec17d331a43e491a59ccd6a0cc7037983405a81 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Fri, 9 Aug 2024 17:36:56 +0200 Subject: [PATCH 14/18] Revert "Suppress dead code warning in MigrationRecord" This reverts commit 4ccdd059ea79954e1ba7a8b326435404a2711a61. --- query-engine/query-engine-c-abi/src/migrations.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/query-engine/query-engine-c-abi/src/migrations.rs b/query-engine/query-engine-c-abi/src/migrations.rs index 93a80cde5fb7..4cd374705ba5 100644 --- a/query-engine/query-engine-c-abi/src/migrations.rs +++ b/query-engine/query-engine-c-abi/src/migrations.rs @@ -44,14 +44,14 @@ impl From for MigrationDirectory { #[derive(Debug, Clone)] pub struct MigrationRecord { /// A unique, randomly generated identifier. - pub _id: String, + pub id: String, /// The timestamp at which the migration completed *successfully*. pub finished_at: Option<Timestamp>, /// The name of the migration, i.e. the name of migration directory /// containing the migration script. pub migration_name: String, /// The time the migration started being applied. - pub _started_at: Timestamp, + pub started_at: Timestamp, /// The time the migration failed pub failed_at: Option<Timestamp>, } @@ -142,9 +142,9 @@ pub fn list_migrations(database_filename: &Path) -> Result<Vec<MigrationRecord>> let failed_at: Option<Timestamp> = row.get(4).unwrap(); entries.push(MigrationRecord { - _id: id, + id, migration_name, - _started_at: started_at, + started_at, finished_at, failed_at, }); From 7577f28a37c256a633ed64c75467e30639ed89f3 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Fri, 9 Aug 2024 17:37:07 +0200 Subject: [PATCH 15/18] Revert "Remove dead code in introspection test API" This reverts commit 48e90a1b4ba56af580c2558a961380c5af408f40. 
--- .../tests/introspection/test_api/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs b/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs index 235474b55d58..33b2e1ed8d48 100644 --- a/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs +++ b/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs @@ -71,6 +71,8 @@ impl From for TestMultiResult { } pub struct TestApi { + pub connection_string: String, + pub database_name: String, pub db: Database, pub features: BitFlags<PreviewFeature>, pub connector: MongoDbSchemaConnector, @@ -120,6 +122,8 @@ where let connector = MongoDbSchemaConnector::new(params); let api = TestApi { + connection_string, + database_name, db: database.clone(), features: preview_features, connector, From 89034cf98b966c99dfaed9bea70499bb17321b09 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Fri, 9 Aug 2024 17:39:46 +0200 Subject: [PATCH 16/18] Reapply "Suppress dead code warning in MigrationRecord" This reverts commit 0ec17d331a43e491a59ccd6a0cc7037983405a81. --- query-engine/query-engine-c-abi/src/migrations.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/query-engine/query-engine-c-abi/src/migrations.rs b/query-engine/query-engine-c-abi/src/migrations.rs index 4cd374705ba5..93a80cde5fb7 100644 --- a/query-engine/query-engine-c-abi/src/migrations.rs +++ b/query-engine/query-engine-c-abi/src/migrations.rs @@ -44,14 +44,14 @@ impl From for MigrationDirectory { #[derive(Debug, Clone)] pub struct MigrationRecord { /// A unique, randomly generated identifier. - pub id: String, + pub _id: String, /// The timestamp at which the migration completed *successfully*. pub finished_at: Option<Timestamp>, /// The name of the migration, i.e. the name of migration directory /// containing the migration script. pub migration_name: String, /// The time the migration started being applied. - pub started_at: Timestamp, + pub _started_at: Timestamp, /// The time the migration failed pub failed_at: Option<Timestamp>, } @@ -142,9 +142,9 @@ pub fn list_migrations(database_filename: &Path) -> Result<Vec<MigrationRecord>> let failed_at: Option<Timestamp> = row.get(4).unwrap(); entries.push(MigrationRecord { - id, + _id: id, migration_name, - started_at, + _started_at: started_at, finished_at, failed_at, }); From 68fa03668827a58f07fd9696e513b1e662edbe3f Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Fri, 9 Aug 2024 17:40:00 +0200 Subject: [PATCH 17/18] Reapply "Remove dead code in introspection test API" This reverts commit 7577f28a37c256a633ed64c75467e30639ed89f3. 
--- .../tests/introspection/test_api/mod.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs b/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs index 33b2e1ed8d48..235474b55d58 100644 --- a/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs +++ b/schema-engine/connectors/mongodb-schema-connector/tests/introspection/test_api/mod.rs @@ -71,8 +71,6 @@ impl From for TestMultiResult { } pub struct TestApi { - pub connection_string: String, - pub database_name: String, pub db: Database, pub features: BitFlags<PreviewFeature>, pub connector: MongoDbSchemaConnector, @@ -122,8 +120,6 @@ where let connector = MongoDbSchemaConnector::new(params); let api = TestApi { - connection_string, - database_name, db: database.clone(), features: preview_features, connector, From fed374ddfc2e9f6811be3ecce5acc280aa03401c Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Fri, 9 Aug 2024 17:47:58 +0200 Subject: [PATCH 18/18] Fix clippy::doc_lazy_continuation in interactive_transactions/mod.rs --- query-engine/core/src/interactive_transactions/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/query-engine/core/src/interactive_transactions/mod.rs b/query-engine/core/src/interactive_transactions/mod.rs index c3ee76703a06..a0aed069a879 100644 --- a/query-engine/core/src/interactive_transactions/mod.rs +++ b/query-engine/core/src/interactive_transactions/mod.rs @@ -22,7 +22,7 @@ pub(crate) use messages::*; /// process and waits for messages to arrive via its receive channel to process. /// The Transaction Actor Manager will also create an ITXClient and add it to hashmap managed by an RwLock. The ITXClient is the only way to communicate /// with the ITXServer. - +/// /// Once Prisma Client receives the iTx Id it can perform database operations using that iTx id. When an operation request is received by the /// TransactionActorManager, it looks for the client in the hashmap and passes the operation to the client. The ITXClient sends a message to the /// ITXServer and waits for a response. The ITXServer will then perform the operation and return the result. The ITXServer will perform one /// more operation... /// The ITXServer will handle all messages until: /// - It transitions state, e.g "rollback" or "commit" /// - It exceeds its timeout, in which case the iTx is rolledback and the connection to the database is closed. - +/// /// Once the ITXServer is done handling messages from the iTx Client, it sends a last message to the Background Client list Actor to say that it is completed and then shuts down. /// The Background Client list Actor removes the client from the list of active clients and keeps in cache the iTx id of the closed transaction. - +/// /// We keep a list of closed transactions so that if any further messages are received for this iTx id, /// the TransactionActorManager can reply with a helpful error message which explains that no operation can be performed on a closed transaction /// rather than an error message stating that the transaction does not exist.
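
Editor's note, for illustration only: the actor-and-channel shape described in the module documentation of the final patch above (a server task that owns the transaction, a client handle as the only way to talk to it, one operation handled at a time, a commit/rollback state transition ending the actor) can be sketched in a few lines of Rust. This is a minimal, hypothetical model using tokio channels; the names (`TxMsg`, `TxClient`, `spawn_tx_server`) are illustrative assumptions and this is not the actual `ITXServer`/`ITXClient` implementation.

use tokio::sync::{mpsc, oneshot};

// Messages a client can send to the transaction server task. Each request
// carries a oneshot sender so the server can reply to that exact caller.
enum TxMsg {
    Execute(String, oneshot::Sender<String>),
    Commit(oneshot::Sender<()>),
    Rollback(oneshot::Sender<()>),
}

// The client handle is the only way to communicate with the server task.
#[derive(Clone)]
struct TxClient {
    sender: mpsc::Sender<TxMsg>,
}

impl TxClient {
    // Sends an operation and waits for the server's reply; returns None if
    // the server task has already shut down (i.e. the transaction is closed).
    async fn execute(&self, op: String) -> Option<String> {
        let (reply_tx, reply_rx) = oneshot::channel();
        self.sender.send(TxMsg::Execute(op, reply_tx)).await.ok()?;
        reply_rx.await.ok()
    }
}

// Spawns the server task. It owns the (hypothetical) transaction state and
// processes one message at a time -- later messages wait in the channel,
// which mirrors the queueing behaviour the module docs describe.
fn spawn_tx_server() -> TxClient {
    let (sender, mut receiver) = mpsc::channel(16);
    tokio::spawn(async move {
        while let Some(msg) = receiver.recv().await {
            match msg {
                TxMsg::Execute(op, reply) => {
                    // A real server would run `op` against its owned connection.
                    let _ = reply.send(format!("executed: {op}"));
                }
                TxMsg::Commit(reply) | TxMsg::Rollback(reply) => {
                    let _ = reply.send(());
                    break; // state transition: the actor shuts down
                }
            }
        }
    });
    TxClient { sender }
}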